/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
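
/*
 * Note that a buffer's size is never stored in struct binder_buffer
 * itself: it is implied by the gap between buffer->data and the next
 * buffer's data pointer in the address-ordered alloc->buffers list,
 * or by the distance to the end of the mapping for the last buffer.
 */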

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
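
/*
 * The allocator keeps two rbtrees per proc: alloc->free_buffers is
 * keyed by buffer size (duplicates go right), which enables the
 * best-fit search in binder_alloc_new_buf_locked(), while
 * alloc->allocated_buffers is keyed by data address so an in-flight
 * buffer can be looked up from the userspace pointer handed back when
 * it is freed.
 */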

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}
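
/*
 * The pointer translation above relies on the kernel and user mappings
 * mirroring each other at a fixed offset recorded at mmap time:
 *
 *	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
 *
 * where user_buffer_offset = vma->vm_start - (uintptr_t)alloc->buffer
 * (see binder_alloc_mmap_handler() below).
 */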

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:    binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
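
/*
 * Locking convention: functions with a _locked suffix expect
 * alloc->mutex to be held by the caller; the unsuffixed wrappers, like
 * binder_alloc_prepare_to_free() above, take and release it themselves.
 */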

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (!vma && need_mm)
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
			       alloc->pid);
			vma = NULL;
		}
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		if (page->page_ptr) {
			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
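
/*
 * A note on the error handling above: the err_* labels sit inside the
 * free_range loop, so a failure while mapping a page first undoes any
 * partial work on that page, then falls into the loop itself, which
 * walks backwards and returns every page mapped so far to
 * binder_alloc_lru before the locks are dropped.
 */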

struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr, NULL);
	return ERR_PTR(-ENOMEM);
}
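
/*
 * Allocation strategy, in short: walk the size-ordered free tree for
 * the smallest free buffer that fits (an exact match terminates the
 * walk with n != NULL, which is what the WARN_ON above cross-checks),
 * map in only the pages the request actually needs, and if the chosen
 * buffer is larger than the request, split the tail off into a new
 * free buffer so the remainder stays allocatable.
 */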

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or an ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		/* next is NULL when buffer is the last one; don't deref it */
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE,
					 NULL);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
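
/*
 * binder_delete_free_buffer() only returns a page to the LRU when
 * neither the previous buffer's end nor the next buffer's start lands
 * on that same page: buffers are not page-aligned, so a single page
 * can back the tail of one buffer and the head of the next.
 */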

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
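
/*
 * Freeing coalesces in both directions: if the next buffer in address
 * order is also free it is absorbed into this one, and if the previous
 * one is free this buffer is absorbed into it, so the free tree never
 * holds two adjacent free buffers.
 */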

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma:   vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	INIT_LIST_HEAD(&alloc->buffers);
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
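
/*
 * Note that binder_alloc_mmap_handler() does not populate any pages:
 * it reserves a matching kernel VM area, allocates the pages[]
 * bookkeeping array, and publishes one large free buffer covering the
 * whole mapping. Physical pages are brought in lazily by
 * binder_update_page_range() as buffers are actually allocated.
 */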

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK %s\n",
				     __func__, alloc->pid, i, page_addr,
				     on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance holding the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	if (alloc->vma) {
		mm = get_task_mm(alloc->tsk);
		if (!mm)
			goto err_get_task_mm_failed;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;

		zap_page_range(alloc->vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	list_lru_isolate(lru, item);

	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED;

err_down_write_mmap_sem_failed:
	mmput(mm);
err_get_task_mm_failed:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
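
/*
 * binder_alloc_free_page() takes alloc->mutex and mmap_sem with
 * trylocks because it runs in shrinker context: blocking on a lock
 * that an allocation path already holds, while that path may itself
 * be waiting on reclaim, could deadlock. Contended items are simply
 * skipped (LRU_SKIP) and left on the LRU for a later scan.
 */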

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
}

void binder_alloc_shrinker_init(void)
{
	list_lru_init(&binder_alloc_lru);
	register_shrinker(&binder_shrinker);
}
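
/*
 * Note the split in scope above: binder_alloc_init() runs once per
 * process from binder_open(), while binder_alloc_shrinker_init()
 * registers the single binder_alloc_lru and shrinker shared by all
 * binder procs.
 */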