/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"
struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)
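
/*
 * Each binder_alloc tracks its mmap'ed region with three structures:
 * an address-ordered list of all buffers (alloc->buffers), a size-sorted
 * rbtree of free buffers (alloc->free_buffers) used for best-fit
 * allocation, and an address-sorted rbtree of in-use buffers
 * (alloc->allocated_buffers) used to look buffers up when userspace
 * frees them. The helpers below walk the address-ordered list; a
 * buffer's size is implicit, extending from buffer->data to the start
 * of the next buffer (or to the end of the mapping for the last one).
 */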
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
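
/*
 * Free buffers are kept in an rbtree keyed by size (equal sizes go to
 * the right), so binder_alloc_new_buf_locked() can find the smallest
 * free buffer that still fits a request by descending left whenever
 * the current node is large enough.
 */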
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
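
/*
 * Userspace hands back buffer addresses in its own mapping; they are
 * translated to kernel addresses by subtracting alloc->user_buffer_offset
 * before searching the allocated-buffers rbtree. free_in_progress guards
 * against a second user thread racing in and freeing the same buffer
 * twice.
 */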
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
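
/*
 * binder_update_page_range() - allocate or reclaim pages in a range
 * @alloc:    binder_alloc for this proc
 * @allocate: 1 to allocate/map pages, 0 to return them to the LRU
 * @start:    start of the range (kernel address within the mapping)
 * @end:      end of the range (kernel address within the mapping)
 * @vma:      vma to map the pages into, or NULL to take it from @alloc
 *
 * Pages being "freed" are not released immediately; they are parked on
 * binder_alloc_lru and only unmapped and freed later by the shrinker
 * callback (binder_alloc_free_page()) under memory pressure.
 */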
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
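
/*
 * binder_alloc_new_buf_locked() does a best-fit walk of the free tree;
 * if the chosen buffer is larger than the request, the unused tail is
 * split off into a new free buffer so the remainder stays allocatable.
 * Async transactions are additionally charged against
 * alloc->free_async_space, which is capped at half the mapping.
 */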
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
		       alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr, NULL);
	return ERR_PTR(-ENOMEM);
}
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or %ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
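
/*
 * Page-sharing helpers for binder_delete_free_buffer(): the struct for
 * a free buffer is always deleted, but its start page is only handed
 * back to the LRU when no neighbouring buffer still occupies part of
 * that page and the buffer does not start on a page boundary.
 */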
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE,
					 NULL);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
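
/*
 * Freeing a buffer returns its whole pages to the LRU, credits
 * free_async_space back for async transactions, and coalesces the
 * buffer with free neighbours so adjacent free space shows up as a
 * single entry in the free-buffers rbtree.
 */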
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
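
/*
 * binder_alloc_deferred_release() - release the allocator's resources
 * @alloc: binder_alloc for this proc
 *
 * Frees any remaining buffer structs, unmaps and frees every page that
 * is still resident, and drops the reference on the owning mm. The vma
 * must already have been closed (alloc->vma cleared) before this runs.
 */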
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}
/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
}
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru the item is on
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 *
 * Return: LRU_REMOVED_RETRY if the page was freed, LRU_SKIP otherwise
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = alloc->vma;
	if (vma) {
		if (!mmget_not_zero(alloc->vma_vm_mm))
			goto err_mmget;
		mm = alloc->vma_vm_mm;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}

	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
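
/*
 * Shrinker integration: freed binder pages are parked on
 * binder_alloc_lru instead of being released immediately. Under memory
 * pressure the shrinker walks that list and calls
 * binder_alloc_free_page() to unmap and free each page.
 */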
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

void binder_alloc_shrinker_init(void)
{
	list_lru_init(&binder_alloc_lru);
	register_shrinker(&binder_shrinker);
}