/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_OPEN_CLOSE         = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC       = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
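
/*
 * Buffer sizes are implicit: a binder_buffer only records where its data
 * starts, and its size is the distance to the next buffer's data on the
 * alloc->buffers list (or to the end of the mapped region for the last
 * buffer). binder_alloc_buffer_size() below computes exactly that.
 */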
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return (u8 *)alloc->buffer +
                        alloc->buffer_size - (u8 *)buffer->data;
        return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: add free buffer, size %zd, at %pK\n",
                           alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer->data < buffer->data)
                        p = &parent->rb_left;
                else if (new_buffer->data > buffer->data)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
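
/*
 * Userspace identifies a buffer by the user-space address it was handed, so
 * the free path first subtracts alloc->user_buffer_offset to recover the
 * kernel address and then walks the allocated_buffers rbtree, which is keyed
 * by kernel data pointer. free_in_progress is the guard that rejects a second
 * free of the same buffer before the first one completes.
 */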
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        void *kern_ptr;

        kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer->data)
                        n = n->rb_left;
                else if (kern_ptr > buffer->data)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer twice
                         */
                        if (buffer->free_in_progress) {
                                pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
                                       alloc->pid, current->pid, (u64)user_ptr);
                                return NULL;
                        }
                        buffer->free_in_progress = 1;
                        return buffer;
                }
        }
        return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:    binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
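
/*
 * binder_update_page_range() backs (allocate != 0) or releases (allocate == 0)
 * the physical pages covering [start, end). Pages that are newly needed are
 * either reclaimed from binder_alloc_lru or allocated fresh, mapped into the
 * kernel vmalloc area, and inserted into the process's vma at
 * page_addr + alloc->user_buffer_offset. "Freed" pages are not returned to
 * the system immediately; they are parked on binder_alloc_lru so the shrinker
 * can reclaim them under memory pressure.
 */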
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct binder_lru_page *page;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: %s pages %pK-%pK\n", alloc->pid,
                           allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (allocate == 0)
                goto free_range;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (!page->page_ptr) {
                        need_mm = true;
                        break;
                }
        }

        if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
                mm = alloc->vma_vm_mm;

        if (mm) {
                down_read(&mm->mmap_sem);
                vma = alloc->vma;
        }

        if (!vma && need_mm) {
                pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                       alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                if (page->page_ptr) {
                        trace_binder_alloc_lru_start(alloc, index);

                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
                        WARN_ON(!on_lru);

                        trace_binder_alloc_lru_end(alloc, index);
                        continue;
                }

                if (WARN_ON(!vma))
                        goto err_page_ptr_cleared;

                trace_binder_alloc_page_start(alloc, index);
                page->page_ptr = alloc_page(GFP_KERNEL |
                                            __GFP_HIGHMEM |
                                            __GFP_ZERO);
                if (!page->page_ptr) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                               alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                page->alloc = alloc;
                INIT_LIST_HEAD(&page->lru);

                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL,
                                               &page->page_ptr);
                flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }

                if (index + 1 > alloc->pages_high)
                        alloc->pages_high = index + 1;

                trace_binder_alloc_page_end(alloc, index);
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                bool ret;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                trace_binder_free_lru_start(alloc, index);

                ret = list_lru_add(&binder_alloc_lru, &page->lru);
                WARN_ON(!ret);

                trace_binder_free_lru_end(alloc, index);
                continue;

err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(page->page_ptr);
                page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
                ;
        }
err_no_vma:
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}
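
/*
 * Allocation is a best-fit search of the size-ordered free_buffers rbtree:
 * find the smallest free buffer that can hold the pointer-aligned sum of the
 * data, offsets and extra-buffers sizes, split off the unused tail as a new
 * free buffer when the fit is not exact, and only then fault in the pages the
 * new buffer actually spans. Async allocations are additionally limited by
 * free_async_space, which starts out as half of the mapped area (see
 * binder_alloc_mmap_handler()).
 */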
static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
                                size_t offsets_size,
                                size_t extra_buffers_size,
                                int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (alloc->vma == NULL) {
                pr_err("%d: binder_alloc_buf, no vma\n",
                       alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid size %zd-%zd\n",
                                   alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid extra_buffers_size %zd\n",
                                   alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: binder_alloc_buf size %zd failed, no async space left\n",
                                   alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        /* Pad 0-size buffers so they get assigned unique addresses */
        size = max(size, sizeof(void *));

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
                       alloc->pid, size);
                pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                       total_alloc_size, allocated_buffers, largest_alloc_size,
                       total_free_size, free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                           alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        WARN_ON(n && buffer_size != size);
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
        if (ret)
                return ERR_PTR(ret);

        if (buffer_size != size) {
                struct binder_buffer *new_buffer;

                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!new_buffer) {
                        pr_err("%s: %d failed to alloc new buffer struct\n",
                               __func__, alloc->pid);
                        goto err_alloc_buf_struct_failed;
                }
                new_buffer->data = (u8 *)buffer->data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->free_in_progress = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got %pK\n",
                           alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_alloc_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }
        return buffer;

err_alloc_buf_struct_failed:
        binder_update_page_range(alloc, 0,
                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                 end_page_addr);
        return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or an ERR_PTR(-errno) on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
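
/*
 * Illustrative caller sketch (not part of this file; the surrounding
 * binder_transaction() logic in binder.c is simplified and partly
 * hypothetical). It shows the ERR_PTR() convention: callers must test the
 * result with IS_ERR(), not compare against NULL.
 *
 *      t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 *                                       tr->offsets_size, extra_buffers_size,
 *                                       !!(tr->flags & TF_ONE_WAY));
 *      if (IS_ERR(t->buffer)) {
 *              ret = PTR_ERR(t->buffer);       // e.g. -ENOSPC, -ESRCH, -EINVAL
 *              t->buffer = NULL;
 *              goto err_binder_alloc_buf_failed;  // hypothetical error label
 *      }
 */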
static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
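
/*
 * A free buffer's metadata can be dropped once it is merged with a free
 * neighbour. Its single backing page is handed back to the LRU only when
 * neither the previous nor the next buffer still has data on that page and
 * the buffer does not start exactly on a page boundary.
 */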
static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        bool to_free = true;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
                to_free = false;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer->data, prev->data);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_start_page(buffer)) {
                        to_free = false;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid,
                                           buffer->data,
                                           next->data);
                }
        }

        if (PAGE_ALIGNED(buffer->data)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer start %pK is page aligned\n",
                                   alloc->pid, buffer->data);
                to_free = false;
        }

        if (to_free) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->data,
                                   prev->data, next ? next->data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE);
        }
        list_del(&buffer->entry);
        kfree(buffer);
}
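
/*
 * binder_free_buf_locked() returns a buffer to the free tree: async space is
 * credited back, every page spanned exclusively by this buffer is released
 * via binder_update_page_range(alloc, 0, ...), and the buffer is coalesced
 * with free neighbours so the buffers list never holds two adjacent free
 * entries.
 */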
static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                           alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON(buffer->data < alloc->buffer);
        BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_free_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma:   vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
                                __func__, alloc->pid, vma->vm_start,
                                vma->vm_end, alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                failure_string = "alloc buffer struct";
                goto err_alloc_buf_struct_failed;
        }

        buffer->data = alloc->buffer;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        barrier();
        alloc->vma = vma;
        alloc->vma_vm_mm = vma->vm_mm;
        mmgrab(alloc->vma_vm_mm);

        return 0;

err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
               alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
        return ret;
}
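
/*
 * Final teardown for a dying proc: by the time this runs the vma is already
 * gone (see binder_alloc_vma_close()), so any still-allocated buffers are
 * force-freed, every remaining page is pulled off the LRU, unmapped and
 * released, and the mm reference taken with mmgrab() in
 * binder_alloc_mmap_handler() is dropped.
 */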
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;
        struct binder_buffer *buffer;

        BUG_ON(alloc->vma);

        buffers = 0;
        mutex_lock(&alloc->mutex);
        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                WARN_ON(!buffer->free);

                list_del(&buffer->entry);
                WARN_ON_ONCE(!list_empty(&alloc->buffers));
                kfree(buffer);
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;
                        bool on_lru;

                        if (!alloc->pages[i].page_ptr)
                                continue;

                        on_lru = list_lru_del(&binder_alloc_lru,
                                              &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%s: %d: page %d at %pK %s\n",
                                           __func__, alloc->pid, i, page_addr,
                                           on_lru ? "on lru" : "active");
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                           "%s: %d buffers %d, pages %d\n",
                           __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, " buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
{
        struct binder_lru_page *page;
        int i;
        int active = 0;
        int lru = 0;
        int free = 0;

        mutex_lock(&alloc->mutex);
        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                page = &alloc->pages[i];
                if (!page->page_ptr)
                        free++;
                else if (list_empty(&page->lru))
                        active++;
                else
                        lru++;
        }
        mutex_unlock(&alloc->mutex);
        seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
}
/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        WRITE_ONCE(alloc->vma, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance of the item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
{
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,
                                                    struct binder_lru_page,
                                                    lru);
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;

        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
        vma = alloc->vma;
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
                mm = alloc->vma_vm_mm;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lock);

        if (vma) {
                trace_binder_unmap_user_start(alloc, index);

                zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);

                up_write(&mm->mmap_sem);
                mmput(mm);
        }

        trace_binder_unmap_kernel_start(alloc, index);

        unmap_kernel_range(page_addr, PAGE_SIZE);
        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);

        spin_lock(lock);
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
        mmput_async(mm);
err_mmget:
err_page_already_freed:
        mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
        return LRU_SKIP;
}
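
/*
 * Shrinker glue: binder_shrink_count() reports how many pages currently sit
 * on binder_alloc_lru, and binder_shrink_scan() walks that LRU calling
 * binder_alloc_free_page() on each entry. The callback returns
 * LRU_REMOVED_RETRY when it actually unmapped and freed a page, and LRU_SKIP
 * when the page cannot safely be reclaimed right now (locks contended, page
 * already gone, or the owning mm is exiting).
 */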
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret = list_lru_count(&binder_alloc_lru);
        return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret;

        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
        return ret;
}

static struct shrinker binder_shrinker = {
        .count_objects = binder_shrink_count,
        .scan_objects = binder_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
        int ret = list_lru_init(&binder_alloc_lru);

        if (ret == 0) {
                ret = register_shrinker(&binder_shrinker);
                if (ret)
                        list_lru_destroy(&binder_alloc_lru);
        }
        return ret;
}
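
/*
 * Illustrative init-order sketch (the driver-side wiring lives in binder.c
 * and is simplified/hypothetical here): binder_alloc_shrinker_init() is meant
 * to run once when the driver loads, while binder_alloc_init() runs for every
 * process that opens the binder device.
 *
 *      static int __init binder_init(void)
 *      {
 *              int ret = binder_alloc_shrinker_init();  // once, at driver init
 *
 *              if (ret)
 *                      return ret;
 *              ...
 *      }
 *
 *      // per process, from binder_open():
 *      binder_alloc_init(&proc->alloc);
 */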