/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

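/*
 * binder_alloc_debug_mask can be changed at run time through the
 * "debug_mask" module parameter declared above; with 0644 permissions it
 * is typically writable under /sys/module/<module>/parameters/debug_mask,
 * where the exact module name depends on how the binder driver is built.
 */
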
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer +
		       alloc->buffer_size - (void *)buffer->data;
	return (size_t)list_entry(buffer->entry.next,
			struct binder_buffer, entry) - (size_t)buffer->data;
}

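/*
 * A buffer's size is implicit: it runs from its own data[] up to the start
 * of the next buffer in the address-ordered alloc->buffers list, or up to
 * the end of the mapped region for the last buffer.  Free buffers are kept
 * in alloc->free_buffers, an rbtree keyed by size that the allocator walks
 * for best-fit lookups; in-use buffers are kept in alloc->allocated_buffers,
 * an rbtree keyed by address that is searched when userspace frees a buffer.
 * The two insertion helpers below maintain those trees.
 */
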
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: add free buffer, size %zd, at %pK\n",
		      alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

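/*
 * Userspace only ever sees the address of a buffer's data[] within its own
 * mapping.  The kernel address of the enclosing binder_buffer is recovered
 * by subtracting the per-process user/kernel offset and then the offset of
 * data[] within struct binder_buffer; the allocated_buffers rbtree is then
 * searched to confirm the pointer really names a live buffer.  The
 * free_in_progress flag guards against racing user threads handing the same
 * buffer back twice.
 */
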
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:	Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

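/*
 * Illustrative sketch of how a BC_FREE_BUFFER command is expected to use
 * this pair of entry points (the calling pattern, not the literal binder.c
 * code):
 *
 *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
 *	if (buffer == NULL)
 *		return;		// bad or already-freed user pointer
 *	// ...drop references held by the buffer's objects...
 *	binder_alloc_free_buf(&proc->alloc, buffer);
 */
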
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: %s pages %pK-%pK\n", alloc->pid,
		     allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
				alloc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
			alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
				alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				(unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				alloc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

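/*
 * binder_update_page_range() above maps every binder page twice: once into
 * the kernel's vmalloc area (map_kernel_range_noflush) so the driver can
 * copy transaction data in, and once into the target process's vma
 * (vm_insert_page) so userspace can read it at
 * kernel_address + alloc->user_buffer_offset.  With allocate == 0 the same
 * routine walks the range backwards and tears down both mappings; the error
 * labels inside the loop reuse that teardown path for partially set up pages.
 *
 * The allocator below implements best fit with splitting: it walks the
 * size-ordered free_buffers tree for the smallest free buffer that can hold
 * the request, maps any pages the request needs, and, if enough space is
 * left over for another binder_buffer header, splits the remainder back into
 * the free tree.  Async transactions are additionally limited to half of the
 * mapped space via alloc->free_async_space.
 */
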
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid size %zd-%zd\n",
				alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				"%d: got transaction with invalid extra_buffers_size %zd\n",
				alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}
	return buffer;
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:	The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

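/*
 * Illustrative caller sketch (the intended allocation pattern for a
 * transaction, not the literal binder_transaction() code):
 *
 *	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 *					 tr->offsets_size, extra_buffers_size,
 *					 !reply && (t->flags & TF_ONE_WAY));
 *	if (IS_ERR(t->buffer)) {
 *		// -ESRCH, -EINVAL or -ENOSPC from the allocator
 *	}
 */
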
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

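/*
 * buffer_start_page()/buffer_end_page() give the first and last pages that
 * hold a binder_buffer header.  binder_delete_free_buffer() below uses them
 * to decide which of those pages may actually be released when the buffer is
 * dropped from the list: a page shared with the previous or next buffer must
 * stay mapped even though this buffer is going away.
 */
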
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK share page with %pK\n",
			      alloc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = list_entry(buffer->entry.next,
				  struct binder_buffer, entry);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%d: merge free, buffer %pK share page with %pK\n",
				      alloc->pid, buffer, prev);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
			     alloc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(alloc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

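/*
 * Freeing path: binder_free_buf_locked() below releases the pages backing
 * the buffer's payload, returns any reserved async space, and then coalesces
 * the buffer with free neighbours in the address-ordered list, so best-fit
 * allocation sees one large free chunk instead of several small ones.
 */
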
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
		      alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < alloc->buffer);
	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	if (binder_update_page_range(alloc, 1, alloc->buffer,
				     alloc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = alloc->buffer;
	INIT_LIST_HEAD(&alloc->buffers);
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_small_buf_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}

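/*
 * After a successful mmap the process has a userspace view of the same
 * pages the kernel reaches through the get_vm_area() region, so for any
 * byte of buffer space:
 *
 *	user_address == kernel_address + alloc->user_buffer_offset
 *
 * Only the first page is populated here; the rest are mapped on demand by
 * binder_update_page_range() as buffers are handed out.
 */
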
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!alloc->pages[i])
				continue;

			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK not freed\n",
				     __func__, alloc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i]);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d buffers %d, pages %d\n",
		     __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, " buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
}

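/*
 * Lifetime of a binder_alloc as this file expects it to be driven (a sketch
 * of the calling sequence, not code from binder.c itself):
 *
 *	binder_alloc_init(&proc->alloc);			// binder_open()
 *	binder_alloc_mmap_handler(&proc->alloc, vma);		// binder_mmap()
 *	buffer = binder_alloc_new_buf(&proc->alloc, ...);	// transactions
 *	binder_alloc_free_buf(&proc->alloc, buffer);		// BC_FREE_BUFFER
 *	binder_alloc_vma_close(&proc->alloc);			// munmap()
 *	binder_alloc_deferred_release(&proc->alloc);		// binder_release()
 */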