/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info(x); \
        } while (0)
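
/*
 * Usage sketch (illustrative, not part of the driver): the mask defaults
 * to 0, so all binder_alloc_debug() output is off. Because it is exposed
 * with module_param_named(..., 0644), the mask can be flipped at runtime
 * through sysfs; assuming this code links into a module named "binder",
 * enabling both buffer-allocation channels (bits 2 and 3) would be:
 *
 *      echo 0x0c > /sys/module/binder/parameters/debug_mask
 */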

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return alloc->buffer +
                        alloc->buffer_size - (void *)buffer->data;
        return (size_t)list_entry(buffer->entry.next,
                          struct binder_buffer, entry) - (size_t)buffer->data;
}
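
/*
 * Layout note with a worked example (sizes illustrative): buffers are
 * carved back to back out of the single mmap'ed area, so no explicit
 * size field is needed. A buffer ends where the next buffer's struct
 * begins, or at the end of the area for the last buffer:
 *
 *      [ hdr0 | data0 ........ ][ hdr1 | data1 .... ]  <end of area>
 *      size0 = (char *)hdr1 - (char *)hdr0->data
 *      size1 = alloc->buffer + alloc->buffer_size - (void *)hdr1->data
 */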

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: add free buffer, size %zd, at %pK\n",
                           alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer < buffer)
                        p = &parent->rb_left;
                else if (new_buffer > buffer)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_buffer_lookup_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        struct binder_buffer *kern_ptr;

        kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
                - offsetof(struct binder_buffer, data));

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer)
                        n = n->rb_left;
                else if (kern_ptr > buffer)
                        n = n->rb_right;
                else
                        return buffer;
        }
        return NULL;
}
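
/*
 * The pointer arithmetic above, spelled out with illustrative numbers:
 * userspace is handed the address of buffer->data in *its* mapping, so
 * recovering the kernel struct means undoing both the mapping offset and
 * the data-member offset. Assuming offsetof(struct binder_buffer, data)
 * is 0x40:
 *
 *      user_ptr                  = 0x7f0000001040
 *      alloc->user_buffer_offset = 0x7ee000000000
 *      kern_ptr = 0x7f0000001040 - 0x7ee000000000 - 0x40 = 0x2000001000
 *
 * which is then compared against the allocated_buffers rb-tree purely by
 * struct address, so a forged user_ptr simply fails the lookup.
 */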

/**
 * binder_alloc_buffer_lookup() - get buffer given user ptr
 * @alloc:      binder_alloc for this proc
 * @user_ptr:   User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return:      Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_buffer_lookup(struct binder_alloc *alloc,
                                                 uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_buffer_lookup_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
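
/*
 * Caller sketch (assumed, modeled on the BC_FREE_BUFFER path in
 * binder.c; names are illustrative):
 *
 *      buffer = binder_alloc_buffer_lookup(&proc->alloc, data_ptr);
 *      if (buffer == NULL)
 *              break;  // stale or invalid user pointer, nothing to free
 *      ...
 *      binder_alloc_free_buf(&proc->alloc, buffer);
 */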

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end,
                                    struct vm_area_struct *vma)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct page **page;
        struct mm_struct *mm;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: %s pages %pK-%pK\n", alloc->pid,
                           allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (vma)
                mm = NULL;
        else
                mm = get_task_mm(alloc->tsk);

        if (mm) {
                down_write(&mm->mmap_sem);
                vma = alloc->vma;
                if (vma && mm != alloc->vma_vm_mm) {
                        pr_err("%d: vma mm and task mm mismatch\n",
                               alloc->pid);
                        vma = NULL;
                }
        }

        if (allocate == 0)
                goto free_range;

        if (vma == NULL) {
                pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                       alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;

                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

                BUG_ON(*page);
                *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
                if (*page == NULL) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                               alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL, page);
                flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0]);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (vma)
                        zap_page_range(vma, (uintptr_t)page_addr +
                                alloc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(*page);
                *page = NULL;
err_alloc_page_failed:
                ;
        }
err_no_vma:
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return -ENOMEM;
}
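
/*
 * Call-shape sketch for binder_update_page_range() (illustrative): with
 * allocate == 1 every page in [start, end) gets backing memory
 * (alloc_page()), a kernel mapping (map_kernel_range_noflush()), and a
 * user mapping (vm_insert_page()); with allocate == 0 the same range is
 * walked backwards and all three are undone. Growing a single page:
 *
 *      ret = binder_update_page_range(alloc, 1, page_addr,
 *                                     page_addr + PAGE_SIZE, NULL);
 *
 * On failure the loop has already unwound its partial work via the
 * in-loop error labels, so the caller only propagates -ENOMEM.
 */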

struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                                                  size_t data_size,
                                                  size_t offsets_size,
                                                  size_t extra_buffers_size,
                                                  int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;

        if (alloc->vma == NULL) {
                pr_err("%d: binder_alloc_buf, no vma\n",
                       alloc->pid);
                return NULL;
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid size %zd-%zd\n",
                                   alloc->pid, data_size, offsets_size);
                return NULL;
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid extra_buffers_size %zd\n",
                                   alloc->pid, extra_buffers_size);
                return NULL;
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: binder_alloc_buf size %zd failed, no async space left\n",
                                   alloc->pid, size);
                return NULL;
        }

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
                       alloc->pid, size);
                return NULL;
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                           alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        if (n == NULL) {
                if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
                        buffer_size = size; /* no room for other buffers */
                else
                        buffer_size = size + sizeof(struct binder_buffer);
        }
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        if (binder_update_page_range(alloc, 1,
            (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
                return NULL;

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        if (buffer_size != size) {
                struct binder_buffer *new_buffer = (void *)buffer->data + size;

                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got %pK\n",
                           alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_alloc_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }
        return buffer;
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return:      The allocated buffer or %NULL if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
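
/*
 * Allocation sketch (assumed caller, loosely modeled on
 * binder_transaction(); names are illustrative):
 *
 *      t->buffer = binder_alloc_new_buf(&target_proc->alloc,
 *                                       tr->data_size, tr->offsets_size,
 *                                       extra_buffers_size,
 *                                       !!(t->flags & TF_ONE_WAY));
 *      if (t->buffer == NULL)
 *              goto err_alloc_buf_failed;  // hypothetical label
 *
 * The returned pointer is the kernel view; the matching user address is
 * (uintptr_t)t->buffer->data + alloc->user_buffer_offset.
 */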

static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
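
/*
 * Worked example for the two helpers above (PAGE_SIZE == 4096 and
 * sizeof(struct binder_buffer) == 0x58 assumed for illustration): for a
 * buffer struct at 0x2000001ff0,
 *
 *      buffer_start_page() = 0x2000001ff0 & PAGE_MASK              = 0x2000001000
 *      buffer_end_page()   = (0x2000001ff0 + 0x58 - 1) & PAGE_MASK = 0x2000002000
 *
 * i.e. the struct straddles a page boundary, and neither page may be
 * released while a live struct still occupies it.
 */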

static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        int free_page_end = 1;
        int free_page_start = 1;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
        BUG_ON(!prev->free);
        if (buffer_end_page(prev) == buffer_start_page(buffer)) {
                free_page_start = 0;
                if (buffer_end_page(prev) == buffer_end_page(buffer))
                        free_page_end = 0;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer, prev);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = list_entry(buffer->entry.next,
                                  struct binder_buffer, entry);
                if (buffer_start_page(next) == buffer_end_page(buffer)) {
                        free_page_end = 0;
                        if (buffer_start_page(next) ==
                            buffer_start_page(buffer))
                                free_page_start = 0;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid, buffer, next);
                }
        }
        list_del(&buffer->entry);
        if (free_page_start || free_page_end) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
                                   alloc->pid, buffer, free_page_start ? "" : " end",
                                   free_page_end ? "" : " start", prev, next);
                binder_update_page_range(alloc, 0, free_page_start ?
                        buffer_start_page(buffer) : buffer_end_page(buffer),
                        (free_page_end ? buffer_end_page(buffer) :
                        buffer_start_page(buffer)) + PAGE_SIZE, NULL);
        }
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                           alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON((void *)buffer < alloc->buffer);
        BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_free_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
                NULL);

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = list_entry(buffer->entry.next,
                                                struct binder_buffer, entry);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = list_entry(buffer->entry.prev,
                                                struct binder_buffer, entry);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:      binder_alloc for this proc
 * @buffer:     kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:      alloc structure for this proc
 * @vma:        vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
                                __func__, alloc->pid, vma->vm_start,
                                vma->vm_end, alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
                                   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        if (binder_update_page_range(alloc, 1, alloc->buffer,
                                     alloc->buffer + PAGE_SIZE, vma)) {
                ret = -ENOMEM;
                failure_string = "alloc small buf";
                goto err_alloc_small_buf_failed;
        }
        buffer = alloc->buffer;
        INIT_LIST_HEAD(&alloc->buffers);
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        barrier();
        alloc->vma = vma;
        alloc->vma_vm_mm = vma->vm_mm;

        return 0;

err_alloc_small_buf_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
               alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
        return ret;
}
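
/*
 * Mapping sketch: after a successful binder_alloc_mmap_handler() the
 * same pages are reachable at two addresses separated by the constant
 * alloc->user_buffer_offset. Illustrative numbers:
 *
 *      vma->vm_start (user)       = 0x7ee000000000
 *      alloc->buffer (kernel)     = 0x002000000000
 *      alloc->user_buffer_offset  = 0x7ee000000000 - 0x002000000000
 *                                 = 0x7ec000000000
 *
 * so user_addr == kernel_addr + alloc->user_buffer_offset for every byte
 * of the area, which is the invariant binder_update_page_range() and
 * binder_alloc_buffer_lookup() depend on.
 */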

void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;

        BUG_ON(alloc->vma);

        buffers = 0;
        mutex_lock(&alloc->mutex);
        while ((n = rb_first(&alloc->allocated_buffers))) {
                struct binder_buffer *buffer;

                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;

                        if (!alloc->pages[i])
                                continue;

                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%s: %d: page %d at %pK not freed\n",
                                           __func__, alloc->pid, i, page_addr);
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i]);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                           "%s: %d buffers %d, pages %d\n",
                           __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        WRITE_ONCE(alloc->vma, NULL);
        WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->tsk = current->group_leader;
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
}
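
/*
 * Lifecycle sketch, tying the entry points together (ordering as the
 * assumed callers in binder.c would use them; &proc->alloc is
 * illustrative):
 *
 *      binder_alloc_init(&proc->alloc);               // binder_open()
 *      binder_alloc_mmap_handler(&proc->alloc, vma);  // binder_mmap()
 *      buf = binder_alloc_new_buf(&proc->alloc, ...); // per transaction
 *      binder_alloc_free_buf(&proc->alloc, buf);      // BC_FREE_BUFFER
 *      binder_alloc_vma_close(&proc->alloc);          // vma ->close()
 *      binder_alloc_deferred_release(&proc->alloc);   // binder_release()
 */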