binder_alloc.c

/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)
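
/*
 * alloc->buffers holds every buffer (free and allocated) in the mmap'd
 * region, kept in ascending address order; these helpers step to the
 * physically adjacent buffer in that list.
 */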
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
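
/*
 * A buffer's size is not stored explicitly; it runs from buffer->data up
 * to the start of the next buffer in the list, or to the end of the
 * mmap'd region for the last buffer.
 */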
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
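
/*
 * Free buffers live in alloc->free_buffers, an rbtree keyed by buffer
 * size so binder_alloc_new_buf_locked() can do a best-fit search.
 */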
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->data < buffer->data)
			p = &parent->rb_left;
		else if (new_buffer->data > buffer->data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
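
/*
 * Allocated buffers are keyed by their kernel data address; a userspace
 * pointer is translated back to a kernel address via user_buffer_offset
 * before the rbtree lookup.
 */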
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	void *kern_ptr;

	kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer->data)
			n = n->rb_left;
		else if (kern_ptr > buffer->data)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
						   "%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
						   alloc->pid, current->pid,
						   (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}
/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
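
/*
 * binder_update_page_range() backs (allocate == 1) or unbacks
 * (allocate == 0) the pages spanning [start, end). When allocating, each
 * page is either reclaimed from the binder lru or freshly allocated,
 * mapped into the kernel vm area, and inserted into the task's vma.
 * When freeing, pages are not released immediately; they are parked on
 * the lru so the shrinker can reclaim them under memory pressure.
 */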
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
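
/*
 * Best-fit allocator: walk the size-sorted free tree for the smallest
 * free buffer that can hold the (pointer-aligned) request, back its
 * pages, and if the chosen buffer is larger than needed split off the
 * tail as a new free buffer. Async allocations are additionally capped
 * by free_async_space (half of the mapped region).
 */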
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t data_size,
				size_t offsets_size,
				size_t extra_buffers_size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	/* Pad 0-size buffers so they get assigned unique addresses */
	size = max(size, sizeof(void *));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
				   total_alloc_size, allocated_buffers,
				   largest_alloc_size, total_free_size,
				   free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	WARN_ON(n && buffer_size != size);
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->data = (u8 *)buffer->data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}
	return buffer;

err_alloc_buf_struct_failed:
	binder_update_page_range(alloc, 0,
				 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
				 end_page_addr);
	return ERR_PTR(-ENOMEM);
}
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc: binder_alloc for this proc
 * @data_size: size of user data buffer
 * @offsets_size: user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async: buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) on error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
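
/*
 * Page-boundary helpers used when freeing: a buffer's first page can only
 * be released if it is not shared with the tail of the previous buffer or
 * the start of the next one.
 */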
static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer->data, prev->data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid,
					   buffer->data,
					   next->data);
		}
	}

	if (PAGE_ALIGNED(buffer->data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %pK is page aligned\n",
				   alloc->pid, buffer->data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
				   alloc->pid, buffer->data,
				   prev->data, next ? next->data : NULL);
		binder_update_page_range(alloc, 0, buffer_start_page(buffer),
					 buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}
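
/*
 * Return the buffer's pages to the lru, mark it free, and coalesce it
 * with any adjacent free buffers so the space can satisfy larger
 * allocations later.
 */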
static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->data < alloc->buffer);
	BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}
/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;
	mmgrab(alloc->vma_vm_mm);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}
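
/*
 * Tear down a proc's allocator once the binder proc is going away: free
 * any buffers that are still allocated, drop every page that is still
 * resident (whether active or parked on the lru), and release the page
 * array, vm area and mm reference.
 */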
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %pK %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}
static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m: seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
		page = &alloc->pages[i];
		if (!page->page_ptr)
			free++;
		else if (list_empty(&page->lru))
			active++;
		else
			lru++;
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
}
/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item: item to free
 * @lru: list_lru instance of the item
 * @lock: lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	uintptr_t page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
	vma = alloc->vma;
	if (vma) {
		if (!mmget_not_zero(alloc->vma_vm_mm))
			goto err_mmget;
		mm = alloc->vma_vm_mm;
		if (!down_write_trylock(&mm->mmap_sem))
			goto err_down_write_mmap_sem_failed;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma,
			       page_addr + alloc->user_buffer_offset,
			       PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);

		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	trace_binder_unmap_kernel_start(alloc, index);

	unmap_kernel_range(page_addr, PAGE_SIZE);
	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}
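
/*
 * Shrinker integration: "count" reports how many binder pages are parked
 * on the global lru, and "scan" walks that lru calling
 * binder_alloc_free_page() on up to sc->nr_to_scan of them.
 */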
static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};
/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}
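
/*
 * One-time init: set up the global page lru and register the shrinker;
 * if registration fails, the lru is torn down again.
 */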
int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}