ring_buffer.c

/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"

static void perf_output_wakeup(struct perf_output_handle *handle)
{
        atomic_set(&handle->rb->poll, POLLIN);

        handle->event->pending_wakeup = 1;
        irq_work_queue(&handle->event->pending);
}

/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;

        preempt_disable();
        local_inc(&rb->nest);
        handle->wakeup = local_read(&rb->wakeup);
}

static void perf_output_put_handle(struct perf_output_handle *handle)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long head;

again:
        head = local_read(&rb->head);

        /*
         * IRQ/NMI can happen here, which means we can miss a head update.
         */

        if (!local_dec_and_test(&rb->nest))
                goto out;

        /*
         * Since the mmap() consumer (userspace) can run on a different CPU:
         *
         *   kernel                             user
         *
         *   if (LOAD ->data_tail) {            LOAD ->data_head
         *                      (A)             smp_rmb()       (C)
         *      STORE $data                     LOAD $data
         *      smp_wmb()       (B)             smp_mb()        (D)
         *      STORE ->data_head               STORE ->data_tail
         *   }
         *
         * Where A pairs with D, and B pairs with C.
         *
         * In our case (A) is a control dependency that separates the load of
         * the ->data_tail and the stores of $data. In case ->data_tail
         * indicates there is no room in the buffer to store $data we do not.
         *
         * D needs to be a full barrier since it separates the data READ
         * from the tail WRITE.
         *
         * For B a WMB is sufficient since it separates two WRITEs, and for C
         * an RMB is sufficient since it separates two READs.
         *
         * See perf_output_begin().
         */
        smp_wmb(); /* B, matches C */
        rb->user_page->data_head = head;

        /*
         * Now check if we missed an update -- rely on previous implied
         * compiler barriers to force a re-read.
         */
        if (unlikely(head != local_read(&rb->head))) {
                local_inc(&rb->nest);
                goto again;
        }

        if (handle->wakeup != local_read(&rb->wakeup))
                perf_output_wakeup(handle);

out:
        preempt_enable();
}
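/*
 * For reference, a minimal sketch of the userspace side of the protocol
 * above (not part of this file; the field names come from the mmap()ed
 * struct perf_event_mmap_page in <linux/perf_event.h>, and rmb()/mb()
 * stand for whichever barrier primitives the consumer uses):
 *
 *      struct perf_event_mmap_page *up = mmap_base;
 *      __u64 tail = up->data_tail;
 *      __u64 head = up->data_head;     // LOAD ->data_head
 *      rmb();                          // (C), pairs with (B) above
 *      while (tail != head) {
 *              ... read the record at data + (tail & mask) ...
 *              tail += record_size;
 *      }
 *      mb();                           // (D), pairs with (A) above
 *      up->data_tail = tail;           // STORE ->data_tail
 */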
static bool __always_inline
ring_buffer_has_space(unsigned long head, unsigned long tail,
                      unsigned long data_size, unsigned int size,
                      bool backward)
{
        if (!backward)
                return CIRC_SPACE(head, tail, data_size) >= size;
        else
                return CIRC_SPACE(tail, head, data_size) >= size;
}
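/*
 * CIRC_SPACE(head, tail, size) from <linux/circ_buf.h> expands to
 * ((tail) - ((head) + 1)) & ((size) - 1): the number of bytes a producer
 * can still write before catching up with the consumer. Worked example
 * (mine, not from the file): data_size = 16, head = 10, tail = 4 gives
 * (4 - 11) & 15 = 9, i.e. 16 bytes minus the 6 in flight minus the one
 * slot that is always kept empty. Backward writers move head towards
 * tail, hence the swapped arguments.
 */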
static int __always_inline
__perf_output_begin(struct perf_output_handle *handle,
                    struct perf_event *event, unsigned int size,
                    bool backward)
{
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
        int have_lost, page_shift;
        struct {
                struct perf_event_header header;
                u64                      id;
                u64                      lost;
        } lost_event;

        rcu_read_lock();
        /*
         * For inherited events we send all the output towards the parent.
         */
        if (event->parent)
                event = event->parent;

        rb = rcu_dereference(event->rb);
        if (unlikely(!rb))
                goto out;

        if (unlikely(rb->paused)) {
                if (rb->nr_pages)
                        local_inc(&rb->lost);
                goto out;
        }

        handle->rb    = rb;
        handle->event = event;

        have_lost = local_read(&rb->lost);
        if (unlikely(have_lost)) {
                size += sizeof(lost_event);
                if (event->attr.sample_id_all)
                        size += event->id_header_size;
        }

        perf_output_get_handle(handle);

        do {
                tail = READ_ONCE(rb->user_page->data_tail);
                offset = head = local_read(&rb->head);
                if (!rb->overwrite) {
                        if (unlikely(!ring_buffer_has_space(head, tail,
                                                            perf_data_size(rb),
                                                            size, backward)))
                                goto fail;
                }

                /*
                 * The above forms a control dependency barrier separating the
                 * @tail load above from the data stores below. Since the @tail
                 * load is required to compute the branch to fail below.
                 *
                 * A, matches D; the full memory barrier userspace SHOULD issue
                 * after reading the data and before storing the new tail
                 * position.
                 *
                 * See perf_output_put_handle().
                 */

                if (!backward)
                        head += size;
                else
                        head -= size;
        } while (local_cmpxchg(&rb->head, offset, head) != offset);

        if (backward) {
                offset = head;
                head = (u64)(-head);
        }

        /*
         * We rely on the implied barrier() by local_cmpxchg() to ensure
         * none of the data stores below can be lifted up by the compiler.
         */
        if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
                local_add(rb->watermark, &rb->wakeup);

        page_shift = PAGE_SHIFT + page_order(rb);

        handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
        offset &= (1UL << page_shift) - 1;
        handle->addr = rb->data_pages[handle->page] + offset;
        handle->size = (1UL << page_shift) - offset;

        if (unlikely(have_lost)) {
                struct perf_sample_data sample_data;

                lost_event.header.size = sizeof(lost_event);
                lost_event.header.type = PERF_RECORD_LOST;
                lost_event.header.misc = 0;
                lost_event.id          = event->id;
                lost_event.lost        = local_xchg(&rb->lost, 0);

                perf_event_header__init_id(&lost_event.header,
                                           &sample_data, event);
                perf_output_put(handle, lost_event);
                perf_event__output_id_sample(event, handle, &sample_data);
        }

        return 0;

fail:
        local_inc(&rb->lost);
        perf_output_put_handle(handle);
out:
        rcu_read_unlock();

        return -ENOSPC;
}

int perf_output_begin_forward(struct perf_output_handle *handle,
                              struct perf_event *event, unsigned int size)
{
        return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
                               struct perf_event *event, unsigned int size)
{
        return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size)
{
        return __perf_output_begin(handle, event, size,
                                   unlikely(is_write_backward(event)));
}

unsigned int perf_output_copy(struct perf_output_handle *handle,
                              const void *buf, unsigned int len)
{
        return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
                              unsigned int len)
{
        return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
        perf_output_put_handle(handle);
        rcu_read_unlock();
}
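/*
 * For orientation, a minimal sketch of how kernel-side producers use the
 * API above (the real callers live in kernel/events/core.c; header
 * initialization is elided):
 *
 *      struct perf_output_handle handle;
 *      struct perf_event_header header = { ... };
 *
 *      if (perf_output_begin(&handle, event, header.size))
 *              return;                 // no room: rb->lost was bumped
 *      perf_output_put(&handle, header);
 *      ...                             // perf_output_copy()/_skip() the body
 *      perf_output_end(&handle);       // publish data_head, maybe wake up
 */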
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
        long max_size = perf_data_size(rb);

        if (watermark)
                rb->watermark = min(max_size, watermark);

        if (!rb->watermark)
                rb->watermark = max_size / 2;

        if (flags & RING_BUFFER_WRITABLE)
                rb->overwrite = 0;
        else
                rb->overwrite = 1;

        atomic_set(&rb->refcount, 1);

        INIT_LIST_HEAD(&rb->event_list);
        spin_lock_init(&rb->event_lock);

        /*
         * perf_output_begin() only checks rb->paused, therefore
         * rb->paused must be true if we have no pages for output.
         */
        if (!rb->nr_pages)
                rb->paused = 1;
}

void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
        /*
         * OVERWRITE is determined by perf_aux_output_end() and can't
         * be passed in directly.
         */
        if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
                return;

        handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);

/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
                            struct perf_event *event)
{
        struct perf_event *output_event = event;
        unsigned long aux_head, aux_tail;
        struct ring_buffer *rb;

        if (output_event->parent)
                output_event = output_event->parent;

        /*
         * Since this will typically be open across pmu::add/pmu::del, we
         * grab ring_buffer's refcount instead of holding rcu read lock
         * to make sure it doesn't disappear under us.
         */
        rb = ring_buffer_get(output_event);
        if (!rb)
                return NULL;

        if (!rb_has_aux(rb))
                goto err;

        /*
         * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
         * about to get freed, so we leave immediately.
         *
         * Checking rb::aux_mmap_count and rb::refcount has to be done in
         * the same order, see perf_mmap_close. Otherwise we end up freeing
         * aux pages in this path, which is a bug, because in_atomic().
         */
        if (!atomic_read(&rb->aux_mmap_count))
                goto err;

        if (!atomic_inc_not_zero(&rb->aux_refcount))
                goto err;

        /*
         * Nesting is not supported for AUX area, make sure nested
         * writers are caught early
         */
        if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
                goto err_put;

        aux_head = local_read(&rb->aux_head);

        handle->rb = rb;
        handle->event = event;
        handle->head = aux_head;
        handle->size = 0;
        handle->aux_flags = 0;

        /*
         * In overwrite mode, AUX data stores do not depend on aux_tail,
         * therefore (A) control dependency barrier does not exist. The
         * (B) <-> (C) ordering is still observed by the pmu driver.
         */
        if (!rb->aux_overwrite) {
                aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
                handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
                if (aux_head - aux_tail < perf_aux_size(rb))
                        handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

                /*
                 * handle->size computation depends on aux_tail load; this forms a
                 * control dependency barrier separating aux_tail load from aux data
                 * store that will be enabled on successful return
                 */
                if (!handle->size) { /* A, matches D */
                        event->pending_disable = 1;
                        perf_output_wakeup(handle);
                        local_set(&rb->aux_nest, 0);
                        goto err_put;
                }
        }

        return handle->rb->aux_priv;

err_put:
        /* can't be last */
        rb_free_aux(rb);

err:
        ring_buffer_put(rb);
        handle->event = NULL;

        return NULL;
}

/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
        bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
        struct ring_buffer *rb = handle->rb;
        unsigned long aux_head;

        /* in overwrite mode, driver provides aux_head via handle */
        if (rb->aux_overwrite) {
                handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

                aux_head = handle->head;
                local_set(&rb->aux_head, aux_head);
        } else {
                handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

                aux_head = local_read(&rb->aux_head);
                local_add(size, &rb->aux_head);
        }

        if (size || handle->aux_flags) {
                /*
                 * Only send RECORD_AUX if we have something useful to communicate
                 */

                perf_event_aux_event(handle->event, aux_head, size,
                                     handle->aux_flags);
        }

        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
                wakeup = true;
                local_add(rb->aux_watermark, &rb->aux_wakeup);
        }

        if (wakeup) {
                if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
                        handle->event->pending_disable = 1;
                perf_output_wakeup(handle);
        }

        handle->event = NULL;

        local_set(&rb->aux_nest, 0);
        /* can't be last */
        rb_free_aux(rb);
        ring_buffer_put(rb);
}
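/*
 * A rough sketch of the calling sequence expected of a pmu driver (my
 * illustration, not from a real driver; the drv_*() helpers are
 * hypothetical hardware accessors):
 *
 *      static void drv_pmu_start(struct perf_event *event, int flags)
 *      {
 *              void *buf = perf_aux_output_begin(&drv->handle, event);
 *              if (!buf)
 *                      return;
 *              drv_hw_enable(buf, drv->handle.size);   // hw provides (B)
 *      }
 *
 *      static void drv_pmu_stop(struct perf_event *event, int flags)
 *      {
 *              drv_hw_disable();                       // data now visible
 *              perf_aux_output_end(&drv->handle, drv_hw_bytes_written());
 *      }
 */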
/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
        struct ring_buffer *rb = handle->rb;
        unsigned long aux_head;

        if (size > handle->size)
                return -ENOSPC;

        local_add(size, &rb->aux_head);

        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
                perf_output_wakeup(handle);
                local_add(rb->aux_watermark, &rb->aux_wakeup);
                handle->wakeup = local_read(&rb->aux_wakeup) +
                                 rb->aux_watermark;
        }

        handle->head = aux_head;
        handle->size -= size;

        return 0;
}

void *perf_get_aux(struct perf_output_handle *handle)
{
        /* this is only valid between perf_aux_output_begin and *_end */
        if (!handle->event)
                return NULL;

        return handle->rb->aux_priv;
}

#define PERF_AUX_GFP    (GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
        struct page *page;

        if (order > MAX_ORDER)
                order = MAX_ORDER;

        do {
                page = alloc_pages_node(node, PERF_AUX_GFP, order);
        } while (!page && order--);

        if (page && order) {
                /*
                 * Communicate the allocation size to the driver:
                 * if we managed to secure a high-order allocation,
                 * set its first page's private to this order;
                 * !PagePrivate(page) means it's just a normal page.
                 */
                split_page(page, order);
                SetPagePrivate(page);
                set_page_private(page, order);
        }

        return page;
}
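/*
 * Worked example of the fallback loop above (my numbers): a request for
 * order = 4 (64 KB) under memory pressure retries at order 3, 2, 1, 0,
 * so the caller gets some allocation unless even a single page is out of
 * reach; __GFP_NORETRY | __GFP_NOWARN keep the failed high-order attempts
 * cheap and quiet.
 */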
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
        struct page *page = virt_to_page(rb->aux_pages[idx]);

        ClearPagePrivate(page);
        page->mapping = NULL;
        __free_page(page);
}

static void __rb_free_aux(struct ring_buffer *rb)
{
        int pg;

        /*
         * Should never happen, the last reference should be dropped from
         * perf_mmap_close() path, which first stops aux transactions (which
         * in turn are the atomic holders of aux_refcount) and then does the
         * last rb_free_aux().
         */
        WARN_ON_ONCE(in_atomic());

        if (rb->aux_priv) {
                rb->free_aux(rb->aux_priv);
                rb->free_aux = NULL;
                rb->aux_priv = NULL;
        }

        if (rb->aux_nr_pages) {
                for (pg = 0; pg < rb->aux_nr_pages; pg++)
                        rb_free_aux_page(rb, pg);

                kfree(rb->aux_pages);
                rb->aux_nr_pages = 0;
        }
}

int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
        bool overwrite = !(flags & RING_BUFFER_WRITABLE);
        int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
        int ret = -ENOMEM, max_order = 0;

        if (!has_aux(event))
                return -EOPNOTSUPP;

        if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
                /*
                 * We need to start with the max_order that fits in nr_pages,
                 * not the other way around, hence ilog2() and not get_order.
                 */
                max_order = ilog2(nr_pages);

                /*
                 * PMU requests more than one contiguous chunks of memory
                 * for SW double buffering
                 */
                if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
                    !overwrite) {
                        if (!max_order)
                                return -EINVAL;

                        max_order--;
                }
        }

        rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
        if (!rb->aux_pages)
                return -ENOMEM;

        rb->free_aux = event->pmu->free_aux;
        for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
                struct page *page;
                int last, order;

                order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
                page = rb_alloc_aux_page(node, order);
                if (!page)
                        goto out;

                for (last = rb->aux_nr_pages + (1 << page_private(page));
                     last > rb->aux_nr_pages; rb->aux_nr_pages++)
                        rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }

        /*
         * In overwrite mode, PMUs that don't support SG may not handle more
         * than one contiguous allocation, since they rely on PMI to do double
         * buffering. In this case, the entire buffer has to be one contiguous
         * chunk.
         */
        if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
            overwrite) {
                struct page *page = virt_to_page(rb->aux_pages[0]);

                if (page_private(page) != max_order)
                        goto out;
        }

        rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
                                             overwrite);
        if (!rb->aux_priv)
                goto out;

        ret = 0;

        /*
         * aux_pages (and pmu driver's private data, aux_priv) will be
         * referenced in both producer's and consumer's contexts, thus
         * we keep a refcount here to make sure either of the two can
         * reference them safely.
         */
        atomic_set(&rb->aux_refcount, 1);

        rb->aux_overwrite = overwrite;
        rb->aux_watermark = watermark;

        if (!rb->aux_watermark && !rb->aux_overwrite)
                rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
        if (!ret)
                rb->aux_pgoff = pgoff;
        else
                __rb_free_aux(rb);

        return ret;
}

void rb_free_aux(struct ring_buffer *rb)
{
        if (atomic_dec_and_test(&rb->aux_refcount))
                __rb_free_aux(rb);
}

#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (pgoff > rb->nr_pages)
                return NULL;

        if (pgoff == 0)
                return virt_to_page(rb->user_page);

        return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
        struct page *page;
        int node;

        node = (cpu == -1) ? cpu : cpu_to_node(cpu);
        page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
        if (!page)
                return NULL;

        return page_address(page);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        int i;

        size = sizeof(struct ring_buffer);
        size += nr_pages * sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        rb->user_page = perf_mmap_alloc_page(cpu);
        if (!rb->user_page)
                goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
                rb->data_pages[i] = perf_mmap_alloc_page(cpu);
                if (!rb->data_pages[i])
                        goto fail_data_pages;
        }

        rb->nr_pages = nr_pages;

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_data_pages:
        for (i--; i >= 0; i--)
                free_page((unsigned long)rb->data_pages[i]);

        free_page((unsigned long)rb->user_page);

fail_user_page:
        kfree(rb);

fail:
        return NULL;
}

static void perf_mmap_free_page(unsigned long addr)
{
        struct page *page = virt_to_page((void *)addr);

        page->mapping = NULL;
        __free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
        int i;

        perf_mmap_free_page((unsigned long)rb->user_page);
        for (i = 0; i < rb->nr_pages; i++)
                perf_mmap_free_page((unsigned long)rb->data_pages[i]);
        kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
        return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        /* The '>' counts in the user page. */
        if (pgoff > data_page_nr(rb))
                return NULL;

        return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}

static void perf_mmap_unmark_page(void *addr)
{
        struct page *page = vmalloc_to_page(addr);

        page->mapping = NULL;
}

static void rb_free_work(struct work_struct *work)
{
        struct ring_buffer *rb;
        void *base;
        int i, nr;

        rb = container_of(work, struct ring_buffer, work);
        nr = data_page_nr(rb);

        base = rb->user_page;
        /* The '<=' counts in the user page. */
        for (i = 0; i <= nr; i++)
                perf_mmap_unmark_page(base + (i * PAGE_SIZE));

        vfree(base);
        kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
        schedule_work(&rb->work);
}

struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
        struct ring_buffer *rb;
        unsigned long size;
        void *all_buf;

        size = sizeof(struct ring_buffer);
        size += sizeof(void *);

        rb = kzalloc(size, GFP_KERNEL);
        if (!rb)
                goto fail;

        INIT_WORK(&rb->work, rb_free_work);

        all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
        if (!all_buf)
                goto fail_all_buf;

        rb->user_page = all_buf;
        rb->data_pages[0] = all_buf + PAGE_SIZE;
        if (nr_pages) {
                rb->nr_pages = 1;
                rb->page_order = ilog2(nr_pages);
        }

        ring_buffer_init(rb, watermark, flags);

        return rb;

fail_all_buf:
        kfree(rb);

fail:
        return NULL;
}

#endif
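/*
 * Layout note for the vmalloc case (my worked example): a request for
 * nr_pages = 8 produces one vmalloc area of 9 pages -- the user page
 * followed by 8 data pages -- recorded as nr_pages = 1 with
 * page_order = ilog2(8) = 3, so data_page_nr() still reports 1 << 3 = 8
 * and the rest of the code can treat the data area as one
 * virtually-contiguous "page".
 */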
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
        if (rb->aux_nr_pages) {
                /* above AUX space */
                if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
                        return NULL;

                /* AUX space */
                if (pgoff >= rb->aux_pgoff)
                        return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
        }

        return __perf_mmap_to_page(rb, pgoff);
}