builtin-kmem.c

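/*
 * builtin-kmem.c - the 'perf kmem' tool: record kernel memory
 * (SLAB and page allocator) tracepoint events with 'perf kmem record'
 * and summarize them with 'perf kmem stat'.
 */
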
#include "builtin.h"
#include "perf.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"
#include "util/debug.h"

#include <linux/rbtree.h>
#include <linux/string.h>
#include <locale.h>

static int kmem_slab;
static int kmem_page;

static long kmem_page_size;

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static int alloc_flag;
static int caller_flag;

static int alloc_lines = -1;
static int caller_lines = -1;

static bool raw_ip;

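/*
 * One node of slab allocation statistics, used both per allocated
 * pointer (root_alloc_stat) and per call site (root_caller_stat).
 * "pingpong" counts frees that happened on a different CPU than the
 * one that allocated the object.
 */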
struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

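/*
 * Record one slab allocation in the per-pointer tree, keyed by the
 * allocated address.  A repeated address (a recycled slot) accumulates
 * into the existing node; otherwise a new node is inserted.
 */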
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	return 0;
}

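/*
 * Same accumulation as insert_alloc_stat(), but keyed by call site so
 * per-caller totals can be reported.
 */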
static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}

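/*
 * Handlers for the kmem:kmalloc / kmem:kmem_cache_alloc samples; the
 * _node variant additionally counts allocations that landed on a
 * different NUMA node than the requesting CPU (cross-node allocs).
 */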
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}

static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	int ret = perf_evsel__process_alloc_event(evsel, sample);

	if (!ret) {
		int node1 = cpu__get_node(sample->cpu),
		    node2 = perf_evsel__intval(evsel, sample, "node");

		if (node1 != node2)
			nr_cross_allocs++;
	}

	return ret;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

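/*
 * Look up a node in a stat tree.  The key carries both ptr and
 * call_site, but the given sort_fn only compares the field relevant
 * to that tree (ptr_cmp or callsite_cmp).
 */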
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);
		cmp = sort_fn(&key, data);

		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

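/*
 * On kmem:kfree / kmem:kmem_cache_free, match the free against the
 * recorded allocation; a free on a different CPU bumps the pingpong
 * counters for both the pointer and its call site.
 */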
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}

static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;
static u64 total_page_fail_bytes;
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

static bool use_pfn;

#define MAX_MIGRATE_TYPES 6
#define MAX_PAGE_ORDER 11

static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];

struct page_stat {
	struct rb_node	node;

	u64		page;
	int		order;
	unsigned	gfp_flags;
	unsigned	migrate_type;

	u64		alloc_bytes;
	u64		free_bytes;
	int		nr_alloc;
	int		nr_free;
};

static struct rb_root page_tree;
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;

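/*
 * Per-page tree tracking currently-allocated pages, so that a later
 * free event can recover the gfp flags and migrate type recorded at
 * allocation time.  With create=true a missing node is inserted.
 */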
static struct page_stat *search_page(unsigned long page, bool create)
{
	struct rb_node **node = &page_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;

	while (*node) {
		s64 cmp;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		cmp = data->page - page;
		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = page;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_tree);
	}

	return data;
}

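/*
 * Compare two page_stat keys (page, order, migrate type, gfp flags);
 * used to order nodes in page_alloc_tree.
 */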
static int page_stat_cmp(struct page_stat *a, struct page_stat *b)
{
	if (a->page > b->page)
		return -1;
	if (a->page < b->page)
		return 1;
	if (a->order > b->order)
		return -1;
	if (a->order < b->order)
		return 1;
	if (a->migrate_type > b->migrate_type)
		return -1;
	if (a->migrate_type < b->migrate_type)
		return 1;
	if (a->gfp_flags > b->gfp_flags)
		return -1;
	if (a->gfp_flags < b->gfp_flags)
		return 1;
	return 0;
}

static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_alloc_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;

	while (*node) {
		s64 cmp;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		cmp = page_stat_cmp(data, pstat);
		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_alloc_tree);
	}

	return data;
}

static bool valid_page(u64 pfn_or_page)
{
	if (use_pfn && pfn_or_page == -1UL)
		return false;
	if (!use_pfn && pfn_or_page == 0)
		return false;
	return true;
}

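/*
 * Handle kmem:mm_page_alloc: account the allocation (failures are
 * counted separately when the page/pfn field is invalid) and remember
 * the page so the matching free can be attributed correctly.
 */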
static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
						       "migratetype");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
		.gfp_flags = gfp_flags,
		.migrate_type = migrate_type,
	};

	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_allocs++;
	total_page_alloc_bytes += bytes;

	if (!valid_page(page)) {
		nr_page_fails++;
		total_page_fail_bytes += bytes;

		return 0;
	}

	/*
	 * This is to find the current page (with correct gfp flags and
	 * migrate type) at free event.
	 */
	pstat = search_page(page, true);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->order = order;
	pstat->gfp_flags = gfp_flags;
	pstat->migrate_type = migrate_type;

	this.page = page;
	pstat = search_page_alloc_stat(&this, true);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;

	order_stats[order][migrate_type]++;

	return 0;
}

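/*
 * Handle kmem:mm_page_free: frees with no matching allocation in the
 * trace are counted as "nomatch"; otherwise the page node is dropped
 * and the per-(order, gfp, migratetype) stats are updated.
 */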
static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
					       struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
	};

	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_frees++;
	total_page_free_bytes += bytes;

	pstat = search_page(page, false);
	if (pstat == NULL) {
		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
			  page, order);

		nr_page_nomatch++;
		total_page_nomatch_bytes += bytes;

		return 0;
	}

	this.page = page;
	this.gfp_flags = pstat->gfp_flags;
	this.migrate_type = pstat->migrate_type;

	rb_erase(&pstat->node, &page_tree);
	free(pstat);

	pstat = search_page_alloc_stat(&this, false);
	if (pstat == NULL)
		return -ENOENT;

	pstat->nr_free++;
	pstat->free_bytes += bytes;

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

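/*
 * Dispatch each sample to the tracepoint handler installed on its
 * evsel (see the kmem_tracepoints table in __cmd_kmem()).
 */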
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(evsel, sample);
	}

	return 0;
}

static struct perf_tool perf_kmem = {
	.sample		= process_sample_event,
	.comm		= perf_event__process_comm,
	.mmap		= perf_event__process_mmap,
	.mmap2		= perf_event__process_mmap2,
	.ordered_events	= true,
};

static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

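/*
 * Print the sorted slab statistics, one line per call site or per
 * allocation pointer, resolving call sites to kernel symbols unless
 * --raw-ip was given.
 */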
static void __print_slab_result(struct rb_root *root,
				struct perf_session *session,
				int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.105s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.105s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.105s\n", graph_dotted_line);
}

static const char * const migrate_type_str[] = {
	"UNMOVABL",
	"RECLAIM",
	"MOVABLE",
	"RESERVED",
	"CMA/ISLT",
	"UNKNOWN",
};

static void __print_page_result(struct rb_root *root,
				struct perf_session *session __maybe_unused,
				int n_lines)
{
	struct rb_node *next = rb_first(root);
	const char *format;

	printf("\n%.80s\n", graph_dotted_line);
	printf(" %-16s | Total alloc (KB) | Hits | Order | Mig.type | GFP flags\n",
	       use_pfn ? "PFN" : "Page");
	printf("%.80s\n", graph_dotted_line);

	if (use_pfn)
		format = " %16llu | %'16llu | %'9d | %5d | %8s | %08lx\n";
	else
		format = " %016llx | %'16llu | %'9d | %5d | %8s | %08lx\n";

	while (next && n_lines--) {
		struct page_stat *data;

		data = rb_entry(next, struct page_stat, node);

		printf(format, (unsigned long long)data->page,
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       (unsigned long)data->gfp_flags);

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.80s\n", graph_dotted_line);
}

static void print_slab_summary(void)
{
	printf("\nSUMMARY (SLAB allocator)");
	printf("\n========================\n");
	printf("Total bytes requested: %'lu\n", total_requested);
	printf("Total bytes allocated: %'lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %'lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}

static void print_page_summary(void)
{
	int o, m;
	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

	printf("\nSUMMARY (page allocator)");
	printf("\n========================\n");
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
	       nr_page_allocs, total_page_alloc_bytes / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
	       nr_page_frees, total_page_free_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
	       nr_page_allocs - nr_alloc_freed,
	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
	       nr_page_fails, total_page_fail_bytes / 1024);
	printf("\n");

	printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
	printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line, graph_dotted_line);

	for (o = 0; o < MAX_PAGE_ORDER; o++) {
		printf("%5d", o);
		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
			if (order_stats[o][m])
				printf(" %'12d", order_stats[o][m]);
			else
				printf(" %12c", '.');
		}
		printf("\n");
	}
}

static void print_slab_result(struct perf_session *session)
{
	if (caller_flag)
		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_slab_summary();
}

static void print_page_result(struct perf_session *session)
{
	if (alloc_flag)
		__print_page_result(&page_alloc_sorted, session, alloc_lines);
	print_page_summary();
}

static void print_result(struct perf_session *session)
{
	if (kmem_slab)
		print_slab_result(session);
	if (kmem_page)
		print_page_result(session);
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);

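/*
 * Re-insert a slab stat node into the sorted tree, comparing with
 * each requested sort key in order until one differentiates.
 */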
static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_slab_insert(root_sorted, data, sort_list);
	}
}

static void sort_page_insert(struct rb_root *root, struct page_stat *data)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;

	while (*new) {
		struct page_stat *this;

		this = rb_entry(*new, struct page_stat, node);
		parent = *new;

		/*
		 * TODO: support more sort keys.  Compare the u64 byte
		 * counts directly; the previous "int cmp = a - b"
		 * truncated the difference and could flip its sign for
		 * totals differing by 2 GiB or more.
		 */
		if (data->alloc_bytes > this->alloc_bytes)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted)
{
	struct rb_node *node;
	struct page_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct page_stat, node);
		sort_page_insert(root_sorted, data);
	}
}

static void sort_result(void)
{
	if (kmem_slab) {
		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
				   &alloc_sort);
		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
				   &caller_sort);
	}
	if (kmem_page) {
		__sort_page_result(&page_alloc_tree, &page_alloc_sorted);
	}
}

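/*
 * Process a recorded perf.data session: install one handler per kmem
 * tracepoint, detect whether mm_page_alloc exports a "pfn" field or
 * only a raw "page" pointer, then sort and print the results.
 */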
static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	struct perf_evsel *evsel;
	const struct perf_evsel_str_handler kmem_tracepoints[] = {
		/* slab allocator */
		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree",			perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
		/* page allocator */
		{ "kmem:mm_page_alloc",		perf_evsel__process_page_alloc_event, },
		{ "kmem:mm_page_free",		perf_evsel__process_page_free_event, },
	};

	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	evlist__for_each(session->evlist, evsel) {
		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
		    perf_evsel__field(evsel, "pfn")) {
			use_pfn = true;
			break;
		}
	}

	setup_pager();
	err = perf_session__process_events(session);
	if (err != 0) {
		pr_err("error during process events: %d\n", err);
		goto out;
	}
	sort_result();
	print_result(session);
out:
	return err;
}

static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	((int)ARRAY_SIZE(avail_sorts))

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

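/*
 * Parse a comma-separated --sort key list ("frag,hit,bytes" is the
 * default) into a list of sort_dimension entries.
 */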
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	/* Apply --sort to whichever of --caller/--alloc was given last. */
	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_slab_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_slab = (kmem_page + 1);
	return 0;
}

static int parse_page_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_page = (kmem_slab + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

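/*
 * 'perf kmem record': forward to 'perf record' system-wide (-a) with
 * the slab and/or page tracepoints appended, plus the user's args.
 */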
static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	const char * const slab_events[] = {
		"-e", "kmem:kmalloc",
		"-e", "kmem:kmalloc_node",
		"-e", "kmem:kfree",
		"-e", "kmem:kmem_cache_alloc",
		"-e", "kmem:kmem_cache_alloc_node",
		"-e", "kmem:kmem_cache_free",
	};
	const char * const page_events[] = {
		"-e", "kmem:mm_page_alloc",
		"-e", "kmem:mm_page_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	if (kmem_slab)
		rec_argc += ARRAY_SIZE(slab_events);
	if (kmem_page)
		rec_argc += ARRAY_SIZE(page_events);

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	if (kmem_slab) {
		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
			rec_argv[i] = strdup(slab_events[j]);
	}
	if (kmem_page) {
		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
			rec_argv[i] = strdup(page_events[j]);
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

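/*
 * Entry point for 'perf kmem'.  "record" captures events; "stat"
 * reads them back, sets up sorting, and prints the report.
 */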
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const default_sort_order = "frag,hit,bytes";
	struct perf_data_file file = {
		.mode = PERF_DATA_MODE_READ,
	};
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
			   parse_slab_opt),
	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
			   parse_page_opt),
	OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	struct perf_session *session;
	int ret = -1;

	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (kmem_slab == 0 && kmem_page == 0)
		kmem_slab = 1;  /* for backward compatibility */

	if (!strncmp(argv[0], "rec", 3)) {
		symbol__init(NULL);
		return __cmd_record(argc, argv);
	}

	file.path = input_name;

	session = perf_session__new(&file, false, &perf_kmem);
	if (session == NULL)
		return -1;

	if (kmem_page) {
		struct perf_evsel *evsel = perf_evlist__first(session->evlist);

		if (evsel == NULL || evsel->tp_format == NULL) {
			pr_err("invalid event found.. aborting\n");
			goto out_delete;	/* was "return -1", which leaked the session */
		}

		kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
	}

	symbol__init(&session->header.env);

	if (!strcmp(argv[0], "stat")) {
		setlocale(LC_ALL, "");

		if (cpu__setup_cpunode_map())
			goto out_delete;

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		ret = __cmd_kmem(session);
	} else
		usage_with_options(kmem_usage, kmem_options);

out_delete:
	perf_session__delete(session);

	return ret;
}