builtin-kmem.c

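/*
 * builtin-kmem.c: report kernel slab (kmalloc/kmem_cache) allocation
 * statistics from a trace taken with 'perf kmem record'.
 *
 * Typical usage, assuming the subcommands and options defined below:
 *
 *	perf kmem record
 *	perf kmem stat --caller --sort frag,hit,bytes
 */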
#include "builtin.h"
#include "perf.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"
#include "util/debug.h"

#include <linux/rbtree.h>
#include <linux/string.h>

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static int	alloc_flag;
static int	caller_flag;

static int	alloc_lines = -1;
static int	caller_lines = -1;

static bool	raw_ip;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;

	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;
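
/*
 * Account one allocation in the rb-tree keyed by the returned pointer.
 * If the pointer is already present (a previously freed slot was
 * reused), the hit count and byte totals are folded into the existing
 * node; otherwise a new node is allocated and inserted.
 */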
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	return 0;
}

static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}
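
/*
 * Handler for the kmem:kmalloc and kmem:kmem_cache_alloc tracepoints:
 * pull ptr, call_site and the requested/allocated sizes out of the
 * sample and feed both statistics trees as well as the global totals.
 */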
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}

static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
						struct perf_sample *sample)
{
	int ret = perf_evsel__process_alloc_event(evsel, sample);

	if (!ret) {
		int node1 = cpu__get_node(sample->cpu),
		    node2 = perf_evsel__intval(evsel, sample, "node");

		if (node1 != node2)
			nr_cross_allocs++;
	}

	return ret;
}
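
/*
 * Look up an existing node in either statistics tree.  The comparison
 * callback decides which key (ptr or call_site) is significant, so the
 * same helper serves both trees.
 */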
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);
		cmp = sort_fn(&key, data);

		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}
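
/*
 * Handler for kmem:kfree and kmem:kmem_cache_free.  When an object is
 * freed on a CPU other than the one that allocated it, the event is
 * counted as a "ping-pong" against both the allocation and its call
 * site.
 */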
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}
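
/*
 * Generic sample callback: resolve the emitting thread, then dispatch
 * the sample to the per-tracepoint handler installed on the evsel.
 */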
typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(evsel, sample);
	}

	return 0;
}

static struct perf_tool perf_kmem = {
	.sample		= process_sample_event,
	.comm		= perf_event__process_comm,
	.mmap		= perf_event__process_mmap,
	.mmap2		= perf_event__process_mmap2,
	.ordered_events	= true,
};
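
/*
 * Internal fragmentation as a percentage: how much of the allocated
 * space exceeded what was actually requested, i.e.
 * 100 * (n_alloc - n_req) / n_alloc.
 */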
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite" : "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);
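
/*
 * Insert a node into a result tree ordered by the user-selected sort
 * keys.  Keys are tried in the order they were given on the command
 * line; the first one that differs decides, and larger values sort
 * towards the left so that rb_first() yields the "biggest" entries.
 */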
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}
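
/*
 * Drain the accumulation tree node by node and re-insert each entry
 * into the corresponding sorted tree used for output.
 */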
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}
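
/*
 * Core of 'perf kmem stat': bind the tracepoint handlers, replay the
 * recorded events through the session, then sort and print the result.
 */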
static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	const struct perf_evsel_str_handler kmem_tracepoints[] = {
		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node",	perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree",			perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
	};

	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out;
	sort_result();
	print_result(session);
out:
	return err;
}
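
/*
 * Comparison helpers for the available sort keys.  Each one compares a
 * single field of struct alloc_stat and is wrapped in a sort_dimension
 * so it can be selected by name via --sort.
 */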
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};
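
/*
 * Resolve one --sort token against the table above and append a copy
 * of the matching dimension to the caller's sort list.
 */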
#define NUM_AVAIL_SORTS	((int)ARRAY_SIZE(avail_sorts))

static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}
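
/*
 * Parse a comma-separated --sort argument ("frag,hit,bytes", ...) into
 * the given sort list.  An unknown key aborts option parsing.
 */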
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;	/* strsep() advances 'pos'; keep 'str' so the buffer can be freed */

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);

	return 0;
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
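
/*
 * 'perf kmem record': build an argument vector that runs 'perf record'
 * system-wide with the kmem tracepoints enabled, appending whatever
 * extra arguments the user passed after 'record'.
 */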
static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
		"record", "-a", "-R", "-c", "1",
		"-e", "kmem:kmalloc",
		"-e", "kmem:kmalloc_node",
		"-e", "kmem:kfree",
		"-e", "kmem:kmem_cache_alloc",
		"-e", "kmem:kmem_cache_alloc_node",
		"-e", "kmem:kmem_cache_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}
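
/*
 * Entry point for 'perf kmem'.  'record' is forwarded to perf record;
 * 'stat' opens the recorded perf.data file and produces the report,
 * using "frag,hit,bytes" as the default sort order.
 */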
int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const default_sort_order = "frag,hit,bytes";
	const struct option kmem_options[] = {
		OPT_STRING('i', "input", &input_name, "file", "input file name"),
		OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
				   "show per-callsite statistics", parse_caller_opt),
		OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
				   "show per-allocation statistics", parse_alloc_opt),
		OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
			     "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
			     parse_sort_opt),
		OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
		OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
		OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	struct perf_session *session;
	struct perf_data_file file = {
		.path = input_name,
		.mode = PERF_DATA_MODE_READ,
	};
	int ret = -1;

	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (!strncmp(argv[0], "rec", 3)) {
		symbol__init(NULL);
		return __cmd_record(argc, argv);
	}

	session = perf_session__new(&file, false, &perf_kmem);
	if (session == NULL)
		return -1;

	symbol__init(&session->header.env);

	if (!strcmp(argv[0], "stat")) {
		if (cpu__setup_cpunode_map())
			goto out_delete;

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		ret = __cmd_kmem(session);
	} else
		usage_with_options(kmem_usage, kmem_options);

out_delete:
	perf_session__delete(session);

	return ret;
}