/*
 * builtin-top.c
 *
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "builtin.h"
#include "perf.h"

#include "util/annotate.h"
#include "util/config.h"
#include "util/color.h"
#include "util/drv_configs.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/machine.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/top.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/cpumap.h"
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"

#include "util/debug.h"

#include <assert.h>
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <termios.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <time.h>
#include <sched.h>

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <poll.h>
#include <sys/prctl.h>
#include <sys/wait.h>
#include <sys/uio.h>
#include <sys/utsname.h>
#include <sys/mman.h>

#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>

static volatile int done;

#define HEADER_LINE_NR  5
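
/*
 * Keep the number of printed entries in sync with the current terminal
 * height, minus the lines consumed by the header.
 */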
static void perf_top__update_print_entries(struct perf_top *top)
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}

static void perf_top__sig_winch(int sig __maybe_unused,
				siginfo_t *info __maybe_unused, void *arg)
{
	struct perf_top *top = arg;

	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
}
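
/*
 * Prepare annotation for the selected histogram entry: allocate the
 * per-symbol histograms and disassemble the symbol so that the
 * annotate-symbol view has data to show.
 */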
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	sym = he->ms.sym;
	map = he->ms.map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(map->dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	if (notes->src != NULL) {
		pthread_mutex_lock(&notes->lock);
		goto out_assign;
	}

	pthread_mutex_lock(&notes->lock);

	if (symbol__alloc_hist(sym) < 0) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__disassemble(sym, map, 0);
	if (err == 0) {
out_assign:
		top->sym_filter_entry = he;
	} else {
		char msg[BUFSIZ];

		symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;

	symbol__annotate_zero_histograms(sym);
}
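
/*
 * Warn (once per map) when a sample address falls outside the map or
 * symbol boundaries, so the user knows why some samples are missing
 * from the annotation output.
 */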
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr: %" PRIx64 "\n"
		    "DSO: %s %c\n"
		    "Map: %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch: %s\n"
		    "Kernel: %s\n"
		    "Tools: %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
		    map->start, map->end, sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	if (use_browser <= 0)
		sleep(5);

	map->erange_warned = true;
}
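
/*
 * Account a sample's precise IP into the per-symbol annotation
 * histogram. Called with he->hists->lock held, so any error path that
 * sleeps must drop and re-acquire that lock.
 */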
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					int counter, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	int err = 0;

	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
		return;

	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	err = hist_entry__inc_addr_samples(he, counter, ip);

	pthread_mutex_unlock(&notes->lock);

	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		pthread_mutex_unlock(&he->hists->lock);

		if (err == -ERANGE && !he->ms.map->erange_warned)
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		pthread_mutex_lock(&he->hists->lock);
	}
}
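
/*
 * stdio mode: print the annotated disassembly of the currently
 * selected symbol, then zero or decay its histogram for the next
 * refresh.
 */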
static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", top->sym_pcnt_filter);

	more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
				       0, top->sym_pcnt_filter, top->print_entries, 4);

	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	}
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}
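
/*
 * stdio mode: clear the screen and print the header plus the current
 * symbol table, or the annotation details view when a symbol filter is
 * active.
 */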
static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;
	struct perf_evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	perf_top__reset_sample_counters(top);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (hists->stats.nr_lost_warned !=
	    hists->stats.nr_events[PERF_RECORD_LOST]) {
		hists->stats.nr_lost_warned =
			hists->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      hists->stats.nr_lost_warned);
		++printed;
	}

	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	if (top->evlist->enabled) {
		if (top->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, top->hide_user_symbols,
					     top->hide_kernel_symbols);
		}
	}

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);

	hists__output_recalc_col_len(hists, top->print_entries - printed);
	putchar('\n');
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		       top->min_percent, stdout, symbol_conf.use_callchain);
}
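
/*
 * Interactive helpers: read a number, or a percentage in [0, 100],
 * from stdin for the keypress handlers below.
 */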
static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}
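
/*
 * Ask for a symbol name and, if it is present in the current
 * histogram, make it the annotation target.
 */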
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	next = rb_first(&hists->entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}

static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);

	if (top->evlist->nr_entries > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", perf_evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->sym_pcnt_filter);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");

	fprintf(stdout,
		"\t[K] hide kernel symbols. \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U] hide user symbols. \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
}

static int perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
	case 'd':
	case 'e':
	case 'f':
	case 'z':
	case 'q':
	case 'Q':
	case 'K':
	case 'U':
	case 'F':
	case 's':
	case 'S':
		return 1;
	case 'E':
		return top->evlist->nr_entries > 1 ? 1 : 0;
	default:
		break;
	}

	return 0;
}
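
/*
 * Dispatch one interactive key. Unmapped keys bring up the help menu
 * and read a second key. Returns false when the user asked to quit.
 */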
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
	case 'd':
		prompt_integer(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
			top->delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&top->print_entries, "Enter display entries (lines)");
		if (top->print_entries == 0) {
			struct sigaction act = {
				.sa_sigaction = perf_top__sig_winch,
				.sa_flags     = SA_SIGINFO,
			};
			perf_top__sig_winch(SIGWINCH, NULL, top);
			sigaction(SIGWINCH, &act, NULL);
		} else {
			signal(SIGWINCH, SIG_DFL);
		}
		break;
	case 'E':
		if (top->evlist->nr_entries > 1) {
			/* Select 0 as the default event: */
			int counter = 0;

			fprintf(stderr, "\nAvailable events:");

			evlist__for_each_entry(top->evlist, top->sym_evsel)
				fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));

			prompt_integer(&counter, "Enter details event counter");

			if (counter >= top->evlist->nr_entries) {
				top->sym_evsel = perf_evlist__first(top->evlist);
				fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
				sleep(1);
				break;
			}
			evlist__for_each_entry(top->evlist, top->sym_evsel)
				if (top->sym_evsel->idx == counter)
					break;
		} else
			top->sym_evsel = perf_evlist__first(top->evlist);
		break;
	case 'f':
		prompt_integer(&top->count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&top->sym_pcnt_filter,
			       "Enter details display event filter (percent)");
		break;
	case 'K':
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (top->dump_symtab)
			perf_session__fprintf_dsos(top->session, stderr);
		ret = false;
		break;
	case 's':
		perf_top__prompt_symbol(top, "Enter details symbol");
		break;
	case 'S':
		if (!top->sym_filter_entry)
			break;
		else {
			struct hist_entry *syme = top->sym_filter_entry;

			top->sym_filter_entry = NULL;
			__zero_source_counters(syme);
		}
		break;
	case 'U':
		top->hide_user_symbols = !top->hide_user_symbols;
		break;
	case 'z':
		top->zero = !top->zero;
		break;
	default:
		break;
	}

	return ret;
}
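
/*
 * TUI refresh callback: decay (or zero) the current histogram and
 * resort it so the browser shows fresh data.
 */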
static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;
	struct perf_evsel *evsel = t->sym_evsel;
	struct hists *hists;

	perf_top__reset_sample_counters(t);

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	hists = evsel__hists(evsel);

	if (t->evlist->enabled) {
		if (t->zero) {
			hists__delete_entries(hists);
		} else {
			hists__decay_entries(hists, t->hide_user_symbols,
					     t->hide_kernel_symbols);
		}
	}

	hists__collapse_resort(hists, NULL);
	perf_evsel__output_resort(evsel, NULL);
}
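
/* Display thread body for the TUI (--tui) interface. */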
static void *display_thread_tui(void *arg)
{
	struct perf_evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer		= perf_top__sort_new_samples,
		.arg		= top,
		.refresh	= top->delay_secs,
	};

	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * zooming in/out UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	}

	perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
				      top->min_percent,
				      &top->session->header.env);

	done = 1;
	return NULL;
}

static void display_sig(int sig __maybe_unused)
{
	done = 1;
}

static void display_setup_sig(void)
{
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT, display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}
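
/*
 * Display thread body for the stdio interface: refresh the symbol
 * table every delay_secs and handle interactive keypresses.
 */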
static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	display_setup_sig();
	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
	set_term_quiet_input(&save);
	/* trash return */
	getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			__fallthrough;
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);
			if (perf_top__handle_keypress(top, c))
				goto repeat;
			done = 1;
		}
	}

	tcsetattr(0, TCSAFLUSH, &save);
	return NULL;
}

static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
{
	struct perf_top *top = arg;
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;

	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, he, evsel->idx, al->addr);

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
			     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY));
	return 0;
}
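
/*
 * Resolve one PERF_RECORD_SAMPLE to a map/symbol and add it to the
 * histogram of the event it belongs to, warning once about restricted
 * kernel maps or a missing/unusable vmlinux.
 */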
static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
				       struct perf_evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;
	int err;

	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
			       sample->pid);
			intlist__add(seen, sample->pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->evlist->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	if (machine__resolve(machine, &al, sample) < 0)
		return;

	if (!machine->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		ui__warning(
			"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
			"Check /proc/sys/kernel/kptr_restrict.\n\n"
			"Kernel%s samples will not be resolved.\n",
			al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
			" modules" : "");
		if (use_browser <= 0)
			sleep(5);
		machine->kptr_restrict_warned = true;
	}

	if (al.sym == NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
		    al.map == machine->vmlinux_maps[MAP__FUNCTION] &&
		    RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION])) {
			if (symbol_conf.vmlinux_name) {
				char serr[256];

				dso__strerror_load(al.map->dso, serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.evsel		= evsel,
			.sample		= sample,
			.add_entry_cb	= hist_iter__top_callback,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		pthread_mutex_lock(&hists->lock);

		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
		if (err < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");

		pthread_mutex_unlock(&hists->lock);
	}

	addr_location__put(&al);
}
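
/*
 * Drain one mmap ring buffer: parse each event, route samples to
 * perf_event__process_sample() and other event types to
 * machine__process_event().
 */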
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct perf_sample sample;
	struct perf_evsel *evsel;
	struct perf_session *session = top->session;
	union perf_event *event;
	struct machine *machine;
	int ret;

	while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) {
		ret = perf_evlist__parse_sample(top->evlist, event, &sample);
		if (ret) {
			pr_err("Can't parse sample, err = %d\n", ret);
			goto next_event;
		}

		evsel = perf_evlist__id2evsel(session->evlist, sample.id);
		assert(evsel != NULL);

		if (event->header.type == PERF_RECORD_SAMPLE)
			++top->samples;

		switch (sample.cpumode) {
		case PERF_RECORD_MISC_USER:
			++top->us_samples;
			if (top->hide_user_symbols)
				goto next_event;
			machine = &session->machines.host;
			break;
		case PERF_RECORD_MISC_KERNEL:
			++top->kernel_samples;
			if (top->hide_kernel_symbols)
				goto next_event;
			machine = &session->machines.host;
			break;
		case PERF_RECORD_MISC_GUEST_KERNEL:
			++top->guest_kernel_samples;
			machine = perf_session__find_machine(session,
							     sample.pid);
			break;
		case PERF_RECORD_MISC_GUEST_USER:
			++top->guest_us_samples;
			/*
			 * TODO: we don't process guest user from host side
			 * except simple counting.
			 */
			goto next_event;
		default:
			if (event->header.type == PERF_RECORD_SAMPLE)
				goto next_event;
			machine = &session->machines.host;
			break;
		}

		if (event->header.type == PERF_RECORD_SAMPLE) {
			perf_event__process_sample(&top->tool, event, evsel,
						   &sample, machine);
		} else if (event->header.type < PERF_RECORD_MAX) {
			hists__inc_nr_events(evsel__hists(evsel), event->header.type);
			machine__process_event(machine, event, &sample);
		} else
			++session->evlist->stats.nr_unknown_events;
next_event:
		perf_evlist__mmap_consume(top->evlist, idx);
	}
}

static void perf_top__mmap_read(struct perf_top *top)
{
	int i;

	for (i = 0; i < top->evlist->nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);
}
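
/*
 * Configure and open all counters in the evlist, falling back to
 * weaker event variants where the exact event is not supported, then
 * mmap the ring buffers.
 */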
static int perf_top__start_counters(struct perf_top *top)
{
	char msg[512];
	struct perf_evsel *counter;
	struct perf_evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, counter) {
try_again:
		if (perf_evsel__open(counter, top->evlist->cpus,
				     top->evlist->threads) < 0) {
			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			perf_evsel__open_strerror(counter, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			  errno, str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}

static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
	if (!perf_hpp_list.sym) {
		if (callchain->enabled) {
			ui__error("Selected -g but \"sym\" not present in --sort/-s.");
			return -EINVAL;
		}
	} else if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}
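
/*
 * Main top loop: create the session, synthesize existing threads,
 * start the counters, spawn the display thread (TUI or stdio) and
 * keep draining the mmap buffers until the user quits.
 */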
static int __cmd_top(struct perf_top *top)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evsel_config_term *err_term;
	struct perf_evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;
	pthread_t thread;
	int ret;

	top->session = perf_session__new(NULL, false, NULL);
	if (top->session == NULL)
		return -1;

	if (!objdump_path) {
		ret = perf_env__lookup_objdump(&top->session->header.env);
		if (ret)
			goto out_delete;
	}

	ret = callchain_param__setup_sample_type(&callchain_param);
	if (ret)
		goto out_delete;

	if (perf_session__register_idle_thread(top->session) < 0)
		goto out_delete;

	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->threads, false, opts->proc_map_timeout);

	if (perf_hpp_list.socket) {
		ret = perf_env__read_cpu_topology_map(&perf_env);
		if (ret < 0)
			goto out_err_cpu_topo;
	}

	ret = perf_top__start_counters(top);
	if (ret)
		goto out_delete;

	ret = perf_evlist__apply_drv_configs(evlist, &pos, &err_term);
	if (ret) {
		error("failed to set config \"%s\" on event %s with %d (%s)\n",
		      err_term->val.drv_cfg, perf_evsel__name(pos), errno,
		      str_error_r(errno, msg, sizeof(msg)));
		goto out_delete;
	}

	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);

	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
	if (!target__none(&opts->target))
		perf_evlist__enable(top->evlist);

	/* Wait for a minimal set of events before starting the snapshot */
	perf_evlist__poll(top->evlist, 100);

	perf_top__mmap_read(top);

	ret = -1;
	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_delete;
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
			goto out_join;
		}
	}

	while (!done) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		if (hits == top->samples)
			ret = perf_evlist__poll(top->evlist, 100);
	}

	ret = 0;
out_join:
	pthread_join(thread, NULL);
out_delete:
	perf_session__delete(top->session);
	top->session = NULL;

	return ret;

out_err_cpu_topo: {
	char errbuf[BUFSIZ];
	const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));

	ui__error("Could not read the CPU topology map: %s\n", err);
	goto out_delete;
}
}

static int
callchain_opt(const struct option *opt, const char *arg, int unset)
{
	symbol_conf.use_callchain = true;
	return record_callchain_opt(opt, arg, unset);
}

static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = !unset;
	callchain->record_mode = CALLCHAIN_FP;

	/*
	 * --no-call-graph
	 */
	if (unset) {
		symbol_conf.use_callchain = false;
		callchain->record_mode = CALLCHAIN_NONE;
		return 0;
	}

	return parse_callchain_top_opt(arg);
}

static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "top.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */
	if (!strcmp(var, "top.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}

	return 0;
}

static int
parse_percent_limit(const struct option *opt, const char *arg,
		    int unset __maybe_unused)
{
	struct perf_top *top = opt->value;

	top->min_percent = strtof(arg, NULL);
	return 0;
}

const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
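
/*
 * Entry point for 'perf top': parse options, set up symbol handling,
 * sorting and target maps, then hand off to __cmd_top().
 */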
int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
{
	char errbuf[BUFSIZ];
	struct perf_top top = {
		.count_filter	     = 5,
		.delay_secs	     = 2,
		.record_opts = {
			.mmap_pages	= UINT_MAX,
			.user_freq	= UINT_MAX,
			.user_interval	= ULLONG_MAX,
			.freq		= 4000, /* 4 KHz */
			.target		= {
				.uses_mmap   = true,
			},
			.proc_map_timeout    = 500,
		},
		.max_stack	     = sysctl_perf_event_max_stack,
		.sym_pcnt_filter     = 5,
	};
	struct record_opts *opts = &top.record_opts;
	struct target *target = &opts->target;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
		   "profile events on existing process id"),
	OPT_STRING('t', "tid", &target->tid, "tid",
		   "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
		    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN(0, "group", &opts->group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		   "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_UINTEGER('F', "freq", &opts->user_freq, "profile at this frequency"),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
		   " Please refer the man page for the complete list."),
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		   "output field(s): overhead, period, sample plus all of sort keys"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording and display",
			   &callchain_opt),
	OPT_CALLBACK(0, "call-graph", &callchain_param,
		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
		     top_callchain_help, &parse_callchain_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well"),
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		    "Set the maximum stack depth when parsing the callchain. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		     "ignore callees of these functions in call graphs",
		     report_parse_ignore_callees_opt),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "How to display percentage of filtered entries", parse_filter_percentage),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout,
		     "per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_END()
	};
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};
	int status = hists__init();

	if (status < 0)
		return status;

	top.evlist = perf_evlist__new();
	if (top.evlist == NULL)
		return -ENOMEM;

	perf_config(perf_top_config, &top);

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (!top.evlist->nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_delete_evlist;
	}

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
		}
	}

	sort__mode = SORT_MODE__TOP;
	/* display thread wants entries to be collapsed in a different tree */
	perf_hpp_list.need_collapse = 1;

	if (top.use_stdio)
		use_browser = 0;
	else if (top.use_tui)
		use_browser = 1;

	setup_browser(false);

	if (setup_sorting(top.evlist) < 0) {
		if (sort_order)
			parse_options_usage(top_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : top_usage,
					    options, "fields", 0);
		goto out_delete_evlist;
	}

	status = target__validate(target);
	if (status) {
		target__strerror(target, status, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	status = target__parse_uid(target);
	if (status) {
		int saved_errno = errno;

		target__strerror(target, status, errbuf, BUFSIZ);
		ui__error("%s\n", errbuf);

		status = -saved_errno;
		goto out_delete_evlist;
	}

	if (target__none(target))
		target->system_wide = true;

	if (perf_evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out_delete_evlist;
	}

	symbol_conf.nr_events = top.evlist->nr_entries;

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	if (record_opts__config(opts)) {
		status = -EINVAL;
		goto out_delete_evlist;
	}

	top.sym_evsel = perf_evlist__first(top.evlist);

	if (!callchain_param.enabled) {
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate();
	}

	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	status = symbol__annotation_init();
	if (status < 0)
		goto out_delete_evlist;

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	if (symbol__init(NULL) < 0)
		return -1;

	sort__setup_elide(stdout);

	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		struct sigaction act = {
			.sa_sigaction = perf_top__sig_winch,
			.sa_flags     = SA_SIGINFO,
		};
		perf_top__update_print_entries(&top);
		sigaction(SIGWINCH, &act, NULL);
	}

	status = __cmd_top(&top);

out_delete_evlist:
	perf_evlist__delete(top.evlist);

	return status;
}