- #include "builtin.h"
- #include "perf.h"
- #include "util/util.h"
- #include "util/evlist.h"
- #include "util/cache.h"
- #include "util/evsel.h"
- #include "util/symbol.h"
- #include "util/thread.h"
- #include "util/header.h"
- #include "util/session.h"
- #include "util/tool.h"
- #include "util/cloexec.h"
- #include "util/thread_map.h"
- #include "util/color.h"
- #include <subcmd/parse-options.h>
- #include "util/trace-event.h"
- #include "util/debug.h"
- #include <sys/prctl.h>
- #include <sys/resource.h>
- #include <semaphore.h>
- #include <pthread.h>
- #include <math.h>
- #include <api/fs/fs.h>
- #include <linux/time64.h>
- #define PR_SET_NAME 15 /* Set process name */
- #define MAX_CPUS 4096
- #define COMM_LEN 20
- #define SYM_LEN 129
- #define MAX_PID 1024000
- struct sched_atom;
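- /*
- * Per-task replay state: the recorded stream of sched atoms for this
- * task, the pthread that replays it, and the semaphores used to
- * coordinate each replay iteration with the parent.
- */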
- struct task_desc {
- unsigned long nr;
- unsigned long pid;
- char comm[COMM_LEN];
- unsigned long nr_events;
- unsigned long curr_event;
- struct sched_atom **atoms;
- pthread_t thread;
- sem_t sleep_sem;
- sem_t ready_for_work;
- sem_t work_done_sem;
- u64 cpu_usage;
- };
- enum sched_event_type {
- SCHED_EVENT_RUN,
- SCHED_EVENT_SLEEP,
- SCHED_EVENT_WAKEUP,
- SCHED_EVENT_MIGRATION,
- };
- struct sched_atom {
- enum sched_event_type type;
- int specific_wait;
- u64 timestamp;
- u64 duration;
- unsigned long nr;
- sem_t *wait_sem;
- struct task_desc *wakee;
- };
- #define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
- enum thread_state {
- THREAD_SLEEPING = 0,
- THREAD_WAIT_CPU,
- THREAD_SCHED_IN,
- THREAD_IGNORE
- };
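- /*
- * A work_atom describes one sched-out -> wake-up -> sched-in cycle of
- * a thread; the wake-up-to-sched-in delta is its scheduling latency.
- * A work_atoms node aggregates all atoms of one thread in the rb-tree.
- */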
- struct work_atom {
- struct list_head list;
- enum thread_state state;
- u64 sched_out_time;
- u64 wake_up_time;
- u64 sched_in_time;
- u64 runtime;
- };
- struct work_atoms {
- struct list_head work_list;
- struct thread *thread;
- struct rb_node node;
- u64 max_lat;
- u64 max_lat_at;
- u64 total_lat;
- u64 nb_atoms;
- u64 total_runtime;
- int num_merged;
- };
- typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
- struct perf_sched;
- struct trace_sched_handler {
- int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel,
- struct perf_sample *sample, struct machine *machine);
- int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel,
- struct perf_sample *sample, struct machine *machine);
- int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
- struct perf_sample *sample, struct machine *machine);
- /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
- int (*fork_event)(struct perf_sched *sched, union perf_event *event,
- struct machine *machine);
- int (*migrate_task_event)(struct perf_sched *sched,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine);
- };
- #define COLOR_PIDS PERF_COLOR_BLUE
- #define COLOR_CPUS PERF_COLOR_BG_RED
- struct perf_sched_map {
- DECLARE_BITMAP(comp_cpus_mask, MAX_CPUS);
- int *comp_cpus;
- bool comp;
- struct thread_map *color_pids;
- const char *color_pids_str;
- struct cpu_map *color_cpus;
- const char *color_cpus_str;
- struct cpu_map *cpus;
- const char *cpus_str;
- };
- struct perf_sched {
- struct perf_tool tool;
- const char *sort_order;
- unsigned long nr_tasks;
- struct task_desc **pid_to_task;
- struct task_desc **tasks;
- const struct trace_sched_handler *tp_handler;
- pthread_mutex_t start_work_mutex;
- pthread_mutex_t work_done_wait_mutex;
- int profile_cpu;
- /*
- * Track the current task - that way we can catch weird events, such
- * as a task being switched away that is not current.
- */
- int max_cpu;
- u32 curr_pid[MAX_CPUS];
- struct thread *curr_thread[MAX_CPUS];
- char next_shortname1;
- char next_shortname2;
- unsigned int replay_repeat;
- unsigned long nr_run_events;
- unsigned long nr_sleep_events;
- unsigned long nr_wakeup_events;
- unsigned long nr_sleep_corrections;
- unsigned long nr_run_events_optimized;
- unsigned long targetless_wakeups;
- unsigned long multitarget_wakeups;
- unsigned long nr_runs;
- unsigned long nr_timestamps;
- unsigned long nr_unordered_timestamps;
- unsigned long nr_context_switch_bugs;
- unsigned long nr_events;
- unsigned long nr_lost_chunks;
- unsigned long nr_lost_events;
- u64 run_measurement_overhead;
- u64 sleep_measurement_overhead;
- u64 start_time;
- u64 cpu_usage;
- u64 runavg_cpu_usage;
- u64 parent_cpu_usage;
- u64 runavg_parent_cpu_usage;
- u64 sum_runtime;
- u64 sum_fluct;
- u64 run_avg;
- u64 all_runtime;
- u64 all_count;
- u64 cpu_last_switched[MAX_CPUS];
- struct rb_root atom_root, sorted_atom_root, merged_atom_root;
- struct list_head sort_list, cmp_pid;
- bool force;
- bool skip_merge;
- struct perf_sched_map map;
- };
- static u64 get_nsecs(void)
- {
- struct timespec ts;
- clock_gettime(CLOCK_MONOTONIC, &ts);
- return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
- }
- static void burn_nsecs(struct perf_sched *sched, u64 nsecs)
- {
- u64 T0 = get_nsecs(), T1;
- do {
- T1 = get_nsecs();
- } while (T1 + sched->run_measurement_overhead < T0 + nsecs);
- }
- static void sleep_nsecs(u64 nsecs)
- {
- struct timespec ts;
- ts.tv_nsec = nsecs % 999999999;
- ts.tv_sec = nsecs / 999999999;
- nanosleep(&ts, NULL);
- }
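- /*
- * The calibration runs below measure the fixed cost of the timing and
- * sleeping primitives themselves, so replayed run and sleep durations
- * can be compensated for measurement overhead.
- */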
- static void calibrate_run_measurement_overhead(struct perf_sched *sched)
- {
- u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
- int i;
- for (i = 0; i < 10; i++) {
- T0 = get_nsecs();
- burn_nsecs(sched, 0);
- T1 = get_nsecs();
- delta = T1-T0;
- min_delta = min(min_delta, delta);
- }
- sched->run_measurement_overhead = min_delta;
- printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta);
- }
- static void calibrate_sleep_measurement_overhead(struct perf_sched *sched)
- {
- u64 T0, T1, delta, min_delta = NSEC_PER_SEC;
- int i;
- for (i = 0; i < 10; i++) {
- T0 = get_nsecs();
- sleep_nsecs(10000);
- T1 = get_nsecs();
- delta = T1-T0;
- min_delta = min(min_delta, delta);
- }
- min_delta -= 10000;
- sched->sleep_measurement_overhead = min_delta;
- printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta);
- }
- static struct sched_atom *
- get_new_event(struct task_desc *task, u64 timestamp)
- {
- struct sched_atom *event = zalloc(sizeof(*event));
- unsigned long idx = task->nr_events;
- size_t size;
- event->timestamp = timestamp;
- event->nr = idx;
- task->nr_events++;
- size = sizeof(struct sched_atom *) * task->nr_events;
- task->atoms = realloc(task->atoms, size);
- BUG_ON(!task->atoms);
- task->atoms[idx] = event;
- return event;
- }
- static struct sched_atom *last_event(struct task_desc *task)
- {
- if (!task->nr_events)
- return NULL;
- return task->atoms[task->nr_events - 1];
- }
- static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task,
- u64 timestamp, u64 duration)
- {
- struct sched_atom *event, *curr_event = last_event(task);
- /*
- * optimize an existing RUN event by merging this one
- * to it:
- */
- if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
- sched->nr_run_events_optimized++;
- curr_event->duration += duration;
- return;
- }
- event = get_new_event(task, timestamp);
- event->type = SCHED_EVENT_RUN;
- event->duration = duration;
- sched->nr_run_events++;
- }
- static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task,
- u64 timestamp, struct task_desc *wakee)
- {
- struct sched_atom *event, *wakee_event;
- event = get_new_event(task, timestamp);
- event->type = SCHED_EVENT_WAKEUP;
- event->wakee = wakee;
- wakee_event = last_event(wakee);
- if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
- sched->targetless_wakeups++;
- return;
- }
- if (wakee_event->wait_sem) {
- sched->multitarget_wakeups++;
- return;
- }
- wakee_event->wait_sem = zalloc(sizeof(*wakee_event->wait_sem));
- sem_init(wakee_event->wait_sem, 0, 0);
- wakee_event->specific_wait = 1;
- event->wait_sem = wakee_event->wait_sem;
- sched->nr_wakeup_events++;
- }
- static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task,
- u64 timestamp, u64 task_state __maybe_unused)
- {
- struct sched_atom *event = get_new_event(task, timestamp);
- event->type = SCHED_EVENT_SLEEP;
- sched->nr_sleep_events++;
- }
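- /*
- * register_pid() lazily allocates the pid -> task_desc lookup table,
- * sized from the kernel.pid_max sysctl (falling back to MAX_PID), and
- * grows it on demand if a larger pid shows up in the trace.
- */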
- static struct task_desc *register_pid(struct perf_sched *sched,
- unsigned long pid, const char *comm)
- {
- struct task_desc *task;
- static int pid_max;
- if (sched->pid_to_task == NULL) {
- if (sysctl__read_int("kernel/pid_max", &pid_max) < 0)
- pid_max = MAX_PID;
- BUG_ON((sched->pid_to_task = calloc(pid_max, sizeof(struct task_desc *))) == NULL);
- }
- if (pid >= (unsigned long)pid_max) {
- BUG_ON((sched->pid_to_task = realloc(sched->pid_to_task, (pid + 1) *
- sizeof(struct task_desc *))) == NULL);
- while (pid >= (unsigned long)pid_max)
- sched->pid_to_task[pid_max++] = NULL;
- }
- task = sched->pid_to_task[pid];
- if (task)
- return task;
- task = zalloc(sizeof(*task));
- task->pid = pid;
- task->nr = sched->nr_tasks;
- strcpy(task->comm, comm);
- /*
- * every task starts in sleeping state - this gets ignored
- * if there's no wakeup pointing to this sleep state:
- */
- add_sched_event_sleep(sched, task, 0, 0);
- sched->pid_to_task[pid] = task;
- sched->nr_tasks++;
- sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_desc *));
- BUG_ON(!sched->tasks);
- sched->tasks[task->nr] = task;
- if (verbose)
- printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm);
- return task;
- }
- static void print_task_traces(struct perf_sched *sched)
- {
- struct task_desc *task;
- unsigned long i;
- for (i = 0; i < sched->nr_tasks; i++) {
- task = sched->tasks[i];
- printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
- task->nr, task->comm, task->pid, task->nr_events);
- }
- }
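- /*
- * Wire the tasks into a wakeup ring (task i wakes task i + 1, the last
- * wakes the first) at timestamp 0, so that each task's initial sleep
- * event has a matching wakeup and every replay thread gets released.
- */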
- static void add_cross_task_wakeups(struct perf_sched *sched)
- {
- struct task_desc *task1, *task2;
- unsigned long i, j;
- for (i = 0; i < sched->nr_tasks; i++) {
- task1 = sched->tasks[i];
- j = i + 1;
- if (j == sched->nr_tasks)
- j = 0;
- task2 = sched->tasks[j];
- add_sched_event_wakeup(sched, task1, 0, task2);
- }
- }
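- /*
- * Replay one recorded atom: RUN burns CPU for the recorded duration,
- * SLEEP blocks on the atom's semaphore (if a wakeup was wired to it),
- * WAKEUP posts the wakee's semaphore, MIGRATION is a no-op here.
- */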
- static void perf_sched__process_event(struct perf_sched *sched,
- struct sched_atom *atom)
- {
- int ret = 0;
- switch (atom->type) {
- case SCHED_EVENT_RUN:
- burn_nsecs(sched, atom->duration);
- break;
- case SCHED_EVENT_SLEEP:
- if (atom->wait_sem)
- ret = sem_wait(atom->wait_sem);
- BUG_ON(ret);
- break;
- case SCHED_EVENT_WAKEUP:
- if (atom->wait_sem)
- ret = sem_post(atom->wait_sem);
- BUG_ON(ret);
- break;
- case SCHED_EVENT_MIGRATION:
- break;
- default:
- BUG_ON(1);
- }
- }
- static u64 get_cpu_usage_nsec_parent(void)
- {
- struct rusage ru;
- u64 sum;
- int err;
- err = getrusage(RUSAGE_SELF, &ru);
- BUG_ON(err);
- sum = ru.ru_utime.tv_sec * NSEC_PER_SEC + ru.ru_utime.tv_usec * NSEC_PER_USEC;
- sum += ru.ru_stime.tv_sec * NSEC_PER_SEC + ru.ru_stime.tv_usec * NSEC_PER_USEC;
- return sum;
- }
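- /*
- * Open a software task-clock counter on the calling thread so each
- * replay thread can read its own CPU time. On EMFILE, --force bumps
- * RLIMIT_NOFILE by the number of still-unopened tasks and retries.
- */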
- static int self_open_counters(struct perf_sched *sched, unsigned long cur_task)
- {
- struct perf_event_attr attr;
- char sbuf[STRERR_BUFSIZE], info[STRERR_BUFSIZE];
- int fd;
- struct rlimit limit;
- bool need_privilege = false;
- memset(&attr, 0, sizeof(attr));
- attr.type = PERF_TYPE_SOFTWARE;
- attr.config = PERF_COUNT_SW_TASK_CLOCK;
- force_again:
- fd = sys_perf_event_open(&attr, 0, -1, -1,
- perf_event_open_cloexec_flag());
- if (fd < 0) {
- if (errno == EMFILE) {
- if (sched->force) {
- BUG_ON(getrlimit(RLIMIT_NOFILE, &limit) == -1);
- limit.rlim_cur += sched->nr_tasks - cur_task;
- if (limit.rlim_cur > limit.rlim_max) {
- limit.rlim_max = limit.rlim_cur;
- need_privilege = true;
- }
- if (setrlimit(RLIMIT_NOFILE, &limit) == -1) {
- if (need_privilege && errno == EPERM)
- strcpy(info, "Need privilege\n");
- } else
- goto force_again;
- } else
- strcpy(info, "Have a try with -f option\n");
- }
- pr_err("Error: sys_perf_event_open() syscall returned "
- "with %d (%s)\n%s", fd,
- str_error_r(errno, sbuf, sizeof(sbuf)), info);
- exit(EXIT_FAILURE);
- }
- return fd;
- }
- static u64 get_cpu_usage_nsec_self(int fd)
- {
- u64 runtime;
- int ret;
- ret = read(fd, &runtime, sizeof(runtime));
- BUG_ON(ret != sizeof(runtime));
- return runtime;
- }
- struct sched_thread_parms {
- struct task_desc *task;
- struct perf_sched *sched;
- int fd;
- };
- static void *thread_func(void *ctx)
- {
- struct sched_thread_parms *parms = ctx;
- struct task_desc *this_task = parms->task;
- struct perf_sched *sched = parms->sched;
- u64 cpu_usage_0, cpu_usage_1;
- unsigned long i, ret;
- char comm2[22];
- int fd = parms->fd;
- zfree(&parms);
- sprintf(comm2, ":%s", this_task->comm);
- prctl(PR_SET_NAME, comm2);
- if (fd < 0)
- return NULL;
- again:
- ret = sem_post(&this_task->ready_for_work);
- BUG_ON(ret);
- ret = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(ret);
- ret = pthread_mutex_unlock(&sched->start_work_mutex);
- BUG_ON(ret);
- cpu_usage_0 = get_cpu_usage_nsec_self(fd);
- for (i = 0; i < this_task->nr_events; i++) {
- this_task->curr_event = i;
- perf_sched__process_event(sched, this_task->atoms[i]);
- }
- cpu_usage_1 = get_cpu_usage_nsec_self(fd);
- this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
- ret = sem_post(&this_task->work_done_sem);
- BUG_ON(ret);
- ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
- ret = pthread_mutex_unlock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
- goto again;
- }
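- /*
- * create_tasks() takes both mutexes before spawning the threads; they
- * serve as start/stop barriers: each thread_func() instance blocks on
- * start_work_mutex until wait_for_tasks() releases it.
- */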
- static void create_tasks(struct perf_sched *sched)
- {
- struct task_desc *task;
- pthread_attr_t attr;
- unsigned long i;
- int err;
- err = pthread_attr_init(&attr);
- BUG_ON(err);
- err = pthread_attr_setstacksize(&attr,
- (size_t) max(16 * 1024, PTHREAD_STACK_MIN));
- BUG_ON(err);
- err = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(err);
- err = pthread_mutex_lock(&sched->work_done_wait_mutex);
- BUG_ON(err);
- for (i = 0; i < sched->nr_tasks; i++) {
- struct sched_thread_parms *parms = malloc(sizeof(*parms));
- BUG_ON(parms == NULL);
- parms->task = task = sched->tasks[i];
- parms->sched = sched;
- parms->fd = self_open_counters(sched, i);
- sem_init(&task->sleep_sem, 0, 0);
- sem_init(&task->ready_for_work, 0, 0);
- sem_init(&task->work_done_sem, 0, 0);
- task->curr_event = 0;
- err = pthread_create(&task->thread, &attr, thread_func, parms);
- BUG_ON(err);
- }
- }
- static void wait_for_tasks(struct perf_sched *sched)
- {
- u64 cpu_usage_0, cpu_usage_1;
- struct task_desc *task;
- unsigned long i, ret;
- sched->start_time = get_nsecs();
- sched->cpu_usage = 0;
- pthread_mutex_unlock(&sched->work_done_wait_mutex);
- for (i = 0; i < sched->nr_tasks; i++) {
- task = sched->tasks[i];
- ret = sem_wait(&task->ready_for_work);
- BUG_ON(ret);
- sem_init(&task->ready_for_work, 0, 0);
- }
- ret = pthread_mutex_lock(&sched->work_done_wait_mutex);
- BUG_ON(ret);
- cpu_usage_0 = get_cpu_usage_nsec_parent();
- pthread_mutex_unlock(&sched->start_work_mutex);
- for (i = 0; i < sched->nr_tasks; i++) {
- task = sched->tasks[i];
- ret = sem_wait(&task->work_done_sem);
- BUG_ON(ret);
- sem_init(&task->work_done_sem, 0, 0);
- sched->cpu_usage += task->cpu_usage;
- task->cpu_usage = 0;
- }
- cpu_usage_1 = get_cpu_usage_nsec_parent();
- if (!sched->runavg_cpu_usage)
- sched->runavg_cpu_usage = sched->cpu_usage;
- sched->runavg_cpu_usage = (sched->runavg_cpu_usage * (sched->replay_repeat - 1) + sched->cpu_usage) / sched->replay_repeat;
- sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
- if (!sched->runavg_parent_cpu_usage)
- sched->runavg_parent_cpu_usage = sched->parent_cpu_usage;
- sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * (sched->replay_repeat - 1) +
- sched->parent_cpu_usage)/sched->replay_repeat;
- ret = pthread_mutex_lock(&sched->start_work_mutex);
- BUG_ON(ret);
- for (i = 0; i < sched->nr_tasks; i++) {
- task = sched->tasks[i];
- sem_init(&task->sleep_sem, 0, 0);
- task->curr_event = 0;
- }
- }
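- /*
- * run_avg below is an exponentially weighted running average with
- * weight 1/replay_repeat:
- * run_avg = (run_avg * (repeat - 1) + delta) / repeat
- */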
- static void run_one_test(struct perf_sched *sched)
- {
- u64 T0, T1, delta, avg_delta, fluct;
- T0 = get_nsecs();
- wait_for_tasks(sched);
- T1 = get_nsecs();
- delta = T1 - T0;
- sched->sum_runtime += delta;
- sched->nr_runs++;
- avg_delta = sched->sum_runtime / sched->nr_runs;
- if (delta < avg_delta)
- fluct = avg_delta - delta;
- else
- fluct = delta - avg_delta;
- sched->sum_fluct += fluct;
- if (!sched->run_avg)
- sched->run_avg = delta;
- sched->run_avg = (sched->run_avg * (sched->replay_repeat - 1) + delta) / sched->replay_repeat;
- printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / NSEC_PER_MSEC);
- printf("ravg: %0.2f, ", (double)sched->run_avg / NSEC_PER_MSEC);
- printf("cpu: %0.2f / %0.2f",
- (double)sched->cpu_usage / NSEC_PER_MSEC, (double)sched->runavg_cpu_usage / NSEC_PER_MSEC);
- #if 0
- /*
- * rusage statistics done by the parent, these are less
- * accurate than the sched->sum_exec_runtime based statistics:
- */
- printf(" [%0.2f / %0.2f]",
- (double)sched->parent_cpu_usage / NSEC_PER_MSEC,
- (double)sched->runavg_parent_cpu_usage / NSEC_PER_MSEC);
- #endif
- printf("\n");
- if (sched->nr_sleep_corrections)
- printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections);
- sched->nr_sleep_corrections = 0;
- }
- static void test_calibrations(struct perf_sched *sched)
- {
- u64 T0, T1;
- T0 = get_nsecs();
- burn_nsecs(sched, NSEC_PER_MSEC);
- T1 = get_nsecs();
- printf("the run test took %" PRIu64 " nsecs\n", T1 - T0);
- T0 = get_nsecs();
- sleep_nsecs(NSEC_PER_MSEC);
- T1 = get_nsecs();
- printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0);
- }
- static int
- replay_wakeup_event(struct perf_sched *sched,
- struct perf_evsel *evsel, struct perf_sample *sample,
- struct machine *machine __maybe_unused)
- {
- const char *comm = perf_evsel__strval(evsel, sample, "comm");
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
- struct task_desc *waker, *wakee;
- if (verbose) {
- printf("sched_wakeup event %p\n", evsel);
- printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid);
- }
- waker = register_pid(sched, sample->tid, "<unknown>");
- wakee = register_pid(sched, pid, comm);
- add_sched_event_wakeup(sched, waker, sample->time, wakee);
- return 0;
- }
- static int replay_switch_event(struct perf_sched *sched,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine __maybe_unused)
- {
- const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"),
- *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
- const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
- next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
- struct task_desc *prev, __maybe_unused *next;
- u64 timestamp0, timestamp = sample->time;
- int cpu = sample->cpu;
- s64 delta;
- if (verbose)
- printf("sched_switch event %p\n", evsel);
- if (cpu >= MAX_CPUS || cpu < 0)
- return 0;
- timestamp0 = sched->cpu_last_switched[cpu];
- if (timestamp0)
- delta = timestamp - timestamp0;
- else
- delta = 0;
- if (delta < 0) {
- pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
- return -1;
- }
- pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n",
- prev_comm, prev_pid, next_comm, next_pid, delta);
- prev = register_pid(sched, prev_pid, prev_comm);
- next = register_pid(sched, next_pid, next_comm);
- sched->cpu_last_switched[cpu] = timestamp;
- add_sched_event_run(sched, prev, timestamp, delta);
- add_sched_event_sleep(sched, prev, timestamp, prev_state);
- return 0;
- }
- static int replay_fork_event(struct perf_sched *sched,
- union perf_event *event,
- struct machine *machine)
- {
- struct thread *child, *parent;
- child = machine__findnew_thread(machine, event->fork.pid,
- event->fork.tid);
- parent = machine__findnew_thread(machine, event->fork.ppid,
- event->fork.ptid);
- if (child == NULL || parent == NULL) {
- pr_debug("thread does not exist on fork event: child %p, parent %p\n",
- child, parent);
- goto out_put;
- }
- if (verbose) {
- printf("fork event\n");
- printf("... parent: %s/%d\n", thread__comm_str(parent), parent->tid);
- printf("... child: %s/%d\n", thread__comm_str(child), child->tid);
- }
- register_pid(sched, parent->tid, thread__comm_str(parent));
- register_pid(sched, child->tid, thread__comm_str(child));
- out_put:
- thread__put(child);
- thread__put(parent);
- return 0;
- }
- struct sort_dimension {
- const char *name;
- sort_fn_t cmp;
- struct list_head list;
- };
- static int
- thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
- {
- struct sort_dimension *sort;
- int ret = 0;
- BUG_ON(list_empty(list));
- list_for_each_entry(sort, list, list) {
- ret = sort->cmp(l, r);
- if (ret)
- return ret;
- }
- return ret;
- }
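- /*
- * atom_root is an rb-tree of work_atoms keyed by the cmp_pid sort list
- * (pid order) for lookups; for output the nodes are later re-inserted
- * into sorted_atom_root, keyed by the user-selected sort dimensions.
- */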
- static struct work_atoms *
- thread_atoms_search(struct rb_root *root, struct thread *thread,
- struct list_head *sort_list)
- {
- struct rb_node *node = root->rb_node;
- struct work_atoms key = { .thread = thread };
- while (node) {
- struct work_atoms *atoms;
- int cmp;
- atoms = container_of(node, struct work_atoms, node);
- cmp = thread_lat_cmp(sort_list, &key, atoms);
- if (cmp > 0)
- node = node->rb_left;
- else if (cmp < 0)
- node = node->rb_right;
- else {
- BUG_ON(thread != atoms->thread);
- return atoms;
- }
- }
- return NULL;
- }
- static void
- __thread_latency_insert(struct rb_root *root, struct work_atoms *data,
- struct list_head *sort_list)
- {
- struct rb_node **new = &(root->rb_node), *parent = NULL;
- while (*new) {
- struct work_atoms *this;
- int cmp;
- this = container_of(*new, struct work_atoms, node);
- parent = *new;
- cmp = thread_lat_cmp(sort_list, data, this);
- if (cmp > 0)
- new = &((*new)->rb_left);
- else
- new = &((*new)->rb_right);
- }
- rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
- }
- static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
- {
- struct work_atoms *atoms = zalloc(sizeof(*atoms));
- if (!atoms) {
- pr_err("No memory at %s\n", __func__);
- return -1;
- }
- atoms->thread = thread__get(thread);
- INIT_LIST_HEAD(&atoms->work_list);
- __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid);
- return 0;
- }
- static char sched_out_state(u64 prev_state)
- {
- const char *str = TASK_STATE_TO_CHAR_STR;
- return str[prev_state];
- }
- static int
- add_sched_out_event(struct work_atoms *atoms,
- char run_state,
- u64 timestamp)
- {
- struct work_atom *atom = zalloc(sizeof(*atom));
- if (!atom) {
- pr_err("Non memory at %s", __func__);
- return -1;
- }
- atom->sched_out_time = timestamp;
- if (run_state == 'R') {
- atom->state = THREAD_WAIT_CPU;
- atom->wake_up_time = atom->sched_out_time;
- }
- list_add_tail(&atom->list, &atoms->work_list);
- return 0;
- }
- static void
- add_runtime_event(struct work_atoms *atoms, u64 delta,
- u64 timestamp __maybe_unused)
- {
- struct work_atom *atom;
- BUG_ON(list_empty(&atoms->work_list));
- atom = list_entry(atoms->work_list.prev, struct work_atom, list);
- atom->runtime += delta;
- atoms->total_runtime += delta;
- }
- static void
- add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
- {
- struct work_atom *atom;
- u64 delta;
- if (list_empty(&atoms->work_list))
- return;
- atom = list_entry(atoms->work_list.prev, struct work_atom, list);
- if (atom->state != THREAD_WAIT_CPU)
- return;
- if (timestamp < atom->wake_up_time) {
- atom->state = THREAD_IGNORE;
- return;
- }
- atom->state = THREAD_SCHED_IN;
- atom->sched_in_time = timestamp;
- delta = atom->sched_in_time - atom->wake_up_time;
- atoms->total_lat += delta;
- if (delta > atoms->max_lat) {
- atoms->max_lat = delta;
- atoms->max_lat_at = timestamp;
- }
- atoms->nb_atoms++;
- }
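- /*
- * A sched_switch ends the current atom of the outgoing thread
- * (recording whether it left runnable or sleeping) and completes the
- * pending atom of the incoming thread, yielding its wake-up-to-sched-in
- * latency.
- */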
- static int latency_switch_event(struct perf_sched *sched,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine)
- {
- const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
- next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
- struct work_atoms *out_events, *in_events;
- struct thread *sched_out, *sched_in;
- u64 timestamp0, timestamp = sample->time;
- int cpu = sample->cpu, err = -1;
- s64 delta;
- BUG_ON(cpu >= MAX_CPUS || cpu < 0);
- timestamp0 = sched->cpu_last_switched[cpu];
- sched->cpu_last_switched[cpu] = timestamp;
- if (timestamp0)
- delta = timestamp - timestamp0;
- else
- delta = 0;
- if (delta < 0) {
- pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
- return -1;
- }
- sched_out = machine__findnew_thread(machine, -1, prev_pid);
- sched_in = machine__findnew_thread(machine, -1, next_pid);
- if (sched_out == NULL || sched_in == NULL)
- goto out_put;
- out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
- if (!out_events) {
- if (thread_atoms_insert(sched, sched_out))
- goto out_put;
- out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
- if (!out_events) {
- pr_err("out-event: Internal tree error");
- goto out_put;
- }
- }
- if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
- return -1;
- in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
- if (!in_events) {
- if (thread_atoms_insert(sched, sched_in))
- goto out_put;
- in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
- if (!in_events) {
- pr_err("in-event: Internal tree error");
- goto out_put;
- }
- /*
- * A task came in that we have not heard about yet,
- * add an initial atom in runnable state:
- */
- if (add_sched_out_event(in_events, 'R', timestamp))
- goto out_put;
- }
- add_sched_in_event(in_events, timestamp);
- err = 0;
- out_put:
- thread__put(sched_out);
- thread__put(sched_in);
- return err;
- }
- static int latency_runtime_event(struct perf_sched *sched,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine)
- {
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
- const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
- struct thread *thread = machine__findnew_thread(machine, -1, pid);
- struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
- u64 timestamp = sample->time;
- int cpu = sample->cpu, err = -1;
- if (thread == NULL)
- return -1;
- BUG_ON(cpu >= MAX_CPUS || cpu < 0);
- if (!atoms) {
- if (thread_atoms_insert(sched, thread))
- goto out_put;
- atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
- if (!atoms) {
- pr_err("in-event: Internal tree error");
- goto out_put;
- }
- if (add_sched_out_event(atoms, 'R', timestamp))
- goto out_put;
- }
- add_runtime_event(atoms, runtime, timestamp);
- err = 0;
- out_put:
- thread__put(thread);
- return err;
- }
- static int latency_wakeup_event(struct perf_sched *sched,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine)
- {
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
- struct work_atoms *atoms;
- struct work_atom *atom;
- struct thread *wakee;
- u64 timestamp = sample->time;
- int err = -1;
- wakee = machine__findnew_thread(machine, -1, pid);
- if (wakee == NULL)
- return -1;
- atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
- if (!atoms) {
- if (thread_atoms_insert(sched, wakee))
- goto out_put;
- atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
- if (!atoms) {
- pr_err("wakeup-event: Internal tree error");
- goto out_put;
- }
- if (add_sched_out_event(atoms, 'S', timestamp))
- goto out_put;
- }
- BUG_ON(list_empty(&atoms->work_list));
- atom = list_entry(atoms->work_list.prev, struct work_atom, list);
- /*
- * A wakeup event is not guaranteed to arrive while the task is off
- * the run queue: it may also fire while the task is still on the run
- * queue, merely setting ->state to TASK_RUNNING. In that case we must
- * not set ->wake_up_time.
- *
- * However, events WILL be missing if only one CPU was recorded (or is
- * being looked at), so don't skip in that case.
- */
- if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
- goto out_ok;
- sched->nr_timestamps++;
- if (atom->sched_out_time > timestamp) {
- sched->nr_unordered_timestamps++;
- goto out_ok;
- }
- atom->state = THREAD_WAIT_CPU;
- atom->wake_up_time = timestamp;
- out_ok:
- err = 0;
- out_put:
- thread__put(wakee);
- return err;
- }
- static int latency_migrate_task_event(struct perf_sched *sched,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine)
- {
- const u32 pid = perf_evsel__intval(evsel, sample, "pid");
- u64 timestamp = sample->time;
- struct work_atoms *atoms;
- struct work_atom *atom;
- struct thread *migrant;
- int err = -1;
- /*
- * Only need to worry about migration when profiling one CPU.
- */
- if (sched->profile_cpu == -1)
- return 0;
- migrant = machine__findnew_thread(machine, -1, pid);
- if (migrant == NULL)
- return -1;
- atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
- if (!atoms) {
- if (thread_atoms_insert(sched, migrant))
- goto out_put;
- register_pid(sched, migrant->tid, thread__comm_str(migrant));
- atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
- if (!atoms) {
- pr_err("migration-event: Internal tree error");
- goto out_put;
- }
- if (add_sched_out_event(atoms, 'R', timestamp))
- goto out_put;
- }
- BUG_ON(list_empty(&atoms->work_list));
- atom = list_entry(atoms->work_list.prev, struct work_atom, list);
- atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp;
- sched->nr_timestamps++;
- if (atom->sched_out_time > timestamp)
- sched->nr_unordered_timestamps++;
- err = 0;
- out_put:
- thread__put(migrant);
- return err;
- }
- static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
- {
- int i;
- int ret;
- u64 avg;
- if (!work_list->nb_atoms)
- return;
- /*
- * Ignore idle threads:
- */
- if (!strcmp(thread__comm_str(work_list->thread), "swapper"))
- return;
- sched->all_runtime += work_list->total_runtime;
- sched->all_count += work_list->nb_atoms;
- if (work_list->num_merged > 1)
- ret = printf(" %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
- else
- ret = printf(" %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
- for (i = 0; i < 24 - ret; i++)
- printf(" ");
- avg = work_list->total_lat / work_list->nb_atoms;
- printf("|%11.3f ms |%9" PRIu64 " | avg:%9.3f ms | max:%9.3f ms | max at: %13.6f s\n",
- (double)work_list->total_runtime / NSEC_PER_MSEC,
- work_list->nb_atoms, (double)avg / NSEC_PER_MSEC,
- (double)work_list->max_lat / NSEC_PER_MSEC,
- (double)work_list->max_lat_at / NSEC_PER_SEC);
- }
- static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
- {
- if (l->thread == r->thread)
- return 0;
- if (l->thread->tid < r->thread->tid)
- return -1;
- if (l->thread->tid > r->thread->tid)
- return 1;
- return (int)(l->thread - r->thread);
- }
- static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
- {
- u64 avgl, avgr;
- if (!l->nb_atoms)
- return -1;
- if (!r->nb_atoms)
- return 1;
- avgl = l->total_lat / l->nb_atoms;
- avgr = r->total_lat / r->nb_atoms;
- if (avgl < avgr)
- return -1;
- if (avgl > avgr)
- return 1;
- return 0;
- }
- static int max_cmp(struct work_atoms *l, struct work_atoms *r)
- {
- if (l->max_lat < r->max_lat)
- return -1;
- if (l->max_lat > r->max_lat)
- return 1;
- return 0;
- }
- static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
- {
- if (l->nb_atoms < r->nb_atoms)
- return -1;
- if (l->nb_atoms > r->nb_atoms)
- return 1;
- return 0;
- }
- static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
- {
- if (l->total_runtime < r->total_runtime)
- return -1;
- if (l->total_runtime > r->total_runtime)
- return 1;
- return 0;
- }
- static int sort_dimension__add(const char *tok, struct list_head *list)
- {
- size_t i;
- static struct sort_dimension avg_sort_dimension = {
- .name = "avg",
- .cmp = avg_cmp,
- };
- static struct sort_dimension max_sort_dimension = {
- .name = "max",
- .cmp = max_cmp,
- };
- static struct sort_dimension pid_sort_dimension = {
- .name = "pid",
- .cmp = pid_cmp,
- };
- static struct sort_dimension runtime_sort_dimension = {
- .name = "runtime",
- .cmp = runtime_cmp,
- };
- static struct sort_dimension switch_sort_dimension = {
- .name = "switch",
- .cmp = switch_cmp,
- };
- struct sort_dimension *available_sorts[] = {
- &pid_sort_dimension,
- &avg_sort_dimension,
- &max_sort_dimension,
- &switch_sort_dimension,
- &runtime_sort_dimension,
- };
- for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
- if (!strcmp(available_sorts[i]->name, tok)) {
- list_add_tail(&available_sorts[i]->list, list);
- return 0;
- }
- }
- return -1;
- }
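- /*
- * Drain both the per-thread tree and the per-comm merged tree into
- * sorted_atom_root, re-keyed by the user's --sort dimensions.
- */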
- static void perf_sched__sort_lat(struct perf_sched *sched)
- {
- struct rb_node *node;
- struct rb_root *root = &sched->atom_root;
- again:
- for (;;) {
- struct work_atoms *data;
- node = rb_first(root);
- if (!node)
- break;
- rb_erase(node, root);
- data = rb_entry(node, struct work_atoms, node);
- __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
- }
- if (root == &sched->atom_root) {
- root = &sched->merged_atom_root;
- goto again;
- }
- }
- static int process_sched_wakeup_event(struct perf_tool *tool,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine)
- {
- struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- if (sched->tp_handler->wakeup_event)
- return sched->tp_handler->wakeup_event(sched, evsel, sample, machine);
- return 0;
- }
- union map_priv {
- void *ptr;
- bool color;
- };
- static bool thread__has_color(struct thread *thread)
- {
- union map_priv priv = {
- .ptr = thread__priv(thread),
- };
- return priv.color;
- }
- static struct thread*
- map__findnew_thread(struct perf_sched *sched, struct machine *machine, pid_t pid, pid_t tid)
- {
- struct thread *thread = machine__findnew_thread(machine, pid, tid);
- union map_priv priv = {
- .color = false,
- };
- if (!sched->map.color_pids || !thread || thread__priv(thread))
- return thread;
- if (thread_map__has(sched->map.color_pids, tid))
- priv.color = true;
- thread__set_priv(thread, priv.ptr);
- return thread;
- }
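- /*
- * 'perf sched map' prints one row per context switch: a two-letter
- * shortname column per CPU ('.' stands for swapper), with '*' marking
- * the CPU on which this switch happened.
- */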
- static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
- struct perf_sample *sample, struct machine *machine)
- {
- const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- struct thread *sched_in;
- int new_shortname;
- u64 timestamp0, timestamp = sample->time;
- s64 delta;
- int i, this_cpu = sample->cpu;
- int cpus_nr;
- bool new_cpu = false;
- const char *color = PERF_COLOR_NORMAL;
- BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
- if (this_cpu > sched->max_cpu)
- sched->max_cpu = this_cpu;
- if (sched->map.comp) {
- cpus_nr = bitmap_weight(sched->map.comp_cpus_mask, MAX_CPUS);
- if (!test_and_set_bit(this_cpu, sched->map.comp_cpus_mask)) {
- sched->map.comp_cpus[cpus_nr++] = this_cpu;
- new_cpu = true;
- }
- } else
- cpus_nr = sched->max_cpu;
- timestamp0 = sched->cpu_last_switched[this_cpu];
- sched->cpu_last_switched[this_cpu] = timestamp;
- if (timestamp0)
- delta = timestamp - timestamp0;
- else
- delta = 0;
- if (delta < 0) {
- pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta);
- return -1;
- }
- sched_in = map__findnew_thread(sched, machine, -1, next_pid);
- if (sched_in == NULL)
- return -1;
- sched->curr_thread[this_cpu] = thread__get(sched_in);
- printf(" ");
- new_shortname = 0;
- if (!sched_in->shortname[0]) {
- if (!strcmp(thread__comm_str(sched_in), "swapper")) {
- /*
- * Don't allocate a letter-number for swapper:0
- * as a shortname. Instead, we use '.' for it.
- */
- sched_in->shortname[0] = '.';
- sched_in->shortname[1] = ' ';
- } else {
- sched_in->shortname[0] = sched->next_shortname1;
- sched_in->shortname[1] = sched->next_shortname2;
- if (sched->next_shortname1 < 'Z') {
- sched->next_shortname1++;
- } else {
- sched->next_shortname1 = 'A';
- if (sched->next_shortname2 < '9')
- sched->next_shortname2++;
- else
- sched->next_shortname2 = '0';
- }
- }
- new_shortname = 1;
- }
- for (i = 0; i < cpus_nr; i++) {
- int cpu = sched->map.comp ? sched->map.comp_cpus[i] : i;
- struct thread *curr_thread = sched->curr_thread[cpu];
- const char *pid_color = color;
- const char *cpu_color = color;
- if (curr_thread && thread__has_color(curr_thread))
- pid_color = COLOR_PIDS;
- if (sched->map.cpus && !cpu_map__has(sched->map.cpus, cpu))
- continue;
- if (sched->map.color_cpus && cpu_map__has(sched->map.color_cpus, cpu))
- cpu_color = COLOR_CPUS;
- if (cpu != this_cpu)
- color_fprintf(stdout, cpu_color, " ");
- else
- color_fprintf(stdout, cpu_color, "*");
- if (sched->curr_thread[cpu])
- color_fprintf(stdout, pid_color, "%2s ", sched->curr_thread[cpu]->shortname);
- else
- color_fprintf(stdout, color, " ");
- }
- if (sched->map.cpus && !cpu_map__has(sched->map.cpus, this_cpu))
- goto out;
- color_fprintf(stdout, color, " %12.6f secs ", (double)timestamp / NSEC_PER_SEC);
- if (new_shortname) {
- const char *pid_color = color;
- if (thread__has_color(sched_in))
- pid_color = COLOR_PIDS;
- color_fprintf(stdout, pid_color, "%s => %s:%d",
- sched_in->shortname, thread__comm_str(sched_in), sched_in->tid);
- }
- if (sched->map.comp && new_cpu)
- color_fprintf(stdout, color, " (CPU %d)", this_cpu);
- out:
- color_fprintf(stdout, color, "\n");
- thread__put(sched_in);
- return 0;
- }
- static int process_sched_switch_event(struct perf_tool *tool,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine)
- {
- struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- int this_cpu = sample->cpu, err = 0;
- u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
- next_pid = perf_evsel__intval(evsel, sample, "next_pid");
- if (sched->curr_pid[this_cpu] != (u32)-1) {
- /*
- * Are we trying to switch away a PID that is
- * not current?
- */
- if (sched->curr_pid[this_cpu] != prev_pid)
- sched->nr_context_switch_bugs++;
- }
- if (sched->tp_handler->switch_event)
- err = sched->tp_handler->switch_event(sched, evsel, sample, machine);
- sched->curr_pid[this_cpu] = next_pid;
- return err;
- }
- static int process_sched_runtime_event(struct perf_tool *tool,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine)
- {
- struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- if (sched->tp_handler->runtime_event)
- return sched->tp_handler->runtime_event(sched, evsel, sample, machine);
- return 0;
- }
- static int perf_sched__process_fork_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct machine *machine)
- {
- struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- /* run the fork event through the perf machinery */
- perf_event__process_fork(tool, event, sample, machine);
- /* and then run additional processing needed for this command */
- if (sched->tp_handler->fork_event)
- return sched->tp_handler->fork_event(sched, event, machine);
- return 0;
- }
- static int process_sched_migrate_task_event(struct perf_tool *tool,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine)
- {
- struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- if (sched->tp_handler->migrate_task_event)
- return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine);
- return 0;
- }
- typedef int (*tracepoint_handler)(struct perf_tool *tool,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine);
- static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct machine *machine)
- {
- int err = 0;
- if (evsel->handler != NULL) {
- tracepoint_handler f = evsel->handler;
- err = f(tool, evsel, sample, machine);
- }
- return err;
- }
- static int perf_sched__read_events(struct perf_sched *sched)
- {
- const struct perf_evsel_str_handler handlers[] = {
- { "sched:sched_switch", process_sched_switch_event, },
- { "sched:sched_stat_runtime", process_sched_runtime_event, },
- { "sched:sched_wakeup", process_sched_wakeup_event, },
- { "sched:sched_wakeup_new", process_sched_wakeup_event, },
- { "sched:sched_migrate_task", process_sched_migrate_task_event, },
- };
- struct perf_session *session;
- struct perf_data_file file = {
- .path = input_name,
- .mode = PERF_DATA_MODE_READ,
- .force = sched->force,
- };
- int rc = -1;
- session = perf_session__new(&file, false, &sched->tool);
- if (session == NULL) {
- pr_debug("No Memory for session\n");
- return -1;
- }
- symbol__init(&session->header.env);
- if (perf_session__set_tracepoints_handlers(session, handlers))
- goto out_delete;
- if (perf_session__has_traces(session, "record -R")) {
- int err = perf_session__process_events(session);
- if (err) {
- pr_err("Failed to process events, error %d", err);
- goto out_delete;
- }
- sched->nr_events = session->evlist->stats.nr_events[0];
- sched->nr_lost_events = session->evlist->stats.total_lost;
- sched->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
- }
- rc = 0;
- out_delete:
- perf_session__delete(session);
- return rc;
- }
- static void print_bad_events(struct perf_sched *sched)
- {
- if (sched->nr_unordered_timestamps && sched->nr_timestamps) {
- printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
- (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0,
- sched->nr_unordered_timestamps, sched->nr_timestamps);
- }
- if (sched->nr_lost_events && sched->nr_events) {
- printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
- (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
- sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
- }
- if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
- printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
- (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,
- sched->nr_context_switch_bugs, sched->nr_timestamps);
- if (sched->nr_lost_events)
- printf(" (due to lost events?)");
- printf("\n");
- }
- }
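- /*
- * Merge the atoms of all threads that share a comm into a single node
- * keyed by the comm string; num_merged counts the threads folded in
- * (latency is reported per comm unless -p/--pids is given).
- */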
- static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
- {
- struct rb_node **new = &(root->rb_node), *parent = NULL;
- struct work_atoms *this;
- const char *comm = thread__comm_str(data->thread), *this_comm;
- while (*new) {
- int cmp;
- this = container_of(*new, struct work_atoms, node);
- parent = *new;
- this_comm = thread__comm_str(this->thread);
- cmp = strcmp(comm, this_comm);
- if (cmp > 0) {
- new = &((*new)->rb_left);
- } else if (cmp < 0) {
- new = &((*new)->rb_right);
- } else {
- this->num_merged++;
- this->total_runtime += data->total_runtime;
- this->nb_atoms += data->nb_atoms;
- this->total_lat += data->total_lat;
- list_splice(&data->work_list, &this->work_list);
- if (this->max_lat < data->max_lat) {
- this->max_lat = data->max_lat;
- this->max_lat_at = data->max_lat_at;
- }
- zfree(&data);
- return;
- }
- }
- data->num_merged++;
- rb_link_node(&data->node, parent, new);
- rb_insert_color(&data->node, root);
- }
- static void perf_sched__merge_lat(struct perf_sched *sched)
- {
- struct work_atoms *data;
- struct rb_node *node;
- if (sched->skip_merge)
- return;
- while ((node = rb_first(&sched->atom_root))) {
- rb_erase(node, &sched->atom_root);
- data = rb_entry(node, struct work_atoms, node);
- __merge_work_atoms(&sched->merged_atom_root, data);
- }
- }
- static int perf_sched__lat(struct perf_sched *sched)
- {
- struct rb_node *next;
- setup_pager();
- if (perf_sched__read_events(sched))
- return -1;
- perf_sched__merge_lat(sched);
- perf_sched__sort_lat(sched);
- printf("\n -----------------------------------------------------------------------------------------------------------------\n");
- printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n");
- printf(" -----------------------------------------------------------------------------------------------------------------\n");
- next = rb_first(&sched->sorted_atom_root);
- while (next) {
- struct work_atoms *work_list;
- work_list = rb_entry(next, struct work_atoms, node);
- output_lat_thread(sched, work_list);
- next = rb_next(next);
- thread__zput(work_list->thread);
- }
- printf(" -----------------------------------------------------------------------------------------------------------------\n");
- printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n",
- (double)sched->all_runtime / NSEC_PER_MSEC, sched->all_count);
- printf(" ---------------------------------------------------\n");
- print_bad_events(sched);
- printf("\n");
- return 0;
- }
- static int setup_map_cpus(struct perf_sched *sched)
- {
- struct cpu_map *map;
- sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
- if (sched->map.comp) {
- sched->map.comp_cpus = zalloc(sched->max_cpu * sizeof(int));
- if (!sched->map.comp_cpus)
- return -1;
- }
- if (!sched->map.cpus_str)
- return 0;
- map = cpu_map__new(sched->map.cpus_str);
- if (!map) {
- pr_err("failed to get cpus map from %s\n", sched->map.cpus_str);
- return -1;
- }
- sched->map.cpus = map;
- return 0;
- }
- static int setup_color_pids(struct perf_sched *sched)
- {
- struct thread_map *map;
- if (!sched->map.color_pids_str)
- return 0;
- map = thread_map__new_by_tid_str(sched->map.color_pids_str);
- if (!map) {
- pr_err("failed to get thread map from %s\n", sched->map.color_pids_str);
- return -1;
- }
- sched->map.color_pids = map;
- return 0;
- }
- static int setup_color_cpus(struct perf_sched *sched)
- {
- struct cpu_map *map;
- if (!sched->map.color_cpus_str)
- return 0;
- map = cpu_map__new(sched->map.color_cpus_str);
- if (!map) {
- pr_err("failed to get thread map from %s\n", sched->map.color_cpus_str);
- return -1;
- }
- sched->map.color_cpus = map;
- return 0;
- }
- static int perf_sched__map(struct perf_sched *sched)
- {
- if (setup_map_cpus(sched))
- return -1;
- if (setup_color_pids(sched))
- return -1;
- if (setup_color_cpus(sched))
- return -1;
- setup_pager();
- if (perf_sched__read_events(sched))
- return -1;
- print_bad_events(sched);
- return 0;
- }
- static int perf_sched__replay(struct perf_sched *sched)
- {
- unsigned long i;
- calibrate_run_measurement_overhead(sched);
- calibrate_sleep_measurement_overhead(sched);
- test_calibrations(sched);
- if (perf_sched__read_events(sched))
- return -1;
- printf("nr_run_events: %ld\n", sched->nr_run_events);
- printf("nr_sleep_events: %ld\n", sched->nr_sleep_events);
- printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events);
- if (sched->targetless_wakeups)
- printf("target-less wakeups: %ld\n", sched->targetless_wakeups);
- if (sched->multitarget_wakeups)
- printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups);
- if (sched->nr_run_events_optimized)
- printf("run atoms optimized: %ld\n",
- sched->nr_run_events_optimized);
- print_task_traces(sched);
- add_cross_task_wakeups(sched);
- create_tasks(sched);
- printf("------------------------------------------------------------\n");
- for (i = 0; i < sched->replay_repeat; i++)
- run_one_test(sched);
- return 0;
- }
- static void setup_sorting(struct perf_sched *sched, const struct option *options,
- const char * const usage_msg[])
- {
- char *tmp, *tok, *str = strdup(sched->sort_order);
- for (tok = strtok_r(str, ", ", &tmp);
- tok; tok = strtok_r(NULL, ", ", &tmp)) {
- if (sort_dimension__add(tok, &sched->sort_list) < 0) {
- usage_with_options_msg(usage_msg, options,
- "Unknown --sort key: `%s'", tok);
- }
- }
- free(str);
- sort_dimension__add("pid", &sched->cmp_pid);
- }
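- /*
- * 'perf sched record' is a thin wrapper around 'perf record' with a
- * fixed set of sched tracepoints, e.g. 'perf sched record -- sleep 1';
- * any extra arguments are appended to the synthesized command line.
- */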
- static int __cmd_record(int argc, const char **argv)
- {
- unsigned int rec_argc, i, j;
- const char **rec_argv;
- const char * const record_args[] = {
- "record",
- "-a",
- "-R",
- "-m", "1024",
- "-c", "1",
- "-e", "sched:sched_switch",
- "-e", "sched:sched_stat_wait",
- "-e", "sched:sched_stat_sleep",
- "-e", "sched:sched_stat_iowait",
- "-e", "sched:sched_stat_runtime",
- "-e", "sched:sched_process_fork",
- "-e", "sched:sched_wakeup",
- "-e", "sched:sched_wakeup_new",
- "-e", "sched:sched_migrate_task",
- };
- rec_argc = ARRAY_SIZE(record_args) + argc - 1;
- rec_argv = calloc(rec_argc + 1, sizeof(char *));
- if (rec_argv == NULL)
- return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(record_args); i++)
- rec_argv[i] = strdup(record_args[i]);
- for (j = 1; j < (unsigned int)argc; j++, i++)
- rec_argv[i] = argv[j];
- BUG_ON(i != rec_argc);
- return cmd_record(i, rec_argv, NULL);
- }
- int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
- {
- const char default_sort_order[] = "avg, max, switch, runtime";
- struct perf_sched sched = {
- .tool = {
- .sample = perf_sched__process_tracepoint_sample,
- .comm = perf_event__process_comm,
- .lost = perf_event__process_lost,
- .fork = perf_sched__process_fork_event,
- .ordered_events = true,
- },
- .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
- .sort_list = LIST_HEAD_INIT(sched.sort_list),
- .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
- .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
- .sort_order = default_sort_order,
- .replay_repeat = 10,
- .profile_cpu = -1,
- .next_shortname1 = 'A',
- .next_shortname2 = '0',
- .skip_merge = 0,
- };
- const struct option latency_options[] = {
- OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
- "sort by key(s): runtime, switch, avg, max"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_INTEGER('C', "CPU", &sched.profile_cpu,
- "CPU to profile on"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_BOOLEAN('p', "pids", &sched.skip_merge,
- "latency stats per pid instead of per comm"),
- OPT_END()
- };
- const struct option replay_options[] = {
- OPT_UINTEGER('r', "repeat", &sched.replay_repeat,
- "repeat the workload replay N times (-1: infinite)"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_BOOLEAN('f', "force", &sched.force, "don't complain, do it"),
- OPT_END()
- };
- const struct option sched_options[] = {
- OPT_STRING('i', "input", &input_name, "file",
- "input file name"),
- OPT_INCR('v', "verbose", &verbose,
- "be more verbose (show symbol address, etc)"),
- OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
- "dump raw trace in ASCII"),
- OPT_END()
- };
- const struct option map_options[] = {
- OPT_BOOLEAN(0, "compact", &sched.map.comp,
- "map output in compact mode"),
- OPT_STRING(0, "color-pids", &sched.map.color_pids_str, "pids",
- "highlight given pids in map"),
- OPT_STRING(0, "color-cpus", &sched.map.color_cpus_str, "cpus",
- "highlight given CPUs in map"),
- OPT_STRING(0, "cpus", &sched.map.cpus_str, "cpus",
- "display given CPUs in map"),
- OPT_END()
- };
- const char * const latency_usage[] = {
- "perf sched latency [<options>]",
- NULL
- };
- const char * const replay_usage[] = {
- "perf sched replay [<options>]",
- NULL
- };
- const char * const map_usage[] = {
- "perf sched map [<options>]",
- NULL
- };
- const char *const sched_subcommands[] = { "record", "latency", "map",
- "replay", "script", NULL };
- const char *sched_usage[] = {
- NULL,
- NULL
- };
- struct trace_sched_handler lat_ops = {
- .wakeup_event = latency_wakeup_event,
- .switch_event = latency_switch_event,
- .runtime_event = latency_runtime_event,
- .migrate_task_event = latency_migrate_task_event,
- };
- struct trace_sched_handler map_ops = {
- .switch_event = map_switch_event,
- };
- struct trace_sched_handler replay_ops = {
- .wakeup_event = replay_wakeup_event,
- .switch_event = replay_switch_event,
- .fork_event = replay_fork_event,
- };
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(sched.curr_pid); i++)
- sched.curr_pid[i] = -1;
- argc = parse_options_subcommand(argc, argv, sched_options, sched_subcommands,
- sched_usage, PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc)
- usage_with_options(sched_usage, sched_options);
- /*
- * Aliased to 'perf script' for now:
- */
- if (!strcmp(argv[0], "script"))
- return cmd_script(argc, argv, prefix);
- if (!strncmp(argv[0], "rec", 3)) {
- return __cmd_record(argc, argv);
- } else if (!strncmp(argv[0], "lat", 3)) {
- sched.tp_handler = &lat_ops;
- if (argc > 1) {
- argc = parse_options(argc, argv, latency_options, latency_usage, 0);
- if (argc)
- usage_with_options(latency_usage, latency_options);
- }
- setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__lat(&sched);
- } else if (!strcmp(argv[0], "map")) {
- if (argc) {
- argc = parse_options(argc, argv, map_options, map_usage, 0);
- if (argc)
- usage_with_options(map_usage, map_options);
- }
- sched.tp_handler = &map_ops;
- setup_sorting(&sched, latency_options, latency_usage);
- return perf_sched__map(&sched);
- } else if (!strncmp(argv[0], "rep", 3)) {
- sched.tp_handler = &replay_ops;
- if (argc) {
- argc = parse_options(argc, argv, replay_options, replay_usage, 0);
- if (argc)
- usage_with_options(replay_usage, replay_options);
- }
- return perf_sched__replay(&sched);
- } else {
- usage_with_options(sched_usage, sched_options);
- }
- return 0;
- }