@@ -149,7 +149,6 @@ struct perf_sched {
 	unsigned long	nr_runs;
 	unsigned long	nr_timestamps;
 	unsigned long	nr_unordered_timestamps;
-	unsigned long	nr_state_machine_bugs;
 	unsigned long	nr_context_switch_bugs;
 	unsigned long	nr_events;
 	unsigned long	nr_lost_chunks;
@@ -1007,17 +1006,12 @@ static int latency_wakeup_event(struct perf_sched *sched,
 				struct perf_sample *sample,
 				struct machine *machine)
 {
-	const u32 pid	  = perf_evsel__intval(evsel, sample, "pid"),
-		  success = perf_evsel__intval(evsel, sample, "success");
+	const u32 pid = perf_evsel__intval(evsel, sample, "pid");
 	struct work_atoms *atoms;
 	struct work_atom *atom;
 	struct thread *wakee;
 	u64 timestamp = sample->time;
 
-	/* Note for later, it may be interesting to observe the failing cases */
-	if (!success)
-		return 0;
-
 	wakee = machine__findnew_thread(machine, 0, pid);
 	atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
 	if (!atoms) {
@@ -1037,12 +1031,18 @@ static int latency_wakeup_event(struct perf_sched *sched,
 	atom = list_entry(atoms->work_list.prev, struct work_atom, list);
 
 	/*
+	 * As we do not guarantee the wakeup event happens when
+	 * task is out of run queue, also may happen when task is
+	 * on run queue and wakeup only change ->state to TASK_RUNNING,
+	 * then we should not set the ->wake_up_time when wake up a
+	 * task which is on run queue.
+	 *
 	 * You WILL be missing events if you've recorded only
 	 * one CPU, or are only looking at only one, so don't
-	 * make useless noise.
+	 * skip in this case.
 	 */
 	if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
-		sched->nr_state_machine_bugs++;
+		return 0;
 
 	sched->nr_timestamps++;
 	if (atom->sched_out_time > timestamp) {
@@ -1266,9 +1266,8 @@ static int process_sched_wakeup_event(struct perf_tool *tool,
 static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 			struct perf_sample *sample, struct machine *machine)
 {
-	const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"),
-		  next_pid = perf_evsel__intval(evsel, sample, "next_pid");
-	struct thread *sched_out __maybe_unused, *sched_in;
+	const u32 next_pid = perf_evsel__intval(evsel, sample, "next_pid");
+	struct thread *sched_in;
 	int new_shortname;
 	u64 timestamp0, timestamp = sample->time;
 	s64 delta;
@@ -1291,7 +1290,6 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
 		return -1;
 	}
 
-	sched_out = machine__findnew_thread(machine, 0, prev_pid);
 	sched_in = machine__findnew_thread(machine, 0, next_pid);
 
 	sched->curr_thread[this_cpu] = sched_in;
@@ -1501,14 +1499,6 @@ static void print_bad_events(struct perf_sched *sched)
 		       (double)sched->nr_lost_events/(double)sched->nr_events * 100.0,
 		       sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks);
 	}
-	if (sched->nr_state_machine_bugs && sched->nr_timestamps) {
-		printf("  INFO: %.3f%% state machine bugs (%ld out of %ld)",
-		       (double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0,
-		       sched->nr_state_machine_bugs, sched->nr_timestamps);
-		if (sched->nr_lost_events)
-			printf(" (due to lost events?)");
-		printf("\n");
-	}
 	if (sched->nr_context_switch_bugs && sched->nr_timestamps) {
 		printf("  INFO: %.3f%% context switch bugs (%ld out of %ld)",
 		       (double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0,