/*
 * builtin-timechart.c - make an svg timechart of system activity
 *
 * (C) Copyright 2009 Intel Corporation
 *
 * Authors:
 *     Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

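/*
 * Note: the typical workflow (see the perf-timechart documentation) is to
 * capture a trace with "perf timechart record -- <workload>" and then run
 * "perf timechart" to render the recorded perf.data into an SVG chart.
 */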

#include <traceevent/event-parse.h>

#include "builtin.h"

#include "util/util.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include <linux/time64.h>
#include "util/symbol.h"
#include "util/callchain.h"
#include "util/strlist.h"

#include "perf.h"
#include "util/header.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/event.h"
#include "util/session.h"
#include "util/svghelper.h"
#include "util/tool.h"
#include "util/data.h"
#include "util/debug.h"

#define SUPPORT_OLD_POWER_EVENTS 1
#define PWR_EVENT_EXIT -1

struct per_pid;
struct power_event;
struct wake_event;

struct timechart {
	struct perf_tool	tool;
	struct per_pid		*all_data;
	struct power_event	*power_events;
	struct wake_event	*wake_events;
	int			proc_num;
	unsigned int		numcpus;
	u64			min_freq,	/* Lowest CPU frequency seen */
				max_freq,	/* Highest CPU frequency seen */
				turbo_frequency,
				first_time, last_time;
	bool			power_only,
				tasks_only,
				with_backtrace,
				topology;
	bool			force;
	/* IO related settings */
	bool			io_only,
				skip_eagain;
	u64			io_events;
	u64			min_time,
				merge_dist;
};

struct per_pidcomm;
struct cpu_sample;
struct io_sample;

/*
 * Data structure layout:
 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
 * Each "pid" entry has a list of "comm"s;
 *	this is because we want to track different programs differently, while
 *	exec will reuse the original pid (by design).
 * Each comm has a list of samples that will be used to draw
 * the final graph.
 */

struct per_pid {
	struct per_pid *next;

	int		pid;
	int		ppid;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	u64		total_bytes;
	int		display;

	struct per_pidcomm *all;
	struct per_pidcomm *current;
};

struct per_pidcomm {
	struct per_pidcomm *next;

	u64		start_time;
	u64		end_time;
	u64		total_time;
	u64		max_bytes;
	u64		total_bytes;

	int		Y;
	int		display;

	long		state;
	u64		state_since;

	char		*comm;

	struct cpu_sample *samples;
	struct io_sample  *io_samples;
};

struct sample_wrapper {
	struct sample_wrapper *next;

	u64		timestamp;
	unsigned char	data[0];
};

#define TYPE_NONE	0
#define TYPE_RUNNING	1
#define TYPE_WAITING	2
#define TYPE_BLOCKED	3

struct cpu_sample {
	struct cpu_sample *next;

	u64 start_time;
	u64 end_time;
	int type;
	int cpu;
	const char *backtrace;
};

enum {
	IOTYPE_READ,
	IOTYPE_WRITE,
	IOTYPE_SYNC,
	IOTYPE_TX,
	IOTYPE_RX,
	IOTYPE_POLL,
};

struct io_sample {
	struct io_sample *next;

	u64 start_time;
	u64 end_time;
	u64 bytes;
	int type;
	int fd;
	int err;
	int merges;
};

#define CSTATE 1
#define PSTATE 2

struct power_event {
	struct power_event *next;
	int type;
	int state;
	u64 start_time;
	u64 end_time;
	int cpu;
};

struct wake_event {
	struct wake_event *next;
	int waker;
	int wakee;
	u64 time;
	const char *backtrace;
};

struct process_filter {
	char			*name;
	int			pid;
	struct process_filter	*next;
};

static struct process_filter *process_filter;

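/*
 * Return the per_pid entry for @pid, allocating and linking a new one at
 * the head of tchart->all_data if it does not exist yet.
 */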
static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
{
	struct per_pid *cursor = tchart->all_data;

	while (cursor) {
		if (cursor->pid == pid)
			return cursor;
		cursor = cursor->next;
	}
	cursor = zalloc(sizeof(*cursor));
	assert(cursor != NULL);
	cursor->pid = pid;
	cursor->next = tchart->all_data;
	tchart->all_data = cursor;
	return cursor;
}

static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	p = find_create_pid(tchart, pid);
	c = p->all;
	while (c) {
		if (c->comm && strcmp(c->comm, comm) == 0) {
			p->current = c;
			return;
		}
		if (!c->comm) {
			c->comm = strdup(comm);
			p->current = c;
			return;
		}
		c = c->next;
	}
	c = zalloc(sizeof(*c));
	assert(c != NULL);
	c->comm = strdup(comm);
	p->current = c;
	c->next = p->all;
	p->all = c;
}

static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
{
	struct per_pid *p, *pp;
	p = find_create_pid(tchart, pid);
	pp = find_create_pid(tchart, ppid);
	p->ppid = ppid;
	if (pp->current && pp->current->comm && !p->current)
		pid_set_comm(tchart, pid, pp->current->comm);

	p->start_time = timestamp;
	if (p->current && !p->current->start_time) {
		p->current->start_time = timestamp;
		p->current->state_since = timestamp;
	}
}

static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
{
	struct per_pid *p;
	p = find_create_pid(tchart, pid);
	p->end_time = timestamp;
	if (p->current)
		p->current->end_time = timestamp;
}

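/*
 * Record a cpu_sample of @type for @pid covering [start, end] on @cpu, and
 * update the per-pid and per-comm runtime accounting and start times.
 */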
static void pid_put_sample(struct timechart *tchart, int pid, int type,
			   unsigned int cpu, u64 start, u64 end,
			   const char *backtrace)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;

	p = find_create_pid(tchart, pid);
	c = p->current;
	if (!c) {
		c = zalloc(sizeof(*c));
		assert(c != NULL);
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	sample = zalloc(sizeof(*sample));
	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->next = c->samples;
	sample->cpu = cpu;
	sample->backtrace = backtrace;
	c->samples = sample;

	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
		c->total_time += (end-start);
		p->total_time += (end-start);
	}

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;
	if (p->start_time == 0 || p->start_time > start)
		p->start_time = start;
}

#define MAX_CPUS 4096

static u64 cpus_cstate_start_times[MAX_CPUS];
static int cpus_cstate_state[MAX_CPUS];
static u64 cpus_pstate_start_times[MAX_CPUS];
static u64 cpus_pstate_state[MAX_CPUS];

static int process_comm_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
	return 0;
}

static int process_fork_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
	return 0;
}

static int process_exit_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);
	pid_exit(tchart, event->fork.pid, event->fork.time);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int use_old_power_events;
#endif

static void c_state_start(int cpu, u64 timestamp, int state)
{
	cpus_cstate_start_times[cpu] = timestamp;
	cpus_cstate_state[cpu] = state;
}

static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
{
	struct power_event *pwr = zalloc(sizeof(*pwr));

	if (!pwr)
		return;

	pwr->state = cpus_cstate_state[cpu];
	pwr->start_time = cpus_cstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = CSTATE;
	pwr->next = tchart->power_events;

	tchart->power_events = pwr;
}

static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
{
	struct power_event *pwr;

	if (new_freq > 8000000) /* detect invalid data */
		return;

	pwr = zalloc(sizeof(*pwr));
	if (!pwr)
		return;

	pwr->state = cpus_pstate_state[cpu];
	pwr->start_time = cpus_pstate_start_times[cpu];
	pwr->end_time = timestamp;
	pwr->cpu = cpu;
	pwr->type = PSTATE;
	pwr->next = tchart->power_events;

	if (!pwr->start_time)
		pwr->start_time = tchart->first_time;

	tchart->power_events = pwr;

	cpus_pstate_state[cpu] = new_freq;
	cpus_pstate_start_times[cpu] = timestamp;

	if ((u64)new_freq > tchart->max_freq)
		tchart->max_freq = new_freq;

	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
		tchart->min_freq = new_freq;

	if (new_freq == tchart->max_freq - 1000)
		tchart->turbo_frequency = tchart->max_freq;
}

static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
			 int waker, int wakee, u8 flags, const char *backtrace)
{
	struct per_pid *p;
	struct wake_event *we = zalloc(sizeof(*we));

	if (!we)
		return;

	we->time = timestamp;
	we->waker = waker;
	we->backtrace = backtrace;

	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
		we->waker = -1;

	we->wakee = wakee;
	we->next = tchart->wake_events;
	tchart->wake_events = we;
	p = find_create_pid(tchart, we->wakee);

	if (p && p->current && p->current->state == TYPE_NONE) {
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
	if (p && p->current && p->current->state == TYPE_BLOCKED) {
		pid_put_sample(tchart, p->pid, p->current->state, cpu,
			       p->current->state_since, timestamp, NULL);
		p->current->state_since = timestamp;
		p->current->state = TYPE_WAITING;
	}
}

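/*
 * Handle a sched_switch event: close out the RUNNING sample of the previous
 * task, start a RUNNING state for the next task, and mark the previous task
 * as BLOCKED or WAITING depending on its prev_state.
 */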
static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
			 int prev_pid, int next_pid, u64 prev_state,
			 const char *backtrace)
{
	struct per_pid *p = NULL, *prev_p;

	prev_p = find_create_pid(tchart, prev_pid);

	p = find_create_pid(tchart, next_pid);

	if (prev_p->current && prev_p->current->state != TYPE_NONE)
		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
			       prev_p->current->state_since, timestamp,
			       backtrace);
	if (p && p->current) {
		if (p->current->state != TYPE_NONE)
			pid_put_sample(tchart, next_pid, p->current->state, cpu,
				       p->current->state_since, timestamp,
				       backtrace);

		p->current->state_since = timestamp;
		p->current->state = TYPE_RUNNING;
	}

	if (prev_p->current) {
		prev_p->current->state = TYPE_NONE;
		prev_p->current->state_since = timestamp;
		if (prev_state & 2)
			prev_p->current->state = TYPE_BLOCKED;
		if (prev_state == 0)
			prev_p->current->state = TYPE_WAITING;
	}
}

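/*
 * Resolve the sample's callchain into a newly allocated, newline separated
 * string of symbol names (or raw addresses); the caller owns the returned
 * buffer. Returns NULL on failure or when the callchain is corrupted.
 */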
static const char *cat_backtrace(union perf_event *event,
				 struct perf_sample *sample,
				 struct machine *machine)
{
	struct addr_location al;
	unsigned int i;
	char *p = NULL;
	size_t p_len;
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location tal;
	struct ip_callchain *chain = sample->callchain;
	FILE *f = open_memstream(&p, &p_len);

	if (!f) {
		perror("open_memstream error");
		return NULL;
	}

	if (!chain)
		goto exit;

	if (machine__resolve(machine, &al, sample) < 0) {
		fprintf(stderr, "problem processing %d event, skipping it.\n",
			event->header.type);
		goto exit;
	}

	for (i = 0; i < chain->nr; i++) {
		u64 ip;

		if (callchain_param.order == ORDER_CALLEE)
			ip = chain->ips[i];
		else
			ip = chain->ips[chain->nr - i - 1];

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);

				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				zfree(&p);
				goto exit_put;
			}
			continue;
		}

		tal.filtered = 0;
		thread__find_addr_location(al.thread, cpumode,
					   MAP__FUNCTION, ip, &tal);

		if (tal.sym)
			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
				tal.sym->name);
		else
			fprintf(f, "..... %016" PRIx64 "\n", ip);
	}
exit_put:
	addr_location__put(&al);
exit:
	fclose(f);

	return p;
}

typedef int (*tracepoint_handler)(struct timechart *tchart,
				  struct perf_evsel *evsel,
				  struct perf_sample *sample,
				  const char *backtrace);

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct timechart *tchart = container_of(tool, struct timechart, tool);

	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
		if (!tchart->first_time || tchart->first_time > sample->time)
			tchart->first_time = sample->time;
		if (tchart->last_time < sample->time)
			tchart->last_time = sample->time;
	}

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		return f(tchart, evsel, sample,
			 cat_backtrace(event, sample, machine));
	}

	return 0;
}

static int
process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
			struct perf_evsel *evsel,
			struct perf_sample *sample,
			const char *backtrace __maybe_unused)
{
	u32 state = perf_evsel__intval(evsel, sample, "state");
	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");

	if (state == (u32)PWR_EVENT_EXIT)
		c_state_end(tchart, cpu_id, sample->time);
	else
		c_state_start(cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_cpu_frequency(struct timechart *tchart,
			     struct perf_evsel *evsel,
			     struct perf_sample *sample,
			     const char *backtrace __maybe_unused)
{
	u32 state = perf_evsel__intval(evsel, sample, "state");
	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");

	p_state_change(tchart, cpu_id, sample->time, state);
	return 0;
}

static int
process_sample_sched_wakeup(struct timechart *tchart,
			    struct perf_evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
	int waker = perf_evsel__intval(evsel, sample, "common_pid");
	int wakee = perf_evsel__intval(evsel, sample, "pid");

	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
	return 0;
}

static int
process_sample_sched_switch(struct timechart *tchart,
			    struct perf_evsel *evsel,
			    struct perf_sample *sample,
			    const char *backtrace)
{
	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");

	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
		     prev_state, backtrace);
	return 0;
}

#ifdef SUPPORT_OLD_POWER_EVENTS
static int
process_sample_power_start(struct timechart *tchart __maybe_unused,
			   struct perf_evsel *evsel,
			   struct perf_sample *sample,
			   const char *backtrace __maybe_unused)
{
	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
	u64 value = perf_evsel__intval(evsel, sample, "value");

	c_state_start(cpu_id, sample->time, value);
	return 0;
}

static int
process_sample_power_end(struct timechart *tchart,
			 struct perf_evsel *evsel __maybe_unused,
			 struct perf_sample *sample,
			 const char *backtrace __maybe_unused)
{
	c_state_end(tchart, sample->cpu, sample->time);
	return 0;
}

static int
process_sample_power_frequency(struct timechart *tchart,
			       struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       const char *backtrace __maybe_unused)
{
	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
	u64 value = perf_evsel__intval(evsel, sample, "value");

	p_state_change(tchart, cpu_id, sample->time, value);
	return 0;
}
#endif /* SUPPORT_OLD_POWER_EVENTS */

/*
 * After the last sample we need to wrap up the current C/P state
 * and close out each CPU for these.
 */
static void end_sample_processing(struct timechart *tchart)
{
	u64 cpu;
	struct power_event *pwr;

	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
		/* C state */
#if 0
		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_cstate_state[cpu];
		pwr->start_time = cpus_cstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = CSTATE;
		pwr->next = tchart->power_events;

		tchart->power_events = pwr;
#endif
		/* P state */

		pwr = zalloc(sizeof(*pwr));
		if (!pwr)
			return;

		pwr->state = cpus_pstate_state[cpu];
		pwr->start_time = cpus_pstate_start_times[cpu];
		pwr->end_time = tchart->last_time;
		pwr->cpu = cpu;
		pwr->type = PSTATE;
		pwr->next = tchart->power_events;

		if (!pwr->start_time)
			pwr->start_time = tchart->first_time;
		if (!pwr->state)
			pwr->state = tchart->min_freq;
		tchart->power_events = pwr;
	}
}

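/*
 * Open a new io_sample of @type on @fd for @pid at time @start. If the
 * previous sample for this comm was started but never finished, it is
 * dropped, since no matching end event can be expected anymore.
 */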
static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
			       u64 start, int fd)
{
	struct per_pid *p = find_create_pid(tchart, pid);
	struct per_pidcomm *c = p->current;
	struct io_sample *sample;
	struct io_sample *prev;

	if (!c) {
		c = zalloc(sizeof(*c));
		if (!c)
			return -ENOMEM;
		p->current = c;
		c->next = p->all;
		p->all = c;
	}

	prev = c->io_samples;

	if (prev && prev->start_time && !prev->end_time) {
		pr_warning("Skip invalid start event: "
			   "previous event already started!\n");

		/* remove previous event that has been started,
		 * we are not sure we will ever get an end for it */
		c->io_samples = prev->next;
		free(prev);
		return 0;
	}

	sample = zalloc(sizeof(*sample));
	if (!sample)
		return -ENOMEM;
	sample->start_time = start;
	sample->type = type;
	sample->fd = fd;
	sample->next = c->io_samples;
	c->io_samples = sample;

	if (c->start_time == 0 || c->start_time > start)
		c->start_time = start;

	return 0;
}

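/*
 * Close the most recent io_sample for @pid: record the end time and the
 * syscall result, enforce a minimum visible duration, and merge the sample
 * with the previous one when both are of the same kind and close enough.
 */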
static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
			     u64 end, long ret)
{
	struct per_pid *p = find_create_pid(tchart, pid);
	struct per_pidcomm *c = p->current;
	struct io_sample *sample, *prev;

	if (!c) {
		pr_warning("Invalid pidcomm!\n");
		return -1;
	}

	sample = c->io_samples;

	if (!sample) /* skip partially captured events */
		return 0;

	if (sample->end_time) {
		pr_warning("Skip invalid end event: "
			   "previous event already ended!\n");
		return 0;
	}

	if (sample->type != type) {
		pr_warning("Skip invalid end event: invalid event type!\n");
		return 0;
	}

	sample->end_time = end;
	prev = sample->next;

	/* we want to be able to see small and fast transfers, so make them
	 * at least min_time long, but don't overlap them */
	if (sample->end_time - sample->start_time < tchart->min_time)
		sample->end_time = sample->start_time + tchart->min_time;
	if (prev && sample->start_time < prev->end_time) {
		if (prev->err) /* try to make errors more visible */
			sample->start_time = prev->end_time;
		else
			prev->end_time = sample->start_time;
	}

	if (ret < 0) {
		sample->err = ret;
	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
		   type == IOTYPE_TX || type == IOTYPE_RX) {

		if ((u64)ret > c->max_bytes)
			c->max_bytes = ret;

		c->total_bytes += ret;
		p->total_bytes += ret;
		sample->bytes = ret;
	}

	/* merge two requests to make svg smaller and render-friendly */
	if (prev &&
	    prev->type == sample->type &&
	    prev->err == sample->err &&
	    prev->fd == sample->fd &&
	    prev->end_time + tchart->merge_dist >= sample->start_time) {

		sample->bytes += prev->bytes;
		sample->merges += prev->merges + 1;

		sample->start_time = prev->start_time;
		sample->next = prev->next;
		free(prev);

		if (!sample->err && sample->bytes > c->max_bytes)
			c->max_bytes = sample->bytes;
	}

	tchart->io_events++;

	return 0;
}

static int
process_enter_read(struct timechart *tchart,
		   struct perf_evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
				   sample->time, fd);
}

static int
process_exit_read(struct timechart *tchart,
		  struct perf_evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
				 sample->time, ret);
}

static int
process_enter_write(struct timechart *tchart,
		    struct perf_evsel *evsel,
		    struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
				   sample->time, fd);
}

static int
process_exit_write(struct timechart *tchart,
		   struct perf_evsel *evsel,
		   struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
				 sample->time, ret);
}

static int
process_enter_sync(struct timechart *tchart,
		   struct perf_evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
				   sample->time, fd);
}

static int
process_exit_sync(struct timechart *tchart,
		  struct perf_evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
				 sample->time, ret);
}

static int
process_enter_tx(struct timechart *tchart,
		 struct perf_evsel *evsel,
		 struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
				   sample->time, fd);
}

static int
process_exit_tx(struct timechart *tchart,
		struct perf_evsel *evsel,
		struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
				 sample->time, ret);
}

static int
process_enter_rx(struct timechart *tchart,
		 struct perf_evsel *evsel,
		 struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
				   sample->time, fd);
}

static int
process_exit_rx(struct timechart *tchart,
		struct perf_evsel *evsel,
		struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
				 sample->time, ret);
}

static int
process_enter_poll(struct timechart *tchart,
		   struct perf_evsel *evsel,
		   struct perf_sample *sample)
{
	long fd = perf_evsel__intval(evsel, sample, "fd");
	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
				   sample->time, fd);
}

static int
process_exit_poll(struct timechart *tchart,
		  struct perf_evsel *evsel,
		  struct perf_sample *sample)
{
	long ret = perf_evsel__intval(evsel, sample, "ret");
	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
				 sample->time, ret);
}

/*
 * Sort the pid datastructure
 */
static void sort_pids(struct timechart *tchart)
{
	struct per_pid *new_list, *p, *cursor, *prev;
	/* sort by ppid first, then by pid, lowest to highest */

	new_list = NULL;

	while (tchart->all_data) {
		p = tchart->all_data;
		tchart->all_data = p->next;
		p->next = NULL;

		if (new_list == NULL) {
			new_list = p;
			p->next = NULL;
			continue;
		}
		prev = NULL;
		cursor = new_list;
		while (cursor) {
			if (cursor->ppid > p->ppid ||
			    (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
				/* must insert before */
				if (prev) {
					p->next = prev->next;
					prev->next = p;
					cursor = NULL;
					continue;
				} else {
					p->next = new_list;
					new_list = p;
					cursor = NULL;
					continue;
				}
			}

			prev = cursor;
			cursor = cursor->next;
			if (!cursor)
				prev->next = p;
		}
	}
	tchart->all_data = new_list;
}

static void draw_c_p_states(struct timechart *tchart)
{
	struct power_event *pwr;
	pwr = tchart->power_events;

	/*
	 * two pass drawing so that the P state bars are on top of the C state blocks
	 */
	while (pwr) {
		if (pwr->type == CSTATE)
			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		pwr = pwr->next;
	}

	pwr = tchart->power_events;
	while (pwr) {
		if (pwr->type == PSTATE) {
			if (!pwr->state)
				pwr->state = tchart->min_freq;
			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
		}
		pwr = pwr->next;
	}
}

static void draw_wakeups(struct timechart *tchart)
{
	struct wake_event *we;
	struct per_pid *p;
	struct per_pidcomm *c;

	we = tchart->wake_events;
	while (we) {
		int from = 0, to = 0;
		char *task_from = NULL, *task_to = NULL;

		/* locate the column of the waker and wakee */
		p = tchart->all_data;
		while (p) {
			if (p->pid == we->waker || p->pid == we->wakee) {
				c = p->all;
				while (c) {
					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
						if (p->pid == we->waker && !from) {
							from = c->Y;
							task_from = strdup(c->comm);
						}
						if (p->pid == we->wakee && !to) {
							to = c->Y;
							task_to = strdup(c->comm);
						}
					}
					c = c->next;
				}
				c = p->all;
				while (c) {
					if (p->pid == we->waker && !from) {
						from = c->Y;
						task_from = strdup(c->comm);
					}
					if (p->pid == we->wakee && !to) {
						to = c->Y;
						task_to = strdup(c->comm);
					}
					c = c->next;
				}
			}
			p = p->next;
		}

		if (!task_from) {
			task_from = malloc(40);
			sprintf(task_from, "[%i]", we->waker);
		}
		if (!task_to) {
			task_to = malloc(40);
			sprintf(task_to, "[%i]", we->wakee);
		}

		if (we->waker == -1)
			svg_interrupt(we->time, to, we->backtrace);
		else if (from && to && abs(from - to) == 1)
			svg_wakeline(we->time, from, to, we->backtrace);
		else
			svg_partial_wakeline(we->time, from, task_from, to,
					     task_to, we->backtrace);
		we = we->next;

		free(task_from);
		free(task_to);
	}
}

static void draw_cpu_usage(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING) {
					svg_process(sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    p->pid,
						    c->comm,
						    sample->backtrace);
				}

				sample = sample->next;
			}
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_io_bars(struct timechart *tchart)
{
	const char *suf;
	double bytes;
	char comm[256];
	struct per_pid *p;
	struct per_pidcomm *c;
	struct io_sample *sample;
	int Y = 1;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process3");
			sample = c->io_samples;
			for (sample = c->io_samples; sample; sample = sample->next) {
				double h = (double)sample->bytes / c->max_bytes;

				if (tchart->skip_eagain &&
				    sample->err == -EAGAIN)
					continue;

				if (sample->err)
					h = 1;

				if (sample->type == IOTYPE_SYNC)
					svg_fbox(Y,
						 sample->start_time,
						 sample->end_time,
						 1,
						 sample->err ? "error" : "sync",
						 sample->fd,
						 sample->err,
						 sample->merges);
				else if (sample->type == IOTYPE_POLL)
					svg_fbox(Y,
						 sample->start_time,
						 sample->end_time,
						 1,
						 sample->err ? "error" : "poll",
						 sample->fd,
						 sample->err,
						 sample->merges);
				else if (sample->type == IOTYPE_READ)
					svg_ubox(Y,
						 sample->start_time,
						 sample->end_time,
						 h,
						 sample->err ? "error" : "disk",
						 sample->fd,
						 sample->err,
						 sample->merges);
				else if (sample->type == IOTYPE_WRITE)
					svg_lbox(Y,
						 sample->start_time,
						 sample->end_time,
						 h,
						 sample->err ? "error" : "disk",
						 sample->fd,
						 sample->err,
						 sample->merges);
				else if (sample->type == IOTYPE_RX)
					svg_ubox(Y,
						 sample->start_time,
						 sample->end_time,
						 h,
						 sample->err ? "error" : "net",
						 sample->fd,
						 sample->err,
						 sample->merges);
				else if (sample->type == IOTYPE_TX)
					svg_lbox(Y,
						 sample->start_time,
						 sample->end_time,
						 h,
						 sample->err ? "error" : "net",
						 sample->fd,
						 sample->err,
						 sample->merges);
			}

			suf = "";
			bytes = c->total_bytes;
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "K";
			}
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "M";
			}
			if (bytes > 1024) {
				bytes = bytes / 1024;
				suf = "G";
			}

			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
			svg_text(Y, c->start_time, comm);

			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void draw_process_bars(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *sample;
	int Y = 0;

	Y = 2 * tchart->numcpus + 2;

	p = tchart->all_data;
	while (p) {
		c = p->all;
		while (c) {
			if (!c->display) {
				c->Y = 0;
				c = c->next;
				continue;
			}

			svg_box(Y, c->start_time, c->end_time, "process");
			sample = c->samples;
			while (sample) {
				if (sample->type == TYPE_RUNNING)
					svg_running(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_BLOCKED)
					svg_blocked(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				if (sample->type == TYPE_WAITING)
					svg_waiting(Y, sample->cpu,
						    sample->start_time,
						    sample->end_time,
						    sample->backtrace);
				sample = sample->next;
			}

			if (c->comm) {
				char comm[256];
				if (c->total_time > 5000000000) /* 5 seconds */
					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
				else
					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);

				svg_text(Y, c->start_time, comm);
			}
			c->Y = Y;
			Y++;
			c = c->next;
		}
		p = p->next;
	}
}

static void add_process_filter(const char *string)
{
	int pid = strtoull(string, NULL, 10);
	struct process_filter *filt = malloc(sizeof(*filt));

	if (!filt)
		return;

	filt->name = strdup(string);
	filt->pid  = pid;
	filt->next = process_filter;

	process_filter = filt;
}

static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
{
	struct process_filter *filt;
	if (!process_filter)
		return 1;

	filt = process_filter;
	while (filt) {
		if (filt->pid && p->pid == filt->pid)
			return 1;
		if (strcmp(filt->name, c->comm) == 0)
			return 1;
		filt = filt->next;
	}
	return 0;
}

static int determine_display_tasks_filtered(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	int count = 0;

	p = tchart->all_data;
	while (p) {
		p->display = 0;
		if (p->start_time == 1)
			p->start_time = tchart->first_time;

		/* no exit marker, task kept running to the end */
		if (p->end_time == 0)
			p->end_time = tchart->last_time;

		c = p->all;

		while (c) {
			c->display = 0;

			if (c->start_time == 1)
				c->start_time = tchart->first_time;

			if (passes_filter(p, c)) {
				c->display = 1;
				p->display = 1;
				count++;
			}

			if (c->end_time == 0)
				c->end_time = tchart->last_time;

			c = c->next;
		}
		p = p->next;
	}
	return count;
}

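/*
 * Mark the pids/comms whose accumulated runtime is at least @threshold for
 * display, and clamp missing start/end times to the trace window. Returns
 * the number of comms selected.
 */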
static int determine_display_tasks(struct timechart *tchart, u64 threshold)
|
2009-09-12 09:53:05 +04:00
|
|
|
{
|
|
|
|
struct per_pid *p;
|
|
|
|
struct per_pidcomm *c;
|
|
|
|
int count = 0;
|
|
|
|
|
2013-11-28 18:50:41 +04:00
|
|
|
p = tchart->all_data;
|
2009-09-12 09:53:05 +04:00
|
|
|
while (p) {
|
|
|
|
p->display = 0;
|
|
|
|
if (p->start_time == 1)
|
2013-11-28 18:25:19 +04:00
|
|
|
p->start_time = tchart->first_time;
|
2009-09-12 09:53:05 +04:00
|
|
|
|
|
|
|
/* no exit marker, task kept running to the end */
|
|
|
|
if (p->end_time == 0)
|
2013-11-28 18:25:19 +04:00
|
|
|
p->end_time = tchart->last_time;
|
2013-11-01 20:25:47 +04:00
|
|
|
if (p->total_time >= threshold)
|
2009-09-12 09:53:05 +04:00
|
|
|
p->display = 1;
|
|
|
|
|
|
|
|
c = p->all;
|
|
|
|
|
|
|
|
while (c) {
|
|
|
|
c->display = 0;
|
|
|
|
|
|
|
|
if (c->start_time == 1)
|
2013-11-28 18:25:19 +04:00
|
|
|
c->start_time = tchart->first_time;
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2013-11-01 20:25:47 +04:00
|
|
|
if (c->total_time >= threshold) {
|
2009-09-12 09:53:05 +04:00
|
|
|
c->display = 1;
|
|
|
|
count++;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (c->end_time == 0)
|
2013-11-28 18:25:19 +04:00
|
|
|
c->end_time = tchart->last_time;
|
2009-09-12 09:53:05 +04:00
|
|
|
|
|
|
|
c = c->next;
|
|
|
|
}
|
|
|
|
p = p->next;
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
|
|
|
|
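/*
 * I/O chart variant: a comm is shown when its total number of I/O bytes
 * reaches 'threshold'.
 */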
2014-07-08 20:03:41 +04:00
|
|
|
static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
|
|
|
|
{
|
|
|
|
struct per_pid *p;
|
|
|
|
struct per_pidcomm *c;
|
|
|
|
int count = 0;
|
|
|
|
|
|
|
|
p = timechart->all_data;
|
|
|
|
while (p) {
|
|
|
|
/* no exit marker, task kept running to the end */
|
|
|
|
if (p->end_time == 0)
|
|
|
|
p->end_time = timechart->last_time;
|
|
|
|
|
|
|
|
c = p->all;
|
|
|
|
|
|
|
|
while (c) {
|
|
|
|
c->display = 0;
|
|
|
|
|
|
|
|
if (c->total_bytes >= threshold) {
|
|
|
|
c->display = 1;
|
|
|
|
count++;
|
|
|
|
}
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2014-07-08 20:03:41 +04:00
|
|
|
if (c->end_time == 0)
|
|
|
|
c->end_time = timechart->last_time;
|
|
|
|
|
|
|
|
c = c->next;
|
|
|
|
}
|
|
|
|
p = p->next;
|
|
|
|
}
|
|
|
|
return count;
|
|
|
|
}
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2014-07-08 20:03:41 +04:00
|
|
|
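/*
 * Default cut-offs used by write_svg_file() below: 1 MiB of I/O for the
 * I/O chart, 10 ms (10,000,000 ns) of CPU time for the regular chart.
 */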
#define BYTES_THRESH (1 * 1024 * 1024)
|
2009-09-12 09:53:05 +04:00
|
|
|
#define TIME_THRESH 10000000
|
|
|
|
|
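/*
 * Render the final SVG.  Start from the default threshold and divide it
 * by ten until at least proc_num tasks qualify (e.g. 10 ms -> 1 ms ->
 * 100 us ...), unless an explicit process filter is in use.  Then emit
 * either the I/O layout or the CPU/power layout (per-CPU boxes, process
 * bars, C/P states and wakeups).
 */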
2013-11-28 18:25:19 +04:00
|
|
|
static void write_svg_file(struct timechart *tchart, const char *filename)
|
2009-09-12 09:53:05 +04:00
|
|
|
{
|
|
|
|
u64 i;
|
|
|
|
int count;
|
2014-07-08 20:03:41 +04:00
|
|
|
int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
if (tchart->power_only)
|
|
|
|
tchart->proc_num = 0;
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2013-11-01 20:25:45 +04:00
|
|
|
/* We'd like to show at least proc_num tasks;
|
|
|
|
* be less picky if we have fewer */
|
|
|
|
do {
|
2014-07-08 20:03:41 +04:00
|
|
|
if (process_filter)
|
|
|
|
count = determine_display_tasks_filtered(tchart);
|
|
|
|
else if (tchart->io_events)
|
|
|
|
count = determine_display_io_tasks(tchart, thresh);
|
|
|
|
else
|
|
|
|
count = determine_display_tasks(tchart, thresh);
|
2013-11-01 20:25:45 +04:00
|
|
|
thresh /= 10;
|
2013-11-28 18:25:19 +04:00
|
|
|
} while (!process_filter && thresh && count < tchart->proc_num);
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2014-01-20 15:39:38 +04:00
|
|
|
if (!tchart->proc_num)
|
|
|
|
count = 0;
|
|
|
|
|
2014-07-08 20:03:41 +04:00
|
|
|
if (tchart->io_events) {
|
|
|
|
open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2014-07-08 20:03:41 +04:00
|
|
|
svg_time_grid(0.5);
|
|
|
|
svg_io_legenda();
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2014-07-08 20:03:41 +04:00
|
|
|
draw_io_bars(tchart);
|
|
|
|
} else {
|
|
|
|
open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2014-07-08 20:03:41 +04:00
|
|
|
svg_time_grid(0);
|
|
|
|
|
|
|
|
svg_legenda();
|
|
|
|
|
|
|
|
for (i = 0; i < tchart->numcpus; i++)
|
|
|
|
svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
|
|
|
|
|
|
|
|
draw_cpu_usage(tchart);
|
|
|
|
if (tchart->proc_num)
|
|
|
|
draw_process_bars(tchart);
|
|
|
|
if (!tchart->tasks_only)
|
|
|
|
draw_c_p_states(tchart);
|
|
|
|
if (tchart->proc_num)
|
|
|
|
draw_wakeups(tchart);
|
|
|
|
}
|
2009-09-12 09:53:05 +04:00
|
|
|
|
|
|
|
svg_close();
|
|
|
|
}
|
|
|
|
|
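/*
 * Header-feature callback: pick up the number of available CPUs and,
 * when -t/--topology was given, hand the sibling core/thread maps to
 * svg_build_topology_map() so CPUs can be sorted by topology.
 */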
2013-12-02 18:37:35 +04:00
|
|
|
static int process_header(struct perf_file_section *section __maybe_unused,
|
|
|
|
struct perf_header *ph,
|
|
|
|
int feat,
|
|
|
|
int fd __maybe_unused,
|
|
|
|
void *data)
|
|
|
|
{
|
|
|
|
struct timechart *tchart = data;
|
|
|
|
|
|
|
|
switch (feat) {
|
|
|
|
case HEADER_NRCPUS:
|
|
|
|
tchart->numcpus = ph->env.nr_cpus_avail;
|
|
|
|
break;
|
2013-12-02 18:37:36 +04:00
|
|
|
|
|
|
|
case HEADER_CPU_TOPOLOGY:
|
|
|
|
if (!tchart->topology)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (svg_build_topology_map(ph->env.sibling_cores,
|
|
|
|
ph->env.nr_sibling_cores,
|
|
|
|
ph->env.sibling_threads,
|
|
|
|
ph->env.nr_sibling_threads))
|
|
|
|
fprintf(stderr, "problem building topology\n");
|
|
|
|
break;
|
|
|
|
|
2013-12-02 18:37:35 +04:00
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
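/*
 * Report mode: map each recorded tracepoint to its handler, open the
 * perf.data file, read the header features, process all events and
 * finally sort the pids and write the SVG chart.
 */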
2013-11-28 18:25:19 +04:00
|
|
|
static int __cmd_timechart(struct timechart *tchart, const char *output_name)
|
2009-12-01 09:05:16 +03:00
|
|
|
{
|
2013-07-11 19:28:30 +04:00
|
|
|
const struct perf_evsel_str_handler power_tracepoints[] = {
|
|
|
|
{ "power:cpu_idle", process_sample_cpu_idle },
|
|
|
|
{ "power:cpu_frequency", process_sample_cpu_frequency },
|
|
|
|
{ "sched:sched_wakeup", process_sample_sched_wakeup },
|
|
|
|
{ "sched:sched_switch", process_sample_sched_switch },
|
|
|
|
#ifdef SUPPORT_OLD_POWER_EVENTS
|
|
|
|
{ "power:power_start", process_sample_power_start },
|
|
|
|
{ "power:power_end", process_sample_power_end },
|
|
|
|
{ "power:power_frequency", process_sample_power_frequency },
|
|
|
|
#endif
|
2014-07-08 20:03:41 +04:00
|
|
|
|
|
|
|
{ "syscalls:sys_enter_read", process_enter_read },
|
|
|
|
{ "syscalls:sys_enter_pread64", process_enter_read },
|
|
|
|
{ "syscalls:sys_enter_readv", process_enter_read },
|
|
|
|
{ "syscalls:sys_enter_preadv", process_enter_read },
|
|
|
|
{ "syscalls:sys_enter_write", process_enter_write },
|
|
|
|
{ "syscalls:sys_enter_pwrite64", process_enter_write },
|
|
|
|
{ "syscalls:sys_enter_writev", process_enter_write },
|
|
|
|
{ "syscalls:sys_enter_pwritev", process_enter_write },
|
|
|
|
{ "syscalls:sys_enter_sync", process_enter_sync },
|
|
|
|
{ "syscalls:sys_enter_sync_file_range", process_enter_sync },
|
|
|
|
{ "syscalls:sys_enter_fsync", process_enter_sync },
|
|
|
|
{ "syscalls:sys_enter_msync", process_enter_sync },
|
|
|
|
{ "syscalls:sys_enter_recvfrom", process_enter_rx },
|
|
|
|
{ "syscalls:sys_enter_recvmmsg", process_enter_rx },
|
|
|
|
{ "syscalls:sys_enter_recvmsg", process_enter_rx },
|
|
|
|
{ "syscalls:sys_enter_sendto", process_enter_tx },
|
|
|
|
{ "syscalls:sys_enter_sendmsg", process_enter_tx },
|
|
|
|
{ "syscalls:sys_enter_sendmmsg", process_enter_tx },
|
|
|
|
{ "syscalls:sys_enter_epoll_pwait", process_enter_poll },
|
|
|
|
{ "syscalls:sys_enter_epoll_wait", process_enter_poll },
|
|
|
|
{ "syscalls:sys_enter_poll", process_enter_poll },
|
|
|
|
{ "syscalls:sys_enter_ppoll", process_enter_poll },
|
|
|
|
{ "syscalls:sys_enter_pselect6", process_enter_poll },
|
|
|
|
{ "syscalls:sys_enter_select", process_enter_poll },
|
|
|
|
|
|
|
|
{ "syscalls:sys_exit_read", process_exit_read },
|
|
|
|
{ "syscalls:sys_exit_pread64", process_exit_read },
|
|
|
|
{ "syscalls:sys_exit_readv", process_exit_read },
|
|
|
|
{ "syscalls:sys_exit_preadv", process_exit_read },
|
|
|
|
{ "syscalls:sys_exit_write", process_exit_write },
|
|
|
|
{ "syscalls:sys_exit_pwrite64", process_exit_write },
|
|
|
|
{ "syscalls:sys_exit_writev", process_exit_write },
|
|
|
|
{ "syscalls:sys_exit_pwritev", process_exit_write },
|
|
|
|
{ "syscalls:sys_exit_sync", process_exit_sync },
|
|
|
|
{ "syscalls:sys_exit_sync_file_range", process_exit_sync },
|
|
|
|
{ "syscalls:sys_exit_fsync", process_exit_sync },
|
|
|
|
{ "syscalls:sys_exit_msync", process_exit_sync },
|
|
|
|
{ "syscalls:sys_exit_recvfrom", process_exit_rx },
|
|
|
|
{ "syscalls:sys_exit_recvmmsg", process_exit_rx },
|
|
|
|
{ "syscalls:sys_exit_recvmsg", process_exit_rx },
|
|
|
|
{ "syscalls:sys_exit_sendto", process_exit_tx },
|
|
|
|
{ "syscalls:sys_exit_sendmsg", process_exit_tx },
|
|
|
|
{ "syscalls:sys_exit_sendmmsg", process_exit_tx },
|
|
|
|
{ "syscalls:sys_exit_epoll_pwait", process_exit_poll },
|
|
|
|
{ "syscalls:sys_exit_epoll_wait", process_exit_poll },
|
|
|
|
{ "syscalls:sys_exit_poll", process_exit_poll },
|
|
|
|
{ "syscalls:sys_exit_ppoll", process_exit_poll },
|
|
|
|
{ "syscalls:sys_exit_pselect6", process_exit_poll },
|
|
|
|
{ "syscalls:sys_exit_select", process_exit_poll },
|
2013-07-11 19:28:30 +04:00
|
|
|
};
|
2013-10-15 18:27:32 +04:00
|
|
|
struct perf_data_file file = {
|
|
|
|
.path = input_name,
|
|
|
|
.mode = PERF_DATA_MODE_READ,
|
2015-04-02 16:47:17 +03:00
|
|
|
.force = tchart->force,
|
2013-10-15 18:27:32 +04:00
|
|
|
};
|
|
|
|
|
|
|
|
struct perf_session *session = perf_session__new(&file, false,
|
2013-11-28 18:25:19 +04:00
|
|
|
&tchart->tool);
|
2009-12-28 02:37:02 +03:00
|
|
|
int ret = -EINVAL;
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2009-12-12 02:24:02 +03:00
|
|
|
if (session == NULL)
|
2014-09-24 05:33:37 +04:00
|
|
|
return -1;
|
2009-12-12 02:24:02 +03:00
|
|
|
|
2014-08-12 10:40:45 +04:00
|
|
|
symbol__init(&session->header.env);
|
2014-08-12 10:40:43 +04:00
|
|
|
|
2013-12-02 18:37:35 +04:00
|
|
|
(void)perf_header__process_sections(&session->header,
|
|
|
|
perf_data_file__fd(session->file),
|
|
|
|
tchart,
|
|
|
|
process_header);
|
|
|
|
|
2009-12-28 02:37:02 +03:00
|
|
|
if (!perf_session__has_traces(session, "timechart record"))
|
|
|
|
goto out_delete;
|
|
|
|
|
2013-07-11 19:28:30 +04:00
|
|
|
if (perf_session__set_tracepoints_handlers(session,
|
|
|
|
power_tracepoints)) {
|
|
|
|
pr_err("Initializing session tracepoint handlers failed\n");
|
|
|
|
goto out_delete;
|
|
|
|
}
|
|
|
|
|
2015-03-03 17:58:45 +03:00
|
|
|
ret = perf_session__process_events(session);
|
2009-12-01 09:05:16 +03:00
|
|
|
if (ret)
|
2009-12-12 02:24:02 +03:00
|
|
|
goto out_delete;
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
end_sample_processing(tchart);
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2013-11-28 18:50:41 +04:00
|
|
|
sort_pids(tchart);
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
write_svg_file(tchart, output_name);
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2009-10-21 23:34:06 +04:00
|
|
|
pr_info("Written %2.1f seconds of trace to %s.\n",
|
2016-08-08 18:45:58 +03:00
|
|
|
(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
|
2009-12-12 02:24:02 +03:00
|
|
|
out_delete:
|
|
|
|
perf_session__delete(session);
|
|
|
|
return ret;
|
2009-09-12 09:53:05 +04:00
|
|
|
}
|
|
|
|
|
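/*
 * Build the 'perf record' command line for I/O mode: for every syscall
 * tracepoint that exists on the running kernel, add an
 * "-e <event> --filter common_pid != <our pid>" quadruple; tracepoints
 * that are missing shrink rec_argc by the four slots they would use.
 * Roughly (illustrative):
 *   perf record -a -R -c 1 \
 *       -e syscalls:sys_enter_read --filter "common_pid != 1234" ...
 */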
2014-07-08 20:03:41 +04:00
|
|
|
static int timechart__io_record(int argc, const char **argv)
|
|
|
|
{
|
|
|
|
unsigned int rec_argc, i;
|
|
|
|
const char **rec_argv;
|
|
|
|
const char **p;
|
|
|
|
char *filter = NULL;
|
|
|
|
|
|
|
|
const char * const common_args[] = {
|
|
|
|
"record", "-a", "-R", "-c", "1",
|
|
|
|
};
|
|
|
|
unsigned int common_args_nr = ARRAY_SIZE(common_args);
|
|
|
|
|
|
|
|
const char * const disk_events[] = {
|
|
|
|
"syscalls:sys_enter_read",
|
|
|
|
"syscalls:sys_enter_pread64",
|
|
|
|
"syscalls:sys_enter_readv",
|
|
|
|
"syscalls:sys_enter_preadv",
|
|
|
|
"syscalls:sys_enter_write",
|
|
|
|
"syscalls:sys_enter_pwrite64",
|
|
|
|
"syscalls:sys_enter_writev",
|
|
|
|
"syscalls:sys_enter_pwritev",
|
|
|
|
"syscalls:sys_enter_sync",
|
|
|
|
"syscalls:sys_enter_sync_file_range",
|
|
|
|
"syscalls:sys_enter_fsync",
|
|
|
|
"syscalls:sys_enter_msync",
|
|
|
|
|
|
|
|
"syscalls:sys_exit_read",
|
|
|
|
"syscalls:sys_exit_pread64",
|
|
|
|
"syscalls:sys_exit_readv",
|
|
|
|
"syscalls:sys_exit_preadv",
|
|
|
|
"syscalls:sys_exit_write",
|
|
|
|
"syscalls:sys_exit_pwrite64",
|
|
|
|
"syscalls:sys_exit_writev",
|
|
|
|
"syscalls:sys_exit_pwritev",
|
|
|
|
"syscalls:sys_exit_sync",
|
|
|
|
"syscalls:sys_exit_sync_file_range",
|
|
|
|
"syscalls:sys_exit_fsync",
|
|
|
|
"syscalls:sys_exit_msync",
|
|
|
|
};
|
|
|
|
unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
|
|
|
|
|
|
|
|
const char * const net_events[] = {
|
|
|
|
"syscalls:sys_enter_recvfrom",
|
|
|
|
"syscalls:sys_enter_recvmmsg",
|
|
|
|
"syscalls:sys_enter_recvmsg",
|
|
|
|
"syscalls:sys_enter_sendto",
|
|
|
|
"syscalls:sys_enter_sendmsg",
|
|
|
|
"syscalls:sys_enter_sendmmsg",
|
|
|
|
|
|
|
|
"syscalls:sys_exit_recvfrom",
|
|
|
|
"syscalls:sys_exit_recvmmsg",
|
|
|
|
"syscalls:sys_exit_recvmsg",
|
|
|
|
"syscalls:sys_exit_sendto",
|
|
|
|
"syscalls:sys_exit_sendmsg",
|
|
|
|
"syscalls:sys_exit_sendmmsg",
|
|
|
|
};
|
|
|
|
unsigned int net_events_nr = ARRAY_SIZE(net_events);
|
|
|
|
|
|
|
|
const char * const poll_events[] = {
|
|
|
|
"syscalls:sys_enter_epoll_pwait",
|
|
|
|
"syscalls:sys_enter_epoll_wait",
|
|
|
|
"syscalls:sys_enter_poll",
|
|
|
|
"syscalls:sys_enter_ppoll",
|
|
|
|
"syscalls:sys_enter_pselect6",
|
|
|
|
"syscalls:sys_enter_select",
|
|
|
|
|
|
|
|
"syscalls:sys_exit_epoll_pwait",
|
|
|
|
"syscalls:sys_exit_epoll_wait",
|
|
|
|
"syscalls:sys_exit_poll",
|
|
|
|
"syscalls:sys_exit_ppoll",
|
|
|
|
"syscalls:sys_exit_pselect6",
|
|
|
|
"syscalls:sys_exit_select",
|
|
|
|
};
|
|
|
|
unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
|
|
|
|
|
|
|
|
rec_argc = common_args_nr +
|
|
|
|
disk_events_nr * 4 +
|
|
|
|
net_events_nr * 4 +
|
|
|
|
poll_events_nr * 4 +
|
|
|
|
argc;
|
|
|
|
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
|
|
|
|
|
|
|
if (rec_argv == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
if (asprintf(&filter, "common_pid != %d", getpid()) < 0)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
p = rec_argv;
|
|
|
|
for (i = 0; i < common_args_nr; i++)
|
|
|
|
*p++ = strdup(common_args[i]);
|
|
|
|
|
|
|
|
for (i = 0; i < disk_events_nr; i++) {
|
|
|
|
if (!is_valid_tracepoint(disk_events[i])) {
|
|
|
|
rec_argc -= 4;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
*p++ = "-e";
|
|
|
|
*p++ = strdup(disk_events[i]);
|
|
|
|
*p++ = "--filter";
|
|
|
|
*p++ = filter;
|
|
|
|
}
|
|
|
|
for (i = 0; i < net_events_nr; i++) {
|
|
|
|
if (!is_valid_tracepoint(net_events[i])) {
|
|
|
|
rec_argc -= 4;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
*p++ = "-e";
|
|
|
|
*p++ = strdup(net_events[i]);
|
|
|
|
*p++ = "--filter";
|
|
|
|
*p++ = filter;
|
|
|
|
}
|
|
|
|
for (i = 0; i < poll_events_nr; i++) {
|
|
|
|
if (!is_valid_tracepoint(poll_events[i])) {
|
|
|
|
rec_argc -= 4;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
*p++ = "-e";
|
|
|
|
*p++ = strdup(poll_events[i]);
|
|
|
|
*p++ = "--filter";
|
|
|
|
*p++ = filter;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < (unsigned int)argc; i++)
|
|
|
|
*p++ = argv[i];
|
|
|
|
|
|
|
|
return cmd_record(rec_argc, rec_argv, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
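/*
 * Build the 'perf record' command line for the power/tasks chart.  Fall
 * back to the old power:power_* tracepoints when power:cpu_idle is not
 * available; -P drops the sched events, -T drops the power events and
 * -g adds callchain recording.
 */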
2013-11-28 18:25:19 +04:00
|
|
|
static int timechart__record(struct timechart *tchart, int argc, const char **argv)
|
2009-09-19 15:34:42 +04:00
|
|
|
{
|
2013-11-01 20:25:50 +04:00
|
|
|
unsigned int rec_argc, i, j;
|
|
|
|
const char **rec_argv;
|
|
|
|
const char **p;
|
|
|
|
unsigned int record_elems;
|
|
|
|
|
|
|
|
const char * const common_args[] = {
|
2013-06-05 15:37:21 +04:00
|
|
|
"record", "-a", "-R", "-c", "1",
|
2013-11-01 20:25:50 +04:00
|
|
|
};
|
|
|
|
unsigned int common_args_nr = ARRAY_SIZE(common_args);
|
|
|
|
|
2013-11-01 20:25:51 +04:00
|
|
|
const char * const backtrace_args[] = {
|
|
|
|
"-g",
|
|
|
|
};
|
|
|
|
unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
|
|
|
|
|
2013-11-01 20:25:50 +04:00
|
|
|
const char * const power_args[] = {
|
|
|
|
"-e", "power:cpu_frequency",
|
|
|
|
"-e", "power:cpu_idle",
|
|
|
|
};
|
|
|
|
unsigned int power_args_nr = ARRAY_SIZE(power_args);
|
|
|
|
|
|
|
|
const char * const old_power_args[] = {
|
|
|
|
#ifdef SUPPORT_OLD_POWER_EVENTS
|
2012-10-01 22:20:58 +04:00
|
|
|
"-e", "power:power_start",
|
|
|
|
"-e", "power:power_end",
|
|
|
|
"-e", "power:power_frequency",
|
|
|
|
#endif
|
2013-11-01 20:25:50 +04:00
|
|
|
};
|
|
|
|
unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
|
|
|
|
|
|
|
|
const char * const tasks_args[] = {
|
2012-10-01 22:20:58 +04:00
|
|
|
"-e", "sched:sched_wakeup",
|
|
|
|
"-e", "sched:sched_switch",
|
|
|
|
};
|
2013-11-01 20:25:50 +04:00
|
|
|
unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
|
2011-01-03 19:50:45 +03:00
|
|
|
|
|
|
|
#ifdef SUPPORT_OLD_POWER_EVENTS
|
|
|
|
if (!is_valid_tracepoint("power:cpu_idle") &&
|
|
|
|
is_valid_tracepoint("power:power_start")) {
|
|
|
|
use_old_power_events = 1;
|
2013-11-01 20:25:50 +04:00
|
|
|
power_args_nr = 0;
|
|
|
|
} else {
|
|
|
|
old_power_args_nr = 0;
|
2011-01-03 19:50:45 +03:00
|
|
|
}
|
|
|
|
#endif
|
2009-09-19 15:34:42 +04:00
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
if (tchart->power_only)
|
2013-11-01 20:25:50 +04:00
|
|
|
tasks_args_nr = 0;
|
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
if (tchart->tasks_only) {
|
2013-11-01 20:25:50 +04:00
|
|
|
power_args_nr = 0;
|
|
|
|
old_power_args_nr = 0;
|
|
|
|
}
|
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
if (!tchart->with_backtrace)
|
2013-11-01 20:25:51 +04:00
|
|
|
backtrace_args_no = 0;
|
|
|
|
|
2013-11-01 20:25:50 +04:00
|
|
|
record_elems = common_args_nr + tasks_args_nr +
|
2013-11-01 20:25:51 +04:00
|
|
|
power_args_nr + old_power_args_nr + backtrace_args_no;
|
2013-11-01 20:25:50 +04:00
|
|
|
|
|
|
|
rec_argc = record_elems + argc;
|
2009-09-19 15:34:42 +04:00
|
|
|
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
|
|
|
|
2010-11-13 05:35:06 +03:00
|
|
|
if (rec_argv == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2013-11-01 20:25:50 +04:00
|
|
|
p = rec_argv;
|
|
|
|
for (i = 0; i < common_args_nr; i++)
|
|
|
|
*p++ = strdup(common_args[i]);
|
|
|
|
|
2013-11-01 20:25:51 +04:00
|
|
|
for (i = 0; i < backtrace_args_no; i++)
|
|
|
|
*p++ = strdup(backtrace_args[i]);
|
|
|
|
|
2013-11-01 20:25:50 +04:00
|
|
|
for (i = 0; i < tasks_args_nr; i++)
|
|
|
|
*p++ = strdup(tasks_args[i]);
|
|
|
|
|
|
|
|
for (i = 0; i < power_args_nr; i++)
|
|
|
|
*p++ = strdup(power_args[i]);
|
2009-09-19 15:34:42 +04:00
|
|
|
|
2013-11-01 20:25:50 +04:00
|
|
|
for (i = 0; i < old_power_args_nr; i++)
|
|
|
|
*p++ = strdup(old_power_args[i]);
|
2009-09-19 15:34:42 +04:00
|
|
|
|
2014-03-16 20:06:05 +04:00
|
|
|
for (j = 0; j < (unsigned int)argc; j++)
|
2013-11-01 20:25:50 +04:00
|
|
|
*p++ = argv[j];
|
|
|
|
|
|
|
|
return cmd_record(rec_argc, rec_argv, NULL);
|
2009-09-19 15:34:42 +04:00
|
|
|
}
|
|
|
|
|
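/* Option callback for -p/--process: each argument adds one more filter. */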
2009-10-20 02:09:39 +04:00
|
|
|
static int
|
2012-09-11 02:15:03 +04:00
|
|
|
parse_process(const struct option *opt __maybe_unused, const char *arg,
|
|
|
|
int __maybe_unused unset)
|
2009-10-20 02:09:39 +04:00
|
|
|
{
|
|
|
|
if (arg)
|
|
|
|
add_process_filter(arg);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
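/*
 * Option callback for --highlight: a numeric argument is a duration in
 * nanoseconds, anything else is treated as a task name.  Only one
 * highlight may be given.
 */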
2013-12-17 19:53:49 +04:00
|
|
|
static int
|
|
|
|
parse_highlight(const struct option *opt __maybe_unused, const char *arg,
|
|
|
|
int __maybe_unused unset)
|
|
|
|
{
|
|
|
|
unsigned long duration = strtoul(arg, NULL, 0);
|
|
|
|
|
|
|
|
if (svg_highlight || svg_highlight_name)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
if (duration)
|
|
|
|
svg_highlight = duration;
|
|
|
|
else
|
|
|
|
svg_highlight_name = strdup(arg);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
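/*
 * Parse a time argument of the form "<num>[m|u|n]s" into nanoseconds,
 * e.g. "--io-min-time 3ms" stores 3 * NSEC_PER_MSEC; a bare number is
 * taken as nanoseconds.
 */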
2014-07-08 20:03:43 +04:00
|
|
|
static int
|
|
|
|
parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
|
|
|
|
{
|
|
|
|
char unit = 'n';
|
|
|
|
u64 *value = opt->value;
|
|
|
|
|
|
|
|
if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
|
|
|
|
switch (unit) {
|
|
|
|
case 'm':
|
2016-08-08 18:45:58 +03:00
|
|
|
*value *= NSEC_PER_MSEC;
|
2014-07-08 20:03:43 +04:00
|
|
|
break;
|
|
|
|
case 'u':
|
2016-08-08 18:45:58 +03:00
|
|
|
*value *= NSEC_PER_USEC;
|
2014-07-08 20:03:43 +04:00
|
|
|
break;
|
|
|
|
case 'n':
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
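/*
 * Command entry point.  "perf timechart record" dispatches to the I/O
 * (-I) or power/tasks recorder; otherwise the report path sets up the
 * pager and renders the SVG (output.svg by default).  -P and -T are
 * mutually exclusive in both modes.
 */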
2012-10-01 22:20:58 +04:00
|
|
|
int cmd_timechart(int argc, const char **argv,
|
|
|
|
const char *prefix __maybe_unused)
|
|
|
|
{
|
2013-11-28 18:25:19 +04:00
|
|
|
struct timechart tchart = {
|
|
|
|
.tool = {
|
|
|
|
.comm = process_comm_event,
|
|
|
|
.fork = process_fork_event,
|
|
|
|
.exit = process_exit_event,
|
|
|
|
.sample = process_sample_event,
|
2014-07-06 16:18:21 +04:00
|
|
|
.ordered_events = true,
|
2013-11-28 18:25:19 +04:00
|
|
|
},
|
|
|
|
.proc_num = 15,
|
2016-08-08 18:45:58 +03:00
|
|
|
.min_time = NSEC_PER_MSEC,
|
2014-07-08 20:03:43 +04:00
|
|
|
.merge_dist = 1000,
|
2013-11-28 18:25:19 +04:00
|
|
|
};
|
2012-10-01 22:20:58 +04:00
|
|
|
const char *output_name = "output.svg";
|
2013-11-01 20:25:50 +04:00
|
|
|
const struct option timechart_options[] = {
|
2012-10-01 22:20:58 +04:00
|
|
|
OPT_STRING('i', "input", &input_name, "file", "input file name"),
|
|
|
|
OPT_STRING('o', "output", &output_name, "file", "output file name"),
|
|
|
|
OPT_INTEGER('w', "width", &svg_page_width, "page width"),
|
2013-12-17 19:53:49 +04:00
|
|
|
OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
|
|
|
|
"highlight tasks. Pass duration in ns or process name.",
|
|
|
|
parse_highlight),
|
2013-11-28 18:25:19 +04:00
|
|
|
OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
|
|
|
|
OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
|
2013-11-01 20:25:48 +04:00
|
|
|
"output processes data only"),
|
2009-10-20 02:09:39 +04:00
|
|
|
OPT_CALLBACK('p', "process", NULL, "process",
|
|
|
|
"process selector. Pass a pid or process name.",
|
|
|
|
parse_process),
|
2016-05-19 14:47:37 +03:00
|
|
|
OPT_CALLBACK(0, "symfs", NULL, "directory",
|
|
|
|
"Look for files with symbols relative to this directory",
|
|
|
|
symbol__config_symfs),
|
2013-11-28 18:25:19 +04:00
|
|
|
OPT_INTEGER('n', "proc-num", &tchart.proc_num,
|
2013-11-01 20:25:46 +04:00
|
|
|
"min. number of tasks to print"),
|
2013-12-02 18:37:36 +04:00
|
|
|
OPT_BOOLEAN('t', "topology", &tchart.topology,
|
|
|
|
"sort CPUs according to topology"),
|
2014-07-08 20:03:43 +04:00
|
|
|
OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
|
|
|
|
"skip EAGAIN errors"),
|
|
|
|
OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
|
|
|
|
"all IO faster than min-time will visually appear longer",
|
|
|
|
parse_time),
|
|
|
|
OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
|
|
|
|
"merge events that are merge-dist us apart",
|
|
|
|
parse_time),
|
2015-04-02 16:47:17 +03:00
|
|
|
OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
|
2009-09-12 09:53:05 +04:00
|
|
|
OPT_END()
|
2012-10-01 22:20:58 +04:00
|
|
|
};
|
2015-03-18 16:35:56 +03:00
|
|
|
const char * const timechart_subcommands[] = { "record", NULL };
|
|
|
|
const char *timechart_usage[] = {
|
2012-10-01 22:20:58 +04:00
|
|
|
"perf timechart [<options>] {record}",
|
|
|
|
NULL
|
|
|
|
};
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2014-10-22 19:15:46 +04:00
|
|
|
const struct option timechart_record_options[] = {
|
2013-11-28 18:25:19 +04:00
|
|
|
OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
|
|
|
|
OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
|
2013-11-01 20:25:50 +04:00
|
|
|
"output processes data only"),
|
2014-07-08 20:03:41 +04:00
|
|
|
OPT_BOOLEAN('I', "io-only", &tchart.io_only,
|
|
|
|
"record only IO data"),
|
2013-11-28 18:25:19 +04:00
|
|
|
OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
|
2013-11-01 20:25:50 +04:00
|
|
|
OPT_END()
|
|
|
|
};
|
2014-10-22 19:15:46 +04:00
|
|
|
const char * const timechart_record_usage[] = {
|
2013-11-01 20:25:50 +04:00
|
|
|
"perf timechart record [<options>]",
|
|
|
|
NULL
|
|
|
|
};
|
2015-03-18 16:35:56 +03:00
|
|
|
argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
|
|
|
|
timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
|
2009-09-12 09:53:05 +04:00
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
if (tchart.power_only && tchart.tasks_only) {
|
2013-11-01 20:25:48 +04:00
|
|
|
pr_err("-P and -T options cannot be used at the same time.\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2013-11-01 20:25:50 +04:00
|
|
|
if (argc && !strncmp(argv[0], "rec", 3)) {
|
2014-10-22 19:15:46 +04:00
|
|
|
argc = parse_options(argc, argv, timechart_record_options,
|
|
|
|
timechart_record_usage,
|
2013-11-01 20:25:50 +04:00
|
|
|
PARSE_OPT_STOP_AT_NON_OPTION);
|
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
if (tchart.power_only && tchart.tasks_only) {
|
2013-11-01 20:25:50 +04:00
|
|
|
pr_err("-P and -T options cannot be used at the same time.\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2014-07-08 20:03:41 +04:00
|
|
|
if (tchart.io_only)
|
|
|
|
return timechart__io_record(argc, argv);
|
|
|
|
else
|
|
|
|
return timechart__record(&tchart, argc, argv);
|
2013-11-01 20:25:50 +04:00
|
|
|
} else if (argc)
|
|
|
|
usage_with_options(timechart_usage, timechart_options);
|
2009-09-12 09:53:05 +04:00
|
|
|
|
|
|
|
setup_pager();
|
|
|
|
|
2013-11-28 18:25:19 +04:00
|
|
|
return __cmd_timechart(&tchart, output_name);
|
2009-09-12 09:53:05 +04:00
|
|
|
}
|