perf target: Shorten perf_target__ to target__
"perf_target__" was getting unwieldy; for this app domain, "target" should be descriptive enough, and the use of __ to separate the class from the method names should help with avoiding clashes with other code bases.

Reported-by: David Ahern <dsahern@gmail.com>
Suggested-by: Ingo Molnar <mingo@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/20131112113427.GA4053@ghostprotocols.net
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
This commit is contained in:
Parent: 48095b721c
Commit: 602ad878d4
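Before the diff, a minimal sketch (not part of the commit) of how a tool drives the shortened API; it mirrors the validate / parse_uid / strerror pattern of the call sites updated in the hunks below. The setup_target() helper, the stderr reporting and the header path are illustrative assumptions; the in-tree callers report through ui__warning()/ui__error().

/*
 * Illustrative sketch only, not part of this commit: setup_target() is a
 * hypothetical helper and the header path is assumed; in-tree call sites
 * report errors via ui__warning()/ui__error() instead of stderr.
 */
#include <stdio.h>              /* fprintf(), BUFSIZ */
#include "util/target.h"        /* struct target and the target__*() API renamed here */

static int setup_target(struct target *target)
{
        char errbuf[BUFSIZ];
        int err;

        err = target__validate(target);         /* was perf_target__validate() */
        if (err) {
                target__strerror(target, err, errbuf, BUFSIZ);
                fprintf(stderr, "%s\n", errbuf);
        }

        err = target__parse_uid(target);        /* was perf_target__parse_uid() */
        if (err) {
                target__strerror(target, err, errbuf, BUFSIZ);
                fprintf(stderr, "%s\n", errbuf);
                return -1;
        }

        /* No PID/TID, UID or CPU list given: fall back to system-wide mode. */
        if (target__none(target))               /* was perf_target__none() */
                target->system_wide = true;

        return 0;
}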
@@ -1510,13 +1510,13 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
         /*
          * target related setups
          */
-        err = perf_target__validate(&kvm->opts.target);
+        err = target__validate(&kvm->opts.target);
         if (err) {
-                perf_target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
+                target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
                 ui__warning("%s", errbuf);
         }

-        if (perf_target__none(&kvm->opts.target))
+        if (target__none(&kvm->opts.target))
                 kvm->opts.target.system_wide = true;

@@ -506,7 +506,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
          * (apart from group members) have enable_on_exec=1 set,
          * so don't spoil it by prematurely enabling them.
          */
-        if (!perf_target__none(&opts->target))
+        if (!target__none(&opts->target))
                 perf_evlist__enable(evsel_list);

         /*
@@ -535,7 +535,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
          * die with the process and we wait for that. Thus no need to
          * disable events in this case.
          */
-        if (done && !disabled && !perf_target__none(&opts->target)) {
+        if (done && !disabled && !target__none(&opts->target)) {
                 perf_evlist__disable(evsel_list);
                 disabled = true;
         }
@@ -906,7 +906,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)

         argc = parse_options(argc, argv, record_options, record_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
-        if (!argc && perf_target__none(&rec->opts.target))
+        if (!argc && target__none(&rec->opts.target))
                 usage_with_options(record_usage, record_options);

         if (nr_cgroups && !rec->opts.target.system_wide) {
@@ -936,17 +936,17 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
                 goto out_symbol_exit;
         }

-        err = perf_target__validate(&rec->opts.target);
+        err = target__validate(&rec->opts.target);
         if (err) {
-                perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
+                target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
                 ui__warning("%s", errbuf);
         }

-        err = perf_target__parse_uid(&rec->opts.target);
+        err = target__parse_uid(&rec->opts.target);
         if (err) {
                 int saved_errno = errno;

-                perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
+                target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
                 ui__error("%s", errbuf);

                 err = -saved_errno;
@@ -108,7 +108,7 @@ enum {

 static struct perf_evlist *evsel_list;

-static struct perf_target target = {
+static struct target target = {
         .uid = UINT_MAX,
 };

@@ -294,11 +294,10 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)

         attr->inherit = !no_inherit;

-        if (perf_target__has_cpu(&target))
+        if (target__has_cpu(&target))
                 return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

-        if (!perf_target__has_task(&target) &&
-            perf_evsel__is_group_leader(evsel)) {
+        if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
                 attr->disabled = 1;
                 if (!initial_delay)
                         attr->enable_on_exec = 1;
@@ -1236,7 +1235,7 @@ static void print_stat(int argc, const char **argv)
                         fprintf(output, "\'system wide");
                 else if (target.cpu_list)
                         fprintf(output, "\'CPU(s) %s", target.cpu_list);
-                else if (!perf_target__has_task(&target)) {
+                else if (!target__has_task(&target)) {
                         fprintf(output, "\'%s", argv[0]);
                         for (i = 1; i < argc; i++)
                                 fprintf(output, " %s", argv[i]);
@@ -1667,7 +1666,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
         } else if (big_num_opt == 0) /* User passed --no-big-num */
                 big_num = false;

-        if (!argc && perf_target__none(&target))
+        if (!argc && target__none(&target))
                 usage_with_options(stat_usage, options);

         if (run_count < 0) {
@@ -1680,8 +1679,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
         }

         /* no_aggr, cgroup are for system-wide only */
-        if ((aggr_mode != AGGR_GLOBAL || nr_cgroups)
-            && !perf_target__has_cpu(&target)) {
+        if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) &&
+            !target__has_cpu(&target)) {
                 fprintf(stderr, "both cgroup and no-aggregation "
                         "modes only available in system-wide mode\n");

@@ -1694,14 +1693,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
         if (add_default_attributes())
                 goto out;

-        perf_target__validate(&target);
+        target__validate(&target);

         if (perf_evlist__create_maps(evsel_list, &target) < 0) {
-                if (perf_target__has_task(&target)) {
+                if (target__has_task(&target)) {
                         pr_err("Problems finding threads of monitor\n");
                         parse_options_usage(stat_usage, options, "p", 1);
                         parse_options_usage(NULL, options, "t", 1);
-                } else if (perf_target__has_cpu(&target)) {
+                } else if (target__has_cpu(&target)) {
                         perror("failed to parse CPUs map");
                         parse_options_usage(stat_usage, options, "C", 1);
                         parse_options_usage(NULL, options, "a", 1);
@@ -967,7 +967,7 @@ static int __cmd_top(struct perf_top *top)
          * XXX 'top' still doesn't start workloads like record, trace, but should,
          * so leave the check here.
          */
-        if (!perf_target__none(&opts->target))
+        if (!target__none(&opts->target))
                 perf_evlist__enable(top->evlist);

         /* Wait for a minimal set of events before starting the snapshot */
@@ -1053,7 +1053,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
                 .sym_pcnt_filter = 5,
         };
         struct perf_record_opts *opts = &top.record_opts;
-        struct perf_target *target = &opts->target;
+        struct target *target = &opts->target;
         const struct option options[] = {
         OPT_CALLBACK('e', "event", &top.evlist, "event",
                      "event selector. use 'perf list' to list available events",
@@ -1169,24 +1169,24 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)

         setup_browser(false);

-        status = perf_target__validate(target);
+        status = target__validate(target);
         if (status) {
-                perf_target__strerror(target, status, errbuf, BUFSIZ);
+                target__strerror(target, status, errbuf, BUFSIZ);
                 ui__warning("%s", errbuf);
         }

-        status = perf_target__parse_uid(target);
+        status = target__parse_uid(target);
         if (status) {
                 int saved_errno = errno;

-                perf_target__strerror(target, status, errbuf, BUFSIZ);
+                target__strerror(target, status, errbuf, BUFSIZ);
                 ui__error("%s", errbuf);

                 status = -saved_errno;
                 goto out_delete_evlist;
         }

-        if (perf_target__none(target))
+        if (target__none(target))
                 target->system_wide = true;

         if (perf_evlist__create_maps(top.evlist, target) < 0)
@@ -2327,21 +2327,21 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                 }
         }

-        err = perf_target__validate(&trace.opts.target);
+        err = target__validate(&trace.opts.target);
         if (err) {
-                perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
+                target__strerror(&trace.opts.target, err, bf, sizeof(bf));
                 fprintf(trace.output, "%s", bf);
                 goto out_close;
         }

-        err = perf_target__parse_uid(&trace.opts.target);
+        err = target__parse_uid(&trace.opts.target);
         if (err) {
-                perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
+                target__strerror(&trace.opts.target, err, bf, sizeof(bf));
                 fprintf(trace.output, "%s", bf);
                 goto out_close;
         }

-        if (!argc && perf_target__none(&trace.opts.target))
+        if (!argc && target__none(&trace.opts.target))
                 trace.opts.target.system_wide = true;

         if (input_name)
@@ -248,7 +248,7 @@ enum perf_call_graph_mode {
 };

 struct perf_record_opts {
-        struct perf_target target;
+        struct target target;
         int call_graph;
         bool group;
         bool inherit_stat;
@@ -28,7 +28,7 @@ int test__task_exit(void)
         union perf_event *event;
         struct perf_evsel *evsel;
         struct perf_evlist *evlist;
-        struct perf_target target = {
+        struct target target = {
                 .uid = UINT_MAX,
                 .uses_mmap = true,
         };
@@ -811,8 +811,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
         return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 }

-int perf_evlist__create_maps(struct perf_evlist *evlist,
-                             struct perf_target *target)
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
         evlist->threads = thread_map__new_str(target->pid, target->tid,
                                               target->uid);
@@ -820,9 +819,9 @@ int perf_evlist__create_maps(struct perf_evlist *evlist,
         if (evlist->threads == NULL)
                 return -1;

-        if (perf_target__has_task(target))
+        if (target__has_task(target))
                 evlist->cpus = cpu_map__dummy_new();
-        else if (!perf_target__has_cpu(target) && !target->uses_mmap)
+        else if (!target__has_cpu(target) && !target->uses_mmap)
                 evlist->cpus = cpu_map__dummy_new();
         else
                 evlist->cpus = cpu_map__new(target->cpu_list);
@@ -1031,8 +1030,7 @@ out_err:
         return err;
 }

-int perf_evlist__prepare_workload(struct perf_evlist *evlist,
-                                  struct perf_target *target,
+int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
                                   const char *argv[], bool pipe_output,
                                   bool want_signal)
 {
@@ -1084,7 +1082,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
                 exit(-1);
         }

-        if (perf_target__none(target))
+        if (target__none(target))
                 evlist->threads->map[0] = evlist->workload.pid;

         close(child_ready_pipe[1]);
@@ -102,7 +102,7 @@ void perf_evlist__config(struct perf_evlist *evlist,
 int perf_record_opts__config(struct perf_record_opts *opts);

 int perf_evlist__prepare_workload(struct perf_evlist *evlist,
-                                  struct perf_target *target,
+                                  struct target *target,
                                   const char *argv[], bool pipe_output,
                                   bool want_signal);
 int perf_evlist__start_workload(struct perf_evlist *evlist);
@@ -134,8 +134,7 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
         evlist->threads = threads;
 }

-int perf_evlist__create_maps(struct perf_evlist *evlist,
-                             struct perf_target *target);
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
 void perf_evlist__delete_maps(struct perf_evlist *evlist);
 int perf_evlist__apply_filters(struct perf_evlist *evlist);

@@ -645,7 +645,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
                 }
         }

-        if (perf_target__has_cpu(&opts->target))
+        if (target__has_cpu(&opts->target))
                 perf_evsel__set_sample_bit(evsel, CPU);

         if (opts->period)
@@ -653,7 +653,7 @@ void perf_evsel__config(struct perf_evsel *evsel,

         if (!perf_missing_features.sample_id_all &&
             (opts->sample_time || !opts->no_inherit ||
-             perf_target__has_cpu(&opts->target)))
+             target__has_cpu(&opts->target)))
                 perf_evsel__set_sample_bit(evsel, TIME);

         if (opts->raw_samples) {
@@ -696,7 +696,7 @@ void perf_evsel__config(struct perf_evsel *evsel,
          * Setting enable_on_exec for independent events and
          * group leaders for traced executed by perf.
          */
-        if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
+        if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
                 attr->enable_on_exec = 1;
 }

@@ -2006,8 +2006,7 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
         return false;
 }

-int perf_evsel__open_strerror(struct perf_evsel *evsel,
-                              struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                               int err, char *msg, size_t size)
 {
         switch (err) {
@@ -318,8 +318,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,

 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                           char *msg, size_t msgsize);
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
-                              struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                               int err, char *msg, size_t size);

 static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
@@ -1396,12 +1396,12 @@ int machine__for_each_thread(struct machine *machine,
 }

 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
-                                  struct perf_target *target, struct thread_map *threads,
+                                  struct target *target, struct thread_map *threads,
                                   perf_event__handler_t process, bool data_mmap)
 {
-        if (perf_target__has_task(target))
+        if (target__has_task(target))
                 return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
-        else if (perf_target__has_cpu(target))
+        else if (target__has_cpu(target))
                 return perf_event__synthesize_threads(tool, process, machine, data_mmap);
         /* command specified */
         return 0;
@@ -180,10 +180,10 @@ int machine__for_each_thread(struct machine *machine,
                              void *priv);

 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
-                                  struct perf_target *target, struct thread_map *threads,
+                                  struct target *target, struct thread_map *threads,
                                   perf_event__handler_t process, bool data_mmap);
 static inline
-int machine__synthesize_threads(struct machine *machine, struct perf_target *target,
+int machine__synthesize_threads(struct machine *machine, struct target *target,
                                 struct thread_map *threads, bool data_mmap)
 {
         return __machine__synthesize_threads(machine, NULL, target, threads,
|
|||
#include <string.h>
|
||||
|
||||
|
||||
enum perf_target_errno perf_target__validate(struct perf_target *target)
|
||||
enum target_errno target__validate(struct target *target)
|
||||
{
|
||||
enum perf_target_errno ret = PERF_ERRNO_TARGET__SUCCESS;
|
||||
enum target_errno ret = TARGET_ERRNO__SUCCESS;
|
||||
|
||||
if (target->pid)
|
||||
target->tid = target->pid;
|
||||
|
@ -23,42 +23,42 @@ enum perf_target_errno perf_target__validate(struct perf_target *target)
|
|||
/* CPU and PID are mutually exclusive */
|
||||
if (target->tid && target->cpu_list) {
|
||||
target->cpu_list = NULL;
|
||||
if (ret == PERF_ERRNO_TARGET__SUCCESS)
|
||||
ret = PERF_ERRNO_TARGET__PID_OVERRIDE_CPU;
|
||||
if (ret == TARGET_ERRNO__SUCCESS)
|
||||
ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
|
||||
}
|
||||
|
||||
/* UID and PID are mutually exclusive */
|
||||
if (target->tid && target->uid_str) {
|
||||
target->uid_str = NULL;
|
||||
if (ret == PERF_ERRNO_TARGET__SUCCESS)
|
||||
ret = PERF_ERRNO_TARGET__PID_OVERRIDE_UID;
|
||||
if (ret == TARGET_ERRNO__SUCCESS)
|
||||
ret = TARGET_ERRNO__PID_OVERRIDE_UID;
|
||||
}
|
||||
|
||||
/* UID and CPU are mutually exclusive */
|
||||
if (target->uid_str && target->cpu_list) {
|
||||
target->cpu_list = NULL;
|
||||
if (ret == PERF_ERRNO_TARGET__SUCCESS)
|
||||
ret = PERF_ERRNO_TARGET__UID_OVERRIDE_CPU;
|
||||
if (ret == TARGET_ERRNO__SUCCESS)
|
||||
ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
|
||||
}
|
||||
|
||||
/* PID and SYSTEM are mutually exclusive */
|
||||
if (target->tid && target->system_wide) {
|
||||
target->system_wide = false;
|
||||
if (ret == PERF_ERRNO_TARGET__SUCCESS)
|
||||
ret = PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM;
|
||||
if (ret == TARGET_ERRNO__SUCCESS)
|
||||
ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
|
||||
}
|
||||
|
||||
/* UID and SYSTEM are mutually exclusive */
|
||||
if (target->uid_str && target->system_wide) {
|
||||
target->system_wide = false;
|
||||
if (ret == PERF_ERRNO_TARGET__SUCCESS)
|
||||
ret = PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM;
|
||||
if (ret == TARGET_ERRNO__SUCCESS)
|
||||
ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
|
||||
enum target_errno target__parse_uid(struct target *target)
|
||||
{
|
||||
struct passwd pwd, *result;
|
||||
char buf[1024];
|
||||
|
@ -66,7 +66,7 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
|
|||
|
||||
target->uid = UINT_MAX;
|
||||
if (str == NULL)
|
||||
return PERF_ERRNO_TARGET__SUCCESS;
|
||||
return TARGET_ERRNO__SUCCESS;
|
||||
|
||||
/* Try user name first */
|
||||
getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
|
||||
|
@ -79,22 +79,22 @@ enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
|
|||
int uid = strtol(str, &endptr, 10);
|
||||
|
||||
if (*endptr != '\0')
|
||||
return PERF_ERRNO_TARGET__INVALID_UID;
|
||||
return TARGET_ERRNO__INVALID_UID;
|
||||
|
||||
getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
|
||||
|
||||
if (result == NULL)
|
||||
return PERF_ERRNO_TARGET__USER_NOT_FOUND;
|
||||
return TARGET_ERRNO__USER_NOT_FOUND;
|
||||
}
|
||||
|
||||
target->uid = result->pw_uid;
|
||||
return PERF_ERRNO_TARGET__SUCCESS;
|
||||
return TARGET_ERRNO__SUCCESS;
|
||||
}
|
||||
|
||||
/*
|
||||
* This must have a same ordering as the enum perf_target_errno.
|
||||
* This must have a same ordering as the enum target_errno.
|
||||
*/
|
||||
static const char *perf_target__error_str[] = {
|
||||
static const char *target__error_str[] = {
|
||||
"PID/TID switch overriding CPU",
|
||||
"PID/TID switch overriding UID",
|
||||
"UID switch overriding CPU",
|
||||
|
@ -104,7 +104,7 @@ static const char *perf_target__error_str[] = {
|
|||
"Problems obtaining information for user %s",
|
||||
};
|
||||
|
||||
int perf_target__strerror(struct perf_target *target, int errnum,
|
||||
int target__strerror(struct target *target, int errnum,
|
||||
char *buf, size_t buflen)
|
||||
{
|
||||
int idx;
|
||||
|
@ -124,21 +124,19 @@ int perf_target__strerror(struct perf_target *target, int errnum,
|
|||
return 0;
|
||||
}
|
||||
|
||||
if (errnum < __PERF_ERRNO_TARGET__START ||
|
||||
errnum >= __PERF_ERRNO_TARGET__END)
|
||||
if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END)
|
||||
return -1;
|
||||
|
||||
idx = errnum - __PERF_ERRNO_TARGET__START;
|
||||
msg = perf_target__error_str[idx];
|
||||
idx = errnum - __TARGET_ERRNO__START;
|
||||
msg = target__error_str[idx];
|
||||
|
||||
switch (errnum) {
|
||||
case PERF_ERRNO_TARGET__PID_OVERRIDE_CPU
|
||||
... PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM:
|
||||
case TARGET_ERRNO__PID_OVERRIDE_CPU ... TARGET_ERRNO__UID_OVERRIDE_SYSTEM:
|
||||
snprintf(buf, buflen, "%s", msg);
|
||||
break;
|
||||
|
||||
case PERF_ERRNO_TARGET__INVALID_UID:
|
||||
case PERF_ERRNO_TARGET__USER_NOT_FOUND:
|
||||
case TARGET_ERRNO__INVALID_UID:
|
||||
case TARGET_ERRNO__USER_NOT_FOUND:
|
||||
snprintf(buf, buflen, msg, target->uid_str);
|
||||
break;
|
||||
|
||||
|
|
|
@@ -4,7 +4,7 @@
 #include <stdbool.h>
 #include <sys/types.h>

-struct perf_target {
+struct target {
         const char *pid;
         const char *tid;
         const char *cpu_list;
@@ -14,8 +14,8 @@ struct perf_target {
         bool uses_mmap;
 };

-enum perf_target_errno {
-        PERF_ERRNO_TARGET__SUCCESS = 0,
+enum target_errno {
+        TARGET_ERRNO__SUCCESS = 0,

         /*
          * Choose an arbitrary negative big number not to clash with standard
@@ -24,42 +24,40 @@ enum perf_target_errno {
          *
          * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
          */
-        __PERF_ERRNO_TARGET__START = -10000,
-
+        __TARGET_ERRNO__START = -10000,

-        /* for perf_target__validate() */
-        PERF_ERRNO_TARGET__PID_OVERRIDE_CPU = __PERF_ERRNO_TARGET__START,
-        PERF_ERRNO_TARGET__PID_OVERRIDE_UID,
-        PERF_ERRNO_TARGET__UID_OVERRIDE_CPU,
-        PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM,
-        PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM,
+        /* for target__validate() */
+        TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START,
+        TARGET_ERRNO__PID_OVERRIDE_UID,
+        TARGET_ERRNO__UID_OVERRIDE_CPU,
+        TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
+        TARGET_ERRNO__UID_OVERRIDE_SYSTEM,

-        /* for perf_target__parse_uid() */
-        PERF_ERRNO_TARGET__INVALID_UID,
-        PERF_ERRNO_TARGET__USER_NOT_FOUND,
+        /* for target__parse_uid() */
+        TARGET_ERRNO__INVALID_UID,
+        TARGET_ERRNO__USER_NOT_FOUND,

-        __PERF_ERRNO_TARGET__END,
+        __TARGET_ERRNO__END,
 };

-enum perf_target_errno perf_target__validate(struct perf_target *target);
-enum perf_target_errno perf_target__parse_uid(struct perf_target *target);
+enum target_errno target__validate(struct target *target);
+enum target_errno target__parse_uid(struct target *target);

-int perf_target__strerror(struct perf_target *target, int errnum, char *buf,
-                          size_t buflen);
+int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);

-static inline bool perf_target__has_task(struct perf_target *target)
+static inline bool target__has_task(struct target *target)
 {
         return target->tid || target->pid || target->uid_str;
 }

-static inline bool perf_target__has_cpu(struct perf_target *target)
+static inline bool target__has_cpu(struct target *target)
 {
         return target->system_wide || target->cpu_list;
 }

-static inline bool perf_target__none(struct perf_target *target)
+static inline bool target__none(struct target *target)
 {
-        return !perf_target__has_task(target) && !perf_target__has_cpu(target);
+        return !target__has_task(target) && !target__has_cpu(target);
 }

 #endif /* _PERF_TARGET_H */
|
@ -27,7 +27,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
|
|||
float ksamples_per_sec;
|
||||
float esamples_percent;
|
||||
struct perf_record_opts *opts = &top->record_opts;
|
||||
struct perf_target *target = &opts->target;
|
||||
struct target *target = &opts->target;
|
||||
size_t ret = 0;
|
||||
|
||||
if (top->samples) {
|
||||
|
|