sched/accounting: Change cpustat fields to an array
This patch changes the fields in cpustat from a structure to a u64 array. The math gets easier, and the code becomes more flexible.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1322498719-2255-2-git-send-email-glommer@parallels.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Commit: 3292beb340
Parent: 786d6dc7ae
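The change itself is mechanical: every kstat_cpu(cpu).cpustat.<field> access becomes kcpustat_cpu(cpu).cpustat[CPUTIME_<FIELD>], and the cputime64_add()/cputime64_sub() wrappers become plain u64 arithmetic. Below is a minimal userspace model of the before/after shapes; the enum values and struct kernel_cpustat mirror the patch, but the program itself is only an illustrative sketch, not kernel code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* Old shape: one named field per statistic (cputime64_t in the kernel, u64 here). */
struct cpu_usage_stat_old {
    u64 user, nice, system, softirq, irq,
        idle, iowait, steal, guest, guest_nice;
};

/* New shape: an enum index into a flat u64 array. */
enum cpu_usage_stat {
    CPUTIME_USER, CPUTIME_NICE, CPUTIME_SYSTEM, CPUTIME_SOFTIRQ,
    CPUTIME_IRQ, CPUTIME_IDLE, CPUTIME_IOWAIT, CPUTIME_STEAL,
    CPUTIME_GUEST, CPUTIME_GUEST_NICE, NR_STATS,
};

struct kernel_cpustat {
    u64 cpustat[NR_STATS];
};

int main(void)
{
    struct cpu_usage_stat_old old_stat = { .user = 100, .nice = 5 };
    struct kernel_cpustat new_stat = {
        .cpustat = { [CPUTIME_USER] = 100, [CPUTIME_NICE] = 5 },
    };

    /* Old style: every field has to be named explicitly. */
    u64 busy_old = old_stat.user + old_stat.nice;

    /* New style: fields are indexable, so loops and sums just work. */
    u64 total_new = 0;
    for (int i = 0; i < NR_STATS; i++)
        total_new += new_stat.cpustat[i];

    printf("busy_old=%llu total_new=%llu\n",
           (unsigned long long)busy_old, (unsigned long long)total_new);
    return 0;
}

Because the new layout is indexable, a total over all states is a loop rather than ten hand-written additions, which is the "math gets easier" part of the changelog.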
@@ -115,21 +115,21 @@ static void appldata_get_os_data(void *data)
     j = 0;
     for_each_online_cpu(i) {
         os_data->os_cpu[j].per_cpu_user =
-            cputime_to_jiffies(kstat_cpu(i).cpustat.user);
+            cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
         os_data->os_cpu[j].per_cpu_nice =
-            cputime_to_jiffies(kstat_cpu(i).cpustat.nice);
+            cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
         os_data->os_cpu[j].per_cpu_system =
-            cputime_to_jiffies(kstat_cpu(i).cpustat.system);
+            cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
         os_data->os_cpu[j].per_cpu_idle =
-            cputime_to_jiffies(kstat_cpu(i).cpustat.idle);
+            cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
         os_data->os_cpu[j].per_cpu_irq =
-            cputime_to_jiffies(kstat_cpu(i).cpustat.irq);
+            cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
         os_data->os_cpu[j].per_cpu_softirq =
-            cputime_to_jiffies(kstat_cpu(i).cpustat.softirq);
+            cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
         os_data->os_cpu[j].per_cpu_iowait =
-            cputime_to_jiffies(kstat_cpu(i).cpustat.iowait);
+            cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
         os_data->os_cpu[j].per_cpu_steal =
-            cputime_to_jiffies(kstat_cpu(i).cpustat.steal);
+            cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
         os_data->os_cpu[j].cpu_id = i;
         j++;
     }
@@ -218,7 +218,7 @@ static inline void fpu_fxsave(struct fpu *fpu)
 #ifdef CONFIG_SMP
 #define safe_address (__per_cpu_offset[0])
 #else
-#define safe_address (kstat_cpu(0).cpustat.user)
+#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
 #endif

 /*
@@ -95,27 +95,26 @@ static struct dbs_tuners {
     .freq_step = 5,
 };

-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-                                                  cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-    cputime64_t idle_time;
+    u64 idle_time;
     cputime64_t cur_wall_time;
-    cputime64_t busy_time;
+    u64 busy_time;

     cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-    busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-            kstat_cpu(cpu).cpustat.system);
+    busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER] +
+            kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];

-    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+    busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+    busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+    busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+    busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

     idle_time = cputime64_sub(cur_wall_time, busy_time);
     if (wall)
-        *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+        *wall = jiffies_to_usecs(cur_wall_time);

-    return (cputime64_t)jiffies_to_usecs(idle_time);
+    return jiffies_to_usecs(idle_time);
 }

 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
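With the wrappers gone, the governors' busy-time computation reduces to ordinary u64 additions and a subtraction. The following is a self-contained sketch of the same arithmetic as get_cpu_idle_time_jiffy() above, with the jiffies/wall-clock conversions left out; the stat array layout follows the new enum, while the function name and signature are illustrative only.

#include <stdint.h>

typedef uint64_t u64;

enum cpu_usage_stat {
    CPUTIME_USER, CPUTIME_NICE, CPUTIME_SYSTEM, CPUTIME_SOFTIRQ,
    CPUTIME_IRQ, CPUTIME_IDLE, CPUTIME_IOWAIT, CPUTIME_STEAL,
    CPUTIME_GUEST, CPUTIME_GUEST_NICE, NR_STATS,
};

/*
 * Same arithmetic as the patched get_cpu_idle_time_jiffy():
 * busy = user + system + irq + softirq + steal + nice, idle = wall - busy.
 */
static u64 idle_time_from_stats(const u64 stat[NR_STATS],
                                u64 cur_wall_time, u64 *wall)
{
    u64 busy_time = stat[CPUTIME_USER] + stat[CPUTIME_SYSTEM];

    busy_time += stat[CPUTIME_IRQ];
    busy_time += stat[CPUTIME_SOFTIRQ];
    busy_time += stat[CPUTIME_STEAL];
    busy_time += stat[CPUTIME_NICE];

    if (wall)
        *wall = cur_wall_time;

    return cur_wall_time - busy_time;
}

int main(void)
{
    u64 stat[NR_STATS] = { [CPUTIME_USER] = 30, [CPUTIME_SYSTEM] = 10,
                           [CPUTIME_NICE] = 5 };
    u64 wall;

    return (int)idle_time_from_stats(stat, 100, &wall); /* exits with 55 */
}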
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
         dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                         &dbs_info->prev_cpu_wall);
         if (dbs_tuners_ins.ignore_nice)
-            dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+            dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
     }
     return count;
 }
@@ -362,11 +361,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         j_dbs_info->prev_cpu_idle = cur_idle_time;

         if (dbs_tuners_ins.ignore_nice) {
-            cputime64_t cur_nice;
+            u64 cur_nice;
             unsigned long cur_nice_jiffies;

-            cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-                    j_dbs_info->prev_cpu_nice);
+            cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+                    j_dbs_info->prev_cpu_nice;
             /*
              * Assumption: nice time between sampling periods will
              * be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
             cur_nice_jiffies = (unsigned long)
                     cputime64_to_jiffies64(cur_nice);

-            j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+            j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
             idle_time += jiffies_to_usecs(cur_nice_jiffies);
         }

@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

             j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                         &j_dbs_info->prev_cpu_wall);
-            if (dbs_tuners_ins.ignore_nice) {
+            if (dbs_tuners_ins.ignore_nice)
                 j_dbs_info->prev_cpu_nice =
-                        kstat_cpu(j).cpustat.nice;
-            }
+                        kcpustat_cpu(j).cpustat[CPUTIME_NICE];
         }
         this_dbs_info->down_skip = 0;
         this_dbs_info->requested_freq = policy->cur;
@@ -119,27 +119,26 @@ static struct dbs_tuners {
     .powersave_bias = 0,
 };

-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
-                                                  cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
 {
-    cputime64_t idle_time;
+    u64 idle_time;
     cputime64_t cur_wall_time;
-    cputime64_t busy_time;
+    u64 busy_time;

     cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-    busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
-            kstat_cpu(cpu).cpustat.system);
+    busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER] +
+            kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];

-    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
-    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
-    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
-    busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+    busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+    busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+    busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+    busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

     idle_time = cputime64_sub(cur_wall_time, busy_time);
     if (wall)
-        *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+        *wall = jiffies_to_usecs(cur_wall_time);

-    return (cputime64_t)jiffies_to_usecs(idle_time);
+    return jiffies_to_usecs(idle_time);
 }

 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
         dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                         &dbs_info->prev_cpu_wall);
         if (dbs_tuners_ins.ignore_nice)
-            dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+            dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

     }
     return count;
@@ -455,11 +454,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         j_dbs_info->prev_cpu_iowait = cur_iowait_time;

         if (dbs_tuners_ins.ignore_nice) {
-            cputime64_t cur_nice;
+            u64 cur_nice;
             unsigned long cur_nice_jiffies;

-            cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
-                    j_dbs_info->prev_cpu_nice);
+            cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+                    j_dbs_info->prev_cpu_nice;
             /*
              * Assumption: nice time between sampling periods will
              * be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
             cur_nice_jiffies = (unsigned long)
                     cputime64_to_jiffies64(cur_nice);

-            j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+            j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
             idle_time += jiffies_to_usecs(cur_nice_jiffies);
         }

@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

             j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
                         &j_dbs_info->prev_cpu_wall);
-            if (dbs_tuners_ins.ignore_nice) {
+            if (dbs_tuners_ins.ignore_nice)
                 j_dbs_info->prev_cpu_nice =
-                        kstat_cpu(j).cpustat.nice;
-            }
+                        kcpustat_cpu(j).cpustat[CPUTIME_NICE];
         }
         this_dbs_info->cpu = cpu;
         this_dbs_info->rate_mult = 1;
@@ -81,13 +81,13 @@ static int rackmeter_ignore_nice;
  */
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-    cputime64_t retval;
+    u64 retval;

-    retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-            kstat_cpu(cpu).cpustat.iowait);
+    retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
+            kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];

     if (rackmeter_ignore_nice)
-        retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+        retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

     return retval;
 }
@@ -22,29 +22,27 @@
 #define arch_idle_time(cpu) 0
 #endif

-static cputime64_t get_idle_time(int cpu)
+static u64 get_idle_time(int cpu)
 {
-    u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
-    cputime64_t idle;
+    u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);

     if (idle_time == -1ULL) {
         /* !NO_HZ so we can rely on cpustat.idle */
-        idle = kstat_cpu(cpu).cpustat.idle;
-        idle = cputime64_add(idle, arch_idle_time(cpu));
+        idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
+        idle += arch_idle_time(cpu);
     } else
         idle = usecs_to_cputime(idle_time);

     return idle;
 }

-static cputime64_t get_iowait_time(int cpu)
+static u64 get_iowait_time(int cpu)
 {
-    u64 iowait_time = get_cpu_iowait_time_us(cpu, NULL);
-    cputime64_t iowait;
+    u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL);

     if (iowait_time == -1ULL)
         /* !NO_HZ so we can rely on cpustat.iowait */
-        iowait = kstat_cpu(cpu).cpustat.iowait;
+        iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
     else
         iowait = usecs_to_cputime(iowait_time);

@@ -55,33 +53,30 @@ static int show_stat(struct seq_file *p, void *v)
 {
     int i, j;
     unsigned long jif;
-    cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
-    cputime64_t guest, guest_nice;
+    u64 user, nice, system, idle, iowait, irq, softirq, steal;
+    u64 guest, guest_nice;
     u64 sum = 0;
     u64 sum_softirq = 0;
     unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
     struct timespec boottime;

     user = nice = system = idle = iowait =
-        irq = softirq = steal = cputime64_zero;
-    guest = guest_nice = cputime64_zero;
+        irq = softirq = steal = 0;
+    guest = guest_nice = 0;
     getboottime(&boottime);
     jif = boottime.tv_sec;

     for_each_possible_cpu(i) {
-        user = cputime64_add(user, kstat_cpu(i).cpustat.user);
-        nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
-        system = cputime64_add(system, kstat_cpu(i).cpustat.system);
-        idle = cputime64_add(idle, get_idle_time(i));
-        iowait = cputime64_add(iowait, get_iowait_time(i));
-        irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
-        softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
-        steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
-        guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
-        guest_nice = cputime64_add(guest_nice,
-            kstat_cpu(i).cpustat.guest_nice);
+        user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
+        nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+        system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
+        idle += get_idle_time(i);
+        iowait += get_iowait_time(i);
+        irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+        softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+        steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+        guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+        guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
         sum += kstat_cpu_irqs_sum(i);
         sum += arch_irq_stat_cpu(i);

         for (j = 0; j < NR_SOFTIRQS; j++) {
             unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
@@ -106,16 +101,16 @@ static int show_stat(struct seq_file *p, void *v)
            (unsigned long long)cputime64_to_clock_t(guest_nice));
     for_each_online_cpu(i) {
         /* Copy values here to work around gcc-2.95.3, gcc-2.96 */
-        user = kstat_cpu(i).cpustat.user;
-        nice = kstat_cpu(i).cpustat.nice;
-        system = kstat_cpu(i).cpustat.system;
+        user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
+        nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
+        system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
         idle = get_idle_time(i);
         iowait = get_iowait_time(i);
-        irq = kstat_cpu(i).cpustat.irq;
-        softirq = kstat_cpu(i).cpustat.softirq;
-        steal = kstat_cpu(i).cpustat.steal;
-        guest = kstat_cpu(i).cpustat.guest;
-        guest_nice = kstat_cpu(i).cpustat.guest_nice;
+        irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
+        softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
+        steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
+        guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
+        guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
         seq_printf(p,
             "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
             "%llu\n",
@@ -12,10 +12,10 @@ static int uptime_proc_show(struct seq_file *m, void *v)
     struct timespec uptime;
     struct timespec idle;
     int i;
-    cputime_t idletime = cputime_zero;
+    u64 idletime = 0;

     for_each_possible_cpu(i)
-        idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle);
+        idletime += kcpustat_cpu(i).cpustat[CPUTIME_IDLE];

     do_posix_clock_monotonic_gettime(&uptime);
     monotonic_to_bootbased(&uptime);
@@ -6,6 +6,7 @@
 #include <linux/percpu.h>
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/cputime.h>

@@ -15,21 +16,25 @@
  * used by rstatd/perfmeter
  */

-struct cpu_usage_stat {
-    cputime64_t user;
-    cputime64_t nice;
-    cputime64_t system;
-    cputime64_t softirq;
-    cputime64_t irq;
-    cputime64_t idle;
-    cputime64_t iowait;
-    cputime64_t steal;
-    cputime64_t guest;
-    cputime64_t guest_nice;
+enum cpu_usage_stat {
+    CPUTIME_USER,
+    CPUTIME_NICE,
+    CPUTIME_SYSTEM,
+    CPUTIME_SOFTIRQ,
+    CPUTIME_IRQ,
+    CPUTIME_IDLE,
+    CPUTIME_IOWAIT,
+    CPUTIME_STEAL,
+    CPUTIME_GUEST,
+    CPUTIME_GUEST_NICE,
+    NR_STATS,
+};
+
+struct kernel_cpustat {
+    u64 cpustat[NR_STATS];
 };

 struct kernel_stat {
-    struct cpu_usage_stat cpustat;
 #ifndef CONFIG_GENERIC_HARDIRQS
     unsigned int irqs[NR_IRQS];
 #endif
@@ -38,10 +43,13 @@ struct kernel_stat {
 };

 DECLARE_PER_CPU(struct kernel_stat, kstat);
+DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

-#define kstat_cpu(cpu) per_cpu(kstat, cpu)
 /* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu __get_cpu_var(kstat)
+#define kstat_this_cpu (&__get_cpu_var(kstat))
+#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_cpu(cpu) per_cpu(kstat, cpu)
+#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)

 extern unsigned long long nr_context_switches(void);

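After this header change, per-CPU time statistics are reached through kcpustat_cpu(cpu) for an arbitrary CPU or kcpustat_this_cpu for the local one (now a pointer, like the reworked kstat_this_cpu). The following is a self-contained model of how callers index the array through these accessors; the per-CPU machinery is faked with a plain array, so only the macro shapes follow the header.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

enum cpu_usage_stat { CPUTIME_USER, CPUTIME_IDLE, CPUTIME_IOWAIT, NR_STATS }; /* abridged */

struct kernel_cpustat {
    u64 cpustat[NR_STATS];
};

/* Fake "per-CPU" storage: a plain array indexed by CPU number. */
#define NR_CPUS 4
static struct kernel_cpustat fake_kernel_cpustat[NR_CPUS];
static int this_cpu_id;

/* Accessor shapes mirror the new kernel_stat.h; the backing store does not. */
#define kcpustat_cpu(cpu)  (fake_kernel_cpustat[(cpu)])
#define kcpustat_this_cpu  (&fake_kernel_cpustat[this_cpu_id])

int main(void)
{
    kcpustat_this_cpu->cpustat[CPUTIME_USER] += 10;  /* current CPU, pointer form */
    kcpustat_cpu(2).cpustat[CPUTIME_IDLE] += 1000;   /* arbitrary CPU, lvalue form */

    u64 idle_total = 0;
    for (int cpu = 0; cpu < NR_CPUS; cpu++)
        idle_total += kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];

    printf("idle_total=%llu\n", (unsigned long long)idle_total);
    return 0;
}

The kernel itself backs these macros with real per-CPU variables (the DECLARE_PER_CPU above and the DEFINE_PER_CPU/EXPORT_PER_CPU_SYMBOL further down); the plain array here only stands in for that.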
@@ -896,14 +896,14 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 static int irqtime_account_hi_update(void)
 {
-    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+    u64 *cpustat = kcpustat_this_cpu->cpustat;
     unsigned long flags;
     u64 latest_ns;
     int ret = 0;

     local_irq_save(flags);
     latest_ns = this_cpu_read(cpu_hardirq_time);
-    if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->irq))
+    if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_IRQ]))
         ret = 1;
     local_irq_restore(flags);
     return ret;
@@ -911,14 +911,14 @@ static int irqtime_account_hi_update(void)

 static int irqtime_account_si_update(void)
 {
-    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+    u64 *cpustat = kcpustat_this_cpu->cpustat;
     unsigned long flags;
     u64 latest_ns;
     int ret = 0;

     local_irq_save(flags);
     latest_ns = this_cpu_read(cpu_softirq_time);
-    if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat->softirq))
+    if (cputime64_gt(nsecs_to_cputime64(latest_ns), cpustat[CPUTIME_SOFTIRQ]))
         ret = 1;
     local_irq_restore(flags);
     return ret;
@@ -2500,8 +2500,10 @@ unlock:
 #endif

 DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);

 EXPORT_PER_CPU_SYMBOL(kstat);
+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);

 /*
  * Return any ns on the sched_clock that have not yet been accounted in
@@ -2563,8 +2565,9 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 void account_user_time(struct task_struct *p, cputime_t cputime,
                cputime_t cputime_scaled)
 {
-    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-    cputime64_t tmp;
+    u64 *cpustat = kcpustat_this_cpu->cpustat;
+    u64 tmp;
+    int index;

     /* Add user time to process. */
     p->utime = cputime_add(p->utime, cputime);
@@ -2573,10 +2576,9 @@ void account_user_time(struct task_struct *p, cputime_t cputime,

     /* Add user time to cpustat. */
     tmp = cputime_to_cputime64(cputime);
-    if (TASK_NICE(p) > 0)
-        cpustat->nice = cputime64_add(cpustat->nice, tmp);
-    else
-        cpustat->user = cputime64_add(cpustat->user, tmp);
+
+    index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
+    cpustat[index] += tmp;

     cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
     /* Account for user time used */
@@ -2592,8 +2594,8 @@ void account_user_time(struct task_struct *p, cputime_t cputime,
 static void account_guest_time(struct task_struct *p, cputime_t cputime,
                cputime_t cputime_scaled)
 {
-    cputime64_t tmp;
-    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+    u64 tmp;
+    u64 *cpustat = kcpustat_this_cpu->cpustat;

     tmp = cputime_to_cputime64(cputime);

@@ -2605,11 +2607,11 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,

     /* Add guest time to cpustat. */
     if (TASK_NICE(p) > 0) {
-        cpustat->nice = cputime64_add(cpustat->nice, tmp);
-        cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
+        cpustat[CPUTIME_NICE] += tmp;
+        cpustat[CPUTIME_GUEST_NICE] += tmp;
     } else {
-        cpustat->user = cputime64_add(cpustat->user, tmp);
-        cpustat->guest = cputime64_add(cpustat->guest, tmp);
+        cpustat[CPUTIME_USER] += tmp;
+        cpustat[CPUTIME_GUEST] += tmp;
     }
 }

@@ -2622,9 +2624,10 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime,
  */
 static inline
 void __account_system_time(struct task_struct *p, cputime_t cputime,
-            cputime_t cputime_scaled, cputime64_t *target_cputime64)
+            cputime_t cputime_scaled, int index)
 {
-    cputime64_t tmp = cputime_to_cputime64(cputime);
+    u64 tmp = cputime_to_cputime64(cputime);
+    u64 *cpustat = kcpustat_this_cpu->cpustat;

     /* Add system time to process. */
     p->stime = cputime_add(p->stime, cputime);
@@ -2632,7 +2635,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
     account_group_system_time(p, cputime);

     /* Add system time to cpustat. */
-    *target_cputime64 = cputime64_add(*target_cputime64, tmp);
+    cpustat[index] += tmp;
     cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);

     /* Account for system time used */
@@ -2649,8 +2652,7 @@ void __account_system_time(struct task_struct *p, cputime_t cputime,
 void account_system_time(struct task_struct *p, int hardirq_offset,
              cputime_t cputime, cputime_t cputime_scaled)
 {
-    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-    cputime64_t *target_cputime64;
+    int index;

     if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
         account_guest_time(p, cputime, cputime_scaled);
@@ -2658,13 +2660,13 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
     }

     if (hardirq_count() - hardirq_offset)
-        target_cputime64 = &cpustat->irq;
+        index = CPUTIME_IRQ;
     else if (in_serving_softirq())
-        target_cputime64 = &cpustat->softirq;
+        index = CPUTIME_SOFTIRQ;
     else
-        target_cputime64 = &cpustat->system;
+        index = CPUTIME_SYSTEM;

-    __account_system_time(p, cputime, cputime_scaled, target_cputime64);
+    __account_system_time(p, cputime, cputime_scaled, index);
 }

 /*
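The accounting path above now passes an array index instead of a pointer into the old struct: account_system_time() picks CPUTIME_IRQ, CPUTIME_SOFTIRQ or CPUTIME_SYSTEM, and __account_system_time() simply does cpustat[index] += tmp. A self-contained sketch of that dispatch pattern follows; the two boolean context flags stand in for the kernel's hardirq_count()/in_serving_softirq() tests and are assumptions of the model, as is the abridged enum.

#include <stdint.h>

typedef uint64_t u64;

enum cpu_usage_stat { CPUTIME_SYSTEM, CPUTIME_SOFTIRQ, CPUTIME_IRQ, NR_STATS }; /* abridged */

static u64 cpustat[NR_STATS];

/* Mirrors __account_system_time(): one indexed add, no pointer plumbing. */
static void account_system_index(u64 delta, int index)
{
    cpustat[index] += delta;
}

/* Mirrors account_system_time(): pick the slot from the execution context. */
static void account_system(u64 delta, int in_hardirq, int in_softirq)
{
    int index;

    if (in_hardirq)
        index = CPUTIME_IRQ;
    else if (in_softirq)
        index = CPUTIME_SOFTIRQ;
    else
        index = CPUTIME_SYSTEM;

    account_system_index(delta, index);
}

int main(void)
{
    account_system(3, 1, 0);  /* charged to CPUTIME_IRQ */
    account_system(5, 0, 0);  /* charged to CPUTIME_SYSTEM */
    return (int)cpustat[CPUTIME_IRQ]; /* exits with status 3 */
}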
@@ -2673,10 +2675,10 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
  */
 void account_steal_time(cputime_t cputime)
 {
-    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-    cputime64_t cputime64 = cputime_to_cputime64(cputime);
+    u64 *cpustat = kcpustat_this_cpu->cpustat;
+    u64 cputime64 = cputime_to_cputime64(cputime);

-    cpustat->steal = cputime64_add(cpustat->steal, cputime64);
+    cpustat[CPUTIME_STEAL] += cputime64;
 }

 /*
@@ -2685,14 +2687,14 @@ void account_steal_time(cputime_t cputime)
  */
 void account_idle_time(cputime_t cputime)
 {
-    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-    cputime64_t cputime64 = cputime_to_cputime64(cputime);
+    u64 *cpustat = kcpustat_this_cpu->cpustat;
+    u64 cputime64 = cputime_to_cputime64(cputime);
     struct rq *rq = this_rq();

     if (atomic_read(&rq->nr_iowait) > 0)
-        cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+        cpustat[CPUTIME_IOWAIT] += cputime64;
     else
-        cpustat->idle = cputime64_add(cpustat->idle, cputime64);
+        cpustat[CPUTIME_IDLE] += cputime64;
 }

 static __always_inline bool steal_account_process_tick(void)
@@ -2742,16 +2744,16 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
                         struct rq *rq)
 {
     cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
-    cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
-    struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+    u64 tmp = cputime_to_cputime64(cputime_one_jiffy);
+    u64 *cpustat = kcpustat_this_cpu->cpustat;

     if (steal_account_process_tick())
         return;

     if (irqtime_account_hi_update()) {
-        cpustat->irq = cputime64_add(cpustat->irq, tmp);
+        cpustat[CPUTIME_IRQ] += tmp;
     } else if (irqtime_account_si_update()) {
-        cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
+        cpustat[CPUTIME_SOFTIRQ] += tmp;
     } else if (this_cpu_ksoftirqd() == p) {
         /*
          * ksoftirqd time do not get accounted in cpu_softirq_time.
@@ -2759,7 +2761,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
          * Also, p->stime needs to be updated for ksoftirqd.
          */
         __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                    &cpustat->softirq);
+                    CPUTIME_SOFTIRQ);
     } else if (user_tick) {
         account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
     } else if (p == rq->idle) {
@@ -2768,7 +2770,7 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
         account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled);
     } else {
         __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled,
-                    &cpustat->system);
+                    CPUTIME_SYSTEM);
     }
 }
