sched, timers: cleanup avenrun users
avenrun is a rough estimate, so we don't have to worry about consistency of the three avenrun values. Remove the xtime_lock dependency and provide a function to scale the values. Clean up the users. [ Impact: cleanup ] Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Acked-by: Peter Zijlstra <peterz@infradead.org>
This commit is contained in:
Parent
dce48a84ad
Commit
2d02494f5a
|
@ -12,20 +12,14 @@
|
|||
|
||||
static int loadavg_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
int a, b, c;
|
||||
unsigned long seq;
|
||||
unsigned long avnrun[3];
|
||||
|
||||
do {
|
||||
seq = read_seqbegin(&xtime_lock);
|
||||
a = avenrun[0] + (FIXED_1/200);
|
||||
b = avenrun[1] + (FIXED_1/200);
|
||||
c = avenrun[2] + (FIXED_1/200);
|
||||
} while (read_seqretry(&xtime_lock, seq));
|
||||
get_avenrun(avnrun, FIXED_1/200, 0);
|
||||
|
||||
seq_printf(m, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
|
||||
LOAD_INT(a), LOAD_FRAC(a),
|
||||
LOAD_INT(b), LOAD_FRAC(b),
|
||||
LOAD_INT(c), LOAD_FRAC(c),
|
||||
seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
|
||||
LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
|
||||
LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
|
||||
LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
|
||||
nr_running(), nr_threads,
|
||||
task_active_pid_ns(current)->last_pid);
|
||||
return 0;
|
||||
|
|
|
@ -116,6 +116,7 @@ struct fs_struct;
|
|||
* 11 bit fractions.
|
||||
*/
|
||||
extern unsigned long avenrun[]; /* Load averages */
|
||||
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
|
||||
|
||||
#define FSHIFT 11 /* nr of bits of precision */
|
||||
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
|
||||
|
|
|
@ -2868,6 +2868,21 @@ static unsigned long calc_load_update;
|
|||
unsigned long avenrun[3];
|
||||
EXPORT_SYMBOL(avenrun);
|
||||
|
||||
/**
|
||||
* get_avenrun - get the load average array
|
||||
* @loads: pointer to dest load array
|
||||
* @offset: offset to add
|
||||
* @shift: shift count to shift the result left
|
||||
*
|
||||
* These values are estimates at best, so no need for locking.
|
||||
*/
|
||||
void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
|
||||
{
|
||||
loads[0] = (avenrun[0] + offset) << shift;
|
||||
loads[1] = (avenrun[1] + offset) << shift;
|
||||
loads[2] = (avenrun[2] + offset) << shift;
|
||||
}
|
||||
|
||||
static unsigned long
|
||||
calc_load(unsigned long load, unsigned long exp, unsigned long active)
|
||||
{
|
||||
|
|
|
@ -1356,37 +1356,17 @@ int do_sysinfo(struct sysinfo *info)
|
|||
{
|
||||
unsigned long mem_total, sav_total;
|
||||
unsigned int mem_unit, bitcount;
|
||||
unsigned long seq;
|
||||
struct timespec tp;
|
||||
|
||||
memset(info, 0, sizeof(struct sysinfo));
|
||||
|
||||
do {
|
||||
struct timespec tp;
|
||||
seq = read_seqbegin(&xtime_lock);
|
||||
ktime_get_ts(&tp);
|
||||
monotonic_to_bootbased(&tp);
|
||||
info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
|
||||
|
||||
/*
|
||||
* This is annoying. The below is the same thing
|
||||
* posix_get_clock_monotonic() does, but it wants to
|
||||
* take the lock which we want to cover the loads stuff
|
||||
* too.
|
||||
*/
|
||||
get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
|
||||
|
||||
getnstimeofday(&tp);
|
||||
tp.tv_sec += wall_to_monotonic.tv_sec;
|
||||
tp.tv_nsec += wall_to_monotonic.tv_nsec;
|
||||
monotonic_to_bootbased(&tp);
|
||||
if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
|
||||
tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
|
||||
tp.tv_sec++;
|
||||
}
|
||||
info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
|
||||
|
||||
info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
|
||||
info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
|
||||
info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
|
||||
|
||||
info->procs = nr_threads;
|
||||
} while (read_seqretry(&xtime_lock, seq));
|
||||
info->procs = nr_threads;
|
||||
|
||||
si_meminfo(info);
|
||||
si_swapinfo(info);
|
||||
|
|
Loading…
Reference in new issue