This is just Venki's patch[*] for x86 ported to ia64.

* http://marc.info/?l=linux-kernel&m=120249201318159&w=2

Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Author: Tony Luck
Date:   2008-02-08 11:53:09 -08:00
Parent: aa91a2e900
Commit: 427639354f
1 changed file with 15 additions and 29 deletions


@@ -52,7 +52,6 @@
 #include "sigframe.h"
 
 void (*ia64_mark_idle)(int);
 
-static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
 unsigned long boot_option_idle_override = 0;
 EXPORT_SYMBOL(boot_option_idle_override);
@@ -254,33 +253,23 @@ static inline void play_dead(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
+ * pm_idle and update to new pm_idle value. Required while changing pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
 void cpu_idle_wait(void)
 {
-        unsigned int cpu, this_cpu = get_cpu();
-        cpumask_t map;
-        cpumask_t tmp = current->cpus_allowed;
-
-        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
-        put_cpu();
-
-        cpus_clear(map);
-        for_each_online_cpu(cpu) {
-                per_cpu(cpu_idle_state, cpu) = 1;
-                cpu_set(cpu, map);
-        }
-
-        __get_cpu_var(cpu_idle_state) = 0;
-
-        wmb();
-        do {
-                ssleep(1);
-                for_each_online_cpu(cpu) {
-                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
-                                cpu_clear(cpu, map);
-                }
-                cpus_and(map, map, cpu_online_map);
-        } while (!cpus_empty(map));
-        set_cpus_allowed(current, tmp);
+        smp_mb();
+        /* kick all the CPUs so that they exit out of pm_idle */
+        smp_call_function(do_nothing, NULL, 0, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
@@ -308,9 +297,6 @@ cpu_idle (void)
 #ifdef CONFIG_SMP
                         min_xtp();
 #endif
-                        if (__get_cpu_var(cpu_idle_state))
-                                __get_cpu_var(cpu_idle_state) = 0;
-
                         rmb();
                         if (mark_idle)
                                 (*mark_idle)(1);
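
Usage note (not part of the patch): the new comment on cpu_idle_wait() spells out the calling convention, so below is a minimal, hypothetical sketch of a caller that swaps in a new pm_idle handler and then uses cpu_idle_wait() to make sure no CPU is still running the old one. The names my_new_idle() and switch_idle_handler() are made up for illustration; real callers in this era were drivers such as the ACPI processor idle code.

/* Hypothetical illustration only; not from the kernel tree. */
static void my_new_idle(void)
{
        cpu_relax();            /* stand-in for a real low-power idle loop */
}

static void switch_idle_handler(void)
{
        pm_idle = my_new_idle;  /* publish the new handler first */
        cpu_idle_wait();        /* then wait: once this returns, no CPU
                                 * will invoke the old handler again, so
                                 * it is safe to free or unload it */
}

The old implementation reached the same guarantee by polling a per-CPU flag with ssleep(1); the ported version simply kicks every CPU with an IPI (smp_call_function with wait=1), which forces each one out of the old pm_idle and back around the idle loop where it reloads the pointer.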