// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2014 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

|
#include <linux/cpu_pm.h>
|
|
|
|
#include <linux/cpuidle.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
|
|
|
|
#include <asm/idle.h>
|
|
|
|
#include <asm/pm-cps.h>
|
|
|
|
|
|
|
|
/*
 * Idle states this driver may enter, ordered from shallowest to deepest.
 * Each value indexes the corresponding entry in cps_driver.states[].
 */
enum cps_idle_state {
	/* Coherent: execute the MIPS wait instruction */
	STATE_WAIT = 0,
	/* Execute the MIPS wait instruction with coherence disabled */
	STATE_NC_WAIT,
	/* Gate the core's clock */
	STATE_CLOCK_GATED,
	/* Gate the core's power */
	STATE_POWER_GATED,
	/* Number of states defined above */
	STATE_COUNT
};
|
|
|
|
|
|
|
|
static int cps_nc_enter(struct cpuidle_device *dev,
|
|
|
|
struct cpuidle_driver *drv, int index)
|
|
|
|
{
|
|
|
|
enum cps_pm_state pm_state;
|
|
|
|
int err;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* At least one core must remain powered up & clocked in order for the
|
|
|
|
* system to have any hope of functioning.
|
|
|
|
*
|
|
|
|
* TODO: don't treat core 0 specially, just prevent the final core
|
|
|
|
* TODO: remap interrupt affinity temporarily
|
|
|
|
*/
|
2017-08-13 05:49:37 +03:00
|
|
|
if (cpus_are_siblings(0, dev->cpu) && (index > STATE_NC_WAIT))
|
2014-04-14 19:25:29 +04:00
|
|
|
index = STATE_NC_WAIT;
|
|
|
|
|
|
|
|
/* Select the appropriate cps_pm_state */
|
|
|
|
switch (index) {
|
|
|
|
case STATE_NC_WAIT:
|
|
|
|
pm_state = CPS_PM_NC_WAIT;
|
|
|
|
break;
|
|
|
|
case STATE_CLOCK_GATED:
|
|
|
|
pm_state = CPS_PM_CLOCK_GATED;
|
|
|
|
break;
|
|
|
|
case STATE_POWER_GATED:
|
|
|
|
pm_state = CPS_PM_POWER_GATED;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
BUG();
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Notify listeners the CPU is about to power down */
|
|
|
|
if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter())
|
|
|
|
return -EINTR;
|
|
|
|
|
|
|
|
/* Enter that state */
|
|
|
|
err = cps_pm_enter_state(pm_state);
|
|
|
|
|
|
|
|
/* Notify listeners the CPU is back up */
|
|
|
|
if (pm_state == CPS_PM_POWER_GATED)
|
|
|
|
cpu_pm_exit();
|
|
|
|
|
|
|
|
return err ?: index;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct cpuidle_driver cps_driver = {
|
|
|
|
.name = "cpc_cpuidle",
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
.states = {
|
|
|
|
[STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE,
|
|
|
|
[STATE_NC_WAIT] = {
|
|
|
|
.enter = cps_nc_enter,
|
|
|
|
.exit_latency = 200,
|
|
|
|
.target_residency = 450,
|
|
|
|
.name = "nc-wait",
|
|
|
|
.desc = "non-coherent MIPS wait",
|
|
|
|
},
|
|
|
|
[STATE_CLOCK_GATED] = {
|
|
|
|
.enter = cps_nc_enter,
|
|
|
|
.exit_latency = 300,
|
|
|
|
.target_residency = 700,
|
2014-11-12 18:03:50 +03:00
|
|
|
.flags = CPUIDLE_FLAG_TIMER_STOP,
|
2014-04-14 19:25:29 +04:00
|
|
|
.name = "clock-gated",
|
|
|
|
.desc = "core clock gated",
|
|
|
|
},
|
|
|
|
[STATE_POWER_GATED] = {
|
|
|
|
.enter = cps_nc_enter,
|
|
|
|
.exit_latency = 600,
|
|
|
|
.target_residency = 1000,
|
2014-11-12 18:03:50 +03:00
|
|
|
.flags = CPUIDLE_FLAG_TIMER_STOP,
|
2014-04-14 19:25:29 +04:00
|
|
|
.name = "power-gated",
|
|
|
|
.desc = "core power gated",
|
|
|
|
},
|
|
|
|
},
|
|
|
|
.state_count = STATE_COUNT,
|
|
|
|
.safe_state_index = 0,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Undo cps_cpuidle_init: unregister every per-CPU cpuidle device, then
 * unregister the driver itself.
 */
static void __init cps_cpuidle_unregister(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpuidle_unregister_device(&per_cpu(cpuidle_dev, cpu));

	cpuidle_unregister_driver(&cps_driver);
}
|
|
|
|
|
|
|
|
/*
 * Probe which power-management states the CPS PM code supports, trim the
 * driver's state table accordingly, then register the driver and one
 * cpuidle device per possible CPU. On any device registration failure,
 * everything registered so far is torn down.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init cps_cpuidle_init(void)
{
	struct cpuidle_device *dev;
	int ret, cpu, i;

	/*
	 * Detect supported states: each unsupported deep state truncates the
	 * table at the deepest remaining shallower state.
	 */
	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		cps_driver.state_count = STATE_CLOCK_GATED + 1;
	if (!cps_pm_support_state(CPS_PM_CLOCK_GATED))
		cps_driver.state_count = STATE_NC_WAIT + 1;
	if (!cps_pm_support_state(CPS_PM_NC_WAIT))
		cps_driver.state_count = STATE_WAIT + 1;

	/* Inform the user if some states are unavailable */
	if (cps_driver.state_count < STATE_COUNT) {
		int deepest = cps_driver.state_count - 1;

		pr_info("cpuidle-cps: limited to ");
		if (deepest == STATE_WAIT)
			pr_cont("coherent wait\n");
		else if (deepest == STATE_NC_WAIT)
			pr_cont("non-coherent wait\n");
		else if (deepest == STATE_CLOCK_GATED)
			pr_cont("clock gating\n");
	}

	/*
	 * Set the coupled flag on the appropriate states if this system
	 * requires it.
	 */
	if (coupled_coherence)
		for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++)
			cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED;

	ret = cpuidle_register_driver(&cps_driver);
	if (ret) {
		pr_err("Failed to register CPS cpuidle driver\n");
		return ret;
	}

	for_each_possible_cpu(cpu) {
		dev = &per_cpu(cpuidle_dev, cpu);
		dev->cpu = cpu;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
		cpumask_copy(&dev->coupled_cpus, &cpu_sibling_map[cpu]);
#endif

		ret = cpuidle_register_device(dev);
		if (ret) {
			pr_err("Failed to register CPU%d cpuidle device\n",
			       cpu);
			goto err_out;
		}
	}

	return 0;
err_out:
	cps_cpuidle_unregister();
	return ret;
}
device_initcall(cps_cpuidle_init);
|