Merge back cpufreq material for 5.10.
This commit is contained in:
Commit fccd2f0e62
@@ -9,6 +9,7 @@
 /* Replace task scheduler's default frequency-invariant accounting */
 #define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
 
 /* Replace task scheduler's default cpu-invariant accounting */
 #define arch_scale_cpu_capacity topology_get_cpu_scale
 
@@ -27,6 +27,7 @@ void topology_scale_freq_tick(void);
 
 /* Replace task scheduler's default frequency-invariant accounting */
 #define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
 
 /* Replace task scheduler's default cpu-invariant accounting */
 #define arch_scale_cpu_capacity topology_get_cpu_scale
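These defines override the scheduler's weak defaults: when an architecture provides arch_scale_freq_capacity(), PELT scales accrued time deltas by the current frequency so utilization is comparable across operating points. A minimal sketch of that use, assuming the mainline convention that the scale factor is SCHED_CAPACITY_SCALE (1024) at maximum frequency; the helper name is illustrative, not from this commit:

	/* Illustrative only: a CPU running at half its maximum frequency
	 * should accrue half the utilization for the same wall time. */
	static u64 freq_invariant_delta(u64 delta, int cpu)
	{
		return (delta * arch_scale_freq_capacity(cpu))
			>> SCHED_CAPACITY_SHIFT;
	}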
@@ -246,6 +246,13 @@ static int __init init_amu_fie(void)
 		static_branch_enable(&amu_fie_key);
 	}
 
+	/*
+	 * If the system is not fully invariant after AMU init, disable
+	 * partial use of counters for frequency invariance.
+	 */
+	if (!topology_scale_freq_invariant())
+		static_branch_disable(&amu_fie_key);
+
 free_valid_mask:
 	free_cpumask_var(valid_cpus);
 
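amu_fie_key is the static key gating counter-based frequency invariance on arm64. The new check rolls it back when activity monitors cover only part of the system and cpufreq cannot cover the rest: a scale factor derived from counters on some CPUs and from nothing on others would be inconsistent. A sketch of the gating helper used elsewhere in this file, assuming the usual static-key pattern (treat the exact body as an assumption):

	static inline bool amu_freq_invariant(void)
	{
		return static_branch_unlikely(&amu_fie_key);
	}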
@@ -253,7 +260,7 @@ free_valid_mask:
 }
 late_initcall_sync(init_amu_fie);
 
-bool arch_freq_counters_available(struct cpumask *cpus)
+bool arch_freq_counters_available(const struct cpumask *cpus)
 {
 	return amu_freq_invariant() &&
 	       cpumask_subset(cpus, amu_fie_cpus);
@@ -21,18 +21,27 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 
-__weak bool arch_freq_counters_available(struct cpumask *cpus)
+bool topology_scale_freq_invariant(void)
+{
+	return cpufreq_supports_freq_invariance() ||
+	       arch_freq_counters_available(cpu_online_mask);
+}
+
+__weak bool arch_freq_counters_available(const struct cpumask *cpus)
 {
 	return false;
 }
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
 
-void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+void arch_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 			 unsigned long max_freq)
 {
 	unsigned long scale;
 	int i;
 
+	if (WARN_ON_ONCE(!cur_freq || !max_freq))
+		return;
+
 	/*
 	 * If the use of counters for FIE is enabled, just return as we don't
 	 * want to update the scale factor with information from CPUFREQ.
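The remainder of arch_set_freq_scale() is unchanged and therefore outside the hunk. For context, a sketch of its effect, reconstructed from the mainline function (treat it as background, not part of this diff): after the counter check it scales the current frequency into the 0..SCHED_CAPACITY_SCALE range and publishes it per CPU.

	if (arch_freq_counters_available(cpus))
		return;

	/* 1024 * cur_freq / max_freq, published for every policy CPU. */
	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

	for_each_cpu(i, cpus)
		per_cpu(freq_scale, i) = scale;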
@@ -40,16 +40,8 @@ static int set_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	struct private_data *priv = policy->driver_data;
 	unsigned long freq = policy->freq_table[index].frequency;
-	int ret;
-
-	ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
-
-	if (!ret) {
-		arch_set_freq_scale(policy->related_cpus, freq,
-				    policy->cpuinfo.max_freq);
-	}
 
-	return ret;
+	return dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);
 }
 
 /*
@@ -61,6 +61,12 @@ static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 static DEFINE_RWLOCK(cpufreq_driver_lock);
 
+static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
+bool cpufreq_supports_freq_invariance(void)
+{
+	return static_branch_likely(&cpufreq_freq_invariance);
+}
+
 /* Flag to suspend/resume CPUFreq governors */
 static bool cpufreq_suspended;
@@ -154,8 +160,9 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
 
-__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
-				unsigned long max_freq)
+__weak void arch_set_freq_scale(const struct cpumask *cpus,
+				unsigned long cur_freq,
+				unsigned long max_freq)
 {
 }
 EXPORT_SYMBOL_GPL(arch_set_freq_scale);
@@ -446,6 +453,10 @@ void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
 
 	cpufreq_notify_post_transition(policy, freqs, transition_failed);
 
+	arch_set_freq_scale(policy->related_cpus,
+			    policy->cur,
+			    policy->cpuinfo.max_freq);
+
 	policy->transition_ongoing = false;
 	policy->transition_task = NULL;
 
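With the scale-factor update centralized in the transition path, drivers that go through the regular target()/target_index() flow no longer need their own arch_set_freq_scale() calls, which is why the driver hunks below only delete code. A simplified sketch of the calling sequence, modeled on cpufreq's __target_index(); the body is an illustration, not the verbatim function:

	static int example_target_index(struct cpufreq_policy *policy, int index)
	{
		struct cpufreq_freqs freqs = {
			.old = policy->cur,
			.new = policy->freq_table[index].frequency,
		};
		int ret;

		cpufreq_freq_transition_begin(policy, &freqs);
		ret = cpufreq_driver->target_index(policy, index);
		/* transition_end() now also calls arch_set_freq_scale(). */
		cpufreq_freq_transition_end(policy, &freqs, ret != 0);

		return ret;
	}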
@@ -2056,9 +2067,15 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
 					unsigned int target_freq)
 {
-	target_freq = clamp_val(target_freq, policy->min, policy->max);
+	unsigned int freq;
 
-	return cpufreq_driver->fast_switch(policy, target_freq);
+	target_freq = clamp_val(target_freq, policy->min, policy->max);
+	freq = cpufreq_driver->fast_switch(policy, target_freq);
+
+	arch_set_freq_scale(policy->related_cpus, freq,
+			    policy->cpuinfo.max_freq);
+
+	return freq;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
 
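Fast switching is the other path into the core, driven from scheduler context, and it now updates the scale factor with the frequency the driver actually chose rather than the requested one. A hedged sketch of a schedutil-style caller consuming the return value; the helper below is illustrative, not mainline code:

	/* Illustrative caller: apply a frequency from scheduler context. */
	static void example_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
	{
		unsigned int freq = cpufreq_driver_fast_switch(policy, target_freq);

		if (freq) {
			/* The scale factor was already updated by the core. */
			policy->cur = freq;
		}
	}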
@@ -2710,6 +2727,15 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	cpufreq_driver = driver_data;
 	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
+	/*
+	 * Mark support for the scheduler's frequency invariance engine for
+	 * drivers that implement target(), target_index() or fast_switch().
+	 */
+	if (!cpufreq_driver->setpolicy) {
+		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
+		pr_debug("supports frequency invariance");
+	}
+
 	if (driver_data->setpolicy)
 		driver_data->flags |= CPUFREQ_CONST_LOOPS;
 
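The ->setpolicy check is the key distinction: a setpolicy driver selects frequencies internally, so the core never learns the chosen frequency and cannot drive arch_set_freq_scale() on its behalf. A hypothetical helper stating the same predicate positively, for illustration only (not part of the patch):

	/* Hypothetical: drivers that report the chosen frequency back to
	 * the core, making centralized scale-factor updates possible. */
	static bool driver_reports_frequency(const struct cpufreq_driver *drv)
	{
		return drv->target || drv->target_index || drv->fast_switch;
	}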
@@ -2779,6 +2805,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	cpus_read_lock();
 	subsys_interface_unregister(&cpufreq_interface);
 	remove_boost_sysfs_file();
+	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
 	cpuhp_remove_state_nocalls_cpuslocked(hp_online);
 
 	write_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -85,8 +85,6 @@ static int qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
 	if (icc_scaling_enabled)
 		qcom_cpufreq_set_bw(policy, freq);
 
-	arch_set_freq_scale(policy->related_cpus, freq,
-			    policy->cpuinfo.max_freq);
 	return 0;
 }
 
@@ -113,16 +111,11 @@ static unsigned int qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
 {
 	void __iomem *perf_state_reg = policy->driver_data;
 	unsigned int index;
-	unsigned long freq;
 
 	index = policy->cached_resolved_idx;
 	writel_relaxed(index, perf_state_reg);
 
-	freq = policy->freq_table[index].frequency;
-	arch_set_freq_scale(policy->related_cpus, freq,
-			    policy->cpuinfo.max_freq);
-
-	return freq;
+	return policy->freq_table[index].frequency;
 }
 
 static int qcom_cpufreq_hw_read_lut(struct device *cpu_dev,
@@ -48,16 +48,11 @@ static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
 static int
 scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	int ret;
 	struct scmi_data *priv = policy->driver_data;
 	struct scmi_perf_ops *perf_ops = handle->perf_ops;
 	u64 freq = policy->freq_table[index].frequency;
 
-	ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
-	if (!ret)
-		arch_set_freq_scale(policy->related_cpus, freq,
-				    policy->cpuinfo.max_freq);
-	return ret;
+	return perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
 }
 
 static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
@@ -67,11 +62,8 @@ static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
 	struct scmi_perf_ops *perf_ops = handle->perf_ops;
 
 	if (!perf_ops->freq_set(handle, priv->domain_id,
-			       target_freq * 1000, true)) {
-		arch_set_freq_scale(policy->related_cpus, target_freq,
-				    policy->cpuinfo.max_freq);
+			       target_freq * 1000, true))
 		return target_freq;
-	}
 
 	return 0;
 }
@@ -47,9 +47,8 @@ static unsigned int scpi_cpufreq_get_rate(unsigned int cpu)
 static int
 scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
 {
-	unsigned long freq = policy->freq_table[index].frequency;
+	u64 rate = policy->freq_table[index].frequency * 1000;
 	struct scpi_data *priv = policy->driver_data;
-	u64 rate = freq * 1000;
 	int ret;
 
 	ret = clk_set_rate(priv->clk, rate);
@@ -60,9 +59,6 @@ scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
 	if (clk_get_rate(priv->clk) != rate)
 		return -EIO;
 
-	arch_set_freq_scale(policy->related_cpus, freq,
-			    policy->cpuinfo.max_freq);
-
 	return 0;
 }
 
@@ -182,7 +182,6 @@ static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
 {
 	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
 	unsigned int freqs_new;
-	int ret;
 
 	cur_cluster = cpu_to_cluster(cpu);
 	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
@@ -197,15 +196,8 @@ static int ve_spc_cpufreq_set_target(struct cpufreq_policy *policy,
 		new_cluster = A15_CLUSTER;
 	}
 
-	ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
-				      freqs_new);
-
-	if (!ret) {
-		arch_set_freq_scale(policy->related_cpus, freqs_new,
-				    policy->cpuinfo.max_freq);
-	}
-
-	return ret;
+	return ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
+				       freqs_new);
 }
 
 static inline u32 get_table_count(struct cpufreq_frequency_table *table)
@@ -30,7 +30,9 @@ static inline unsigned long topology_get_freq_scale(int cpu)
 	return per_cpu(freq_scale, cpu);
 }
 
-bool arch_freq_counters_available(struct cpumask *cpus);
+bool topology_scale_freq_invariant(void);
+
+bool arch_freq_counters_available(const struct cpumask *cpus);
 
 DECLARE_PER_CPU(unsigned long, thermal_pressure);
 
@@ -217,6 +217,7 @@ void refresh_frequency_limits(struct cpufreq_policy *policy);
 void cpufreq_update_policy(unsigned int cpu);
 void cpufreq_update_limits(unsigned int cpu);
 bool have_governor_per_policy(void);
+bool cpufreq_supports_freq_invariance(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
@@ -237,6 +238,10 @@ static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
 {
 	return 0;
 }
+static inline bool cpufreq_supports_freq_invariance(void)
+{
+	return false;
+}
 static inline void disable_cpufreq(void) { }
 #endif
 
@@ -1006,7 +1011,8 @@ static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
 extern void arch_freq_prepare_all(void);
 extern unsigned int arch_freq_get_on_cpu(int cpu);
 
-extern void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
+extern void arch_set_freq_scale(const struct cpumask *cpus,
+				unsigned long cur_freq,
 				unsigned long max_freq);
 
 /* the following are really really optional */