cpufreq: schedutil: Remove CAF hispeed logic
We don't use this, so remove it. One less thing to disable via ramdisk.

This reverts the following commits:
3ca7d7a ("cpufreq: schedutil: Add hispeed load tunable")
7493065 ("cpufreq: schedutil: Fix race condition in computing hispeed_util")
0f34ee9 ("cpufreq: schedutil: Keep track of average policy capacity")
36faa28 ("cpufreq: schedutil: Update hispeed load condition")
af541e1 ("cpufreq: schedutil: Add freq_to_util helper function")
5573e73 ("cpufreq: schedutil: Fix hispeed_freq condition")
3024963 ("cpufreq: schedutil: Fix for CR 2040904")

[@0ctobot: Adapted from YaroST12/VIOLENT_Kernel@b2af59d for msm-4.9]
Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
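
For context, the reverted logic keyed off two tunables, hispeed_load (default 90) and hispeed_freq: once a CPU's WALT utilization reached hispeed_load percent of the policy's recent average capacity, schedutil floored the requested utilization at the utilization equivalent of hispeed_freq (scaled by TARGET_LOAD), and jumped straight to fmax when new-task load dominated. The standalone C sketch below is simplified from the freq_to_util()/sugov_walt_adjust() helpers deleted in the diff; the helper shapes mirror the removed code, but the snippet is illustrative only, omits the pl clamp and locking, and the frequency/capacity numbers in main() are arbitrary example values, not taken from this commit.

/*
 * Simplified, userspace-buildable illustration of the CAF hispeed
 * logic this commit removes.  Mirrors the shape of the deleted
 * freq_to_util() and sugov_walt_adjust() helpers; not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define TARGET_LOAD          80   /* same constant used by get_next_freq() */
#define NL_RATIO             75
#define DEFAULT_HISPEED_LOAD 90

/* Scale a frequency (kHz) into utilization units, like freq_to_util(). */
static unsigned long freq_to_util(unsigned long max_cap, unsigned int freq,
				  unsigned int cpuinfo_max_freq)
{
	return max_cap * freq / cpuinfo_max_freq;
}

/*
 * Core of the removed sugov_walt_adjust(): above hispeed_load percent of
 * the average capacity, floor util at hispeed_util; if the new-task load
 * (nl) dominates, go straight to the maximum capacity.
 */
static unsigned long walt_adjust(unsigned long cpu_util, unsigned long max_cap,
				 unsigned long avg_cap, unsigned long nl,
				 unsigned int hispeed_load,
				 unsigned long hispeed_util, bool is_migration)
{
	unsigned long util = cpu_util;
	bool is_hiload = cpu_util >= avg_cap * hispeed_load / 100;

	if (is_hiload && !is_migration && util < hispeed_util)
		util = hispeed_util;

	if (is_hiload && nl >= cpu_util * NL_RATIO / 100)
		util = max_cap;

	return util;
}

int main(void)
{
	/* Arbitrary example values (capacity units and kHz). */
	unsigned long max_cap = 1024;
	unsigned int cpuinfo_max_freq = 2803200, hispeed_freq = 1766400;
	unsigned long hispeed_util =
		freq_to_util(max_cap, hispeed_freq, cpuinfo_max_freq) *
		TARGET_LOAD / 100;

	/*
	 * A CPU at 460/500 of its average capacity is "hiload", so its
	 * utilization is floored at hispeed_util (516 here).
	 */
	printf("adjusted util = %lu\n",
	       walt_adjust(460, max_cap, 500, 0, DEFAULT_HISPEED_LOAD,
			   hispeed_util, false));
	return 0;
}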
0ctobot committed Jun 6, 2019
1 parent a09da0b commit ad4ab20
Showing 1 changed file with 3 additions and 195 deletions.
198 changes: 3 additions & 195 deletions kernel/sched/cpufreq_schedutil.c
@@ -30,8 +30,6 @@ struct sugov_tunables {
struct gov_attr_set attr_set;
unsigned int up_rate_limit_us;
unsigned int down_rate_limit_us;
unsigned int hispeed_load;
unsigned int hispeed_freq;
bool pl;
bool iowait_boost_enable;
};
@@ -47,14 +45,8 @@ struct sugov_policy {
s64 min_rate_limit_ns;
s64 up_rate_delay_ns;
s64 down_rate_delay_ns;
u64 last_ws;
u64 curr_cycles;
u64 last_cyc_update_time;
unsigned long avg_cap;
unsigned int next_freq;
unsigned int cached_raw_freq;
unsigned long hispeed_util;
unsigned long max;

/* The next fields are only needed if fast switch cannot be used. */
struct irq_work irq_work;
@@ -177,7 +169,6 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
}
}

#define TARGET_LOAD 80
/**
* get_next_freq - Compute a new frequency for a given cpufreq policy.
* @sg_policy: schedutil policy object to compute the new frequency for.
@@ -292,88 +283,6 @@ static void sugov_iowait_boost(struct sugov_cpu *sg_cpu, unsigned long *util,
}
}

static unsigned long freq_to_util(struct sugov_policy *sg_policy,
unsigned int freq)
{
return mult_frac(sg_policy->max, freq,
sg_policy->policy->cpuinfo.max_freq);
}

#define KHZ 1000
static void sugov_track_cycles(struct sugov_policy *sg_policy,
unsigned int prev_freq,
u64 upto)
{
u64 delta_ns, cycles;

if (unlikely(!sysctl_sched_use_walt_cpu_util))
return;

/* Track cycles in current window */
delta_ns = upto - sg_policy->last_cyc_update_time;
delta_ns *= prev_freq;
do_div(delta_ns, (NSEC_PER_SEC / KHZ));
cycles = delta_ns;
sg_policy->curr_cycles += cycles;
sg_policy->last_cyc_update_time = upto;
}

static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
unsigned int prev_freq)
{
u64 last_ws = sg_policy->last_ws;
unsigned int avg_freq;

if (unlikely(!sysctl_sched_use_walt_cpu_util))
return;

BUG_ON(curr_ws < last_ws);
if (curr_ws <= last_ws)
return;

/* If we skipped some windows */
if (curr_ws > (last_ws + sched_ravg_window)) {
avg_freq = prev_freq;
/* Reset tracking history */
sg_policy->last_cyc_update_time = curr_ws;
} else {
sugov_track_cycles(sg_policy, prev_freq, curr_ws);
avg_freq = sg_policy->curr_cycles;
avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
}
sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
sg_policy->curr_cycles = 0;
sg_policy->last_ws = curr_ws;
}

#define NL_RATIO 75
#define DEFAULT_HISPEED_LOAD 90
static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long *max)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
unsigned long nl = sg_cpu->walt_load.nl;
unsigned long cpu_util = sg_cpu->util;
bool is_hiload;

if (unlikely(!sysctl_sched_use_walt_cpu_util))
return;

is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
sg_policy->tunables->hispeed_load,
100));

if (is_hiload && !is_migration)
*util = max(*util, sg_policy->hispeed_util);

if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
*util = *max;

if (sg_policy->tunables->pl)
*util = max(*util, sg_cpu->walt_load.pl);
}

#ifdef CONFIG_NO_HZ_COMMON
static bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu)
{
@@ -393,7 +302,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
unsigned long util, max, hs_util;
unsigned long util, max;
unsigned int next_f;
bool busy;

@@ -417,25 +326,12 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
next_f = policy->cpuinfo.max_freq;
} else {
sugov_get_util(&util, &max, sg_cpu->cpu);
if (sg_policy->max != max) {
sg_policy->max = max;
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
}

sg_cpu->util = util;
sg_cpu->max = max;
sg_cpu->flags = flags;
sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
sg_policy->policy->cur);
trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
sg_policy->avg_cap,
max, sg_cpu->walt_load.nl,
sg_cpu->walt_load.pl, flags);

sugov_iowait_boost(sg_cpu, &util, &max);
sugov_walt_adjust(sg_cpu, &util, &max);
next_f = get_next_freq(sg_policy, util, max);
/*
* Do not reduce the frequency if the CPU has not been idle
@@ -490,7 +386,6 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
}

sugov_iowait_boost(j_sg_cpu, &util, &max);
sugov_walt_adjust(j_sg_cpu, &util, &max);
}

return get_next_freq(sg_policy, util, max);
@@ -501,7 +396,7 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
{
struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
unsigned long util, max, hs_util;
unsigned long util, max;
unsigned int next_f;

if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
@@ -513,28 +408,13 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,

raw_spin_lock(&sg_policy->update_lock);

if (sg_policy->max != max) {
sg_policy->max = max;
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
}

sg_cpu->util = util;
sg_cpu->max = max;
sg_cpu->flags = flags;

sugov_set_iowait_boost(sg_cpu, time);
sg_cpu->last_update = time;

sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
sg_policy->policy->cur);

trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap,
max, sg_cpu->walt_load.nl,
sg_cpu->walt_load.pl, flags);

if (sugov_should_update_freq(sg_policy, time)) {
if (flags & SCHED_CPUFREQ_RT_DL) {
next_f = sg_policy->policy->cpuinfo.max_freq;
@@ -553,13 +433,8 @@ static void sugov_update_shared(struct update_util_data *hook, u64 time,
static void sugov_work(struct kthread_work *work)
{
struct sugov_policy *sg_policy = container_of(work, struct sugov_policy, work);
unsigned long flags;

mutex_lock(&sg_policy->work_lock);
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
sugov_track_cycles(sg_policy, sg_policy->policy->cur,
sched_ktime_clock());
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
__cpufreq_driver_target(sg_policy->policy, sg_policy->next_freq,
CPUFREQ_RELATION_L);
mutex_unlock(&sg_policy->work_lock);
@@ -669,58 +544,6 @@ static ssize_t down_rate_limit_us_store(struct gov_attr_set *attr_set,
return count;
}

static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_load);
}

static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

if (kstrtouint(buf, 10, &tunables->hispeed_load))
return -EINVAL;

tunables->hispeed_load = min(100U, tunables->hispeed_load);

return count;
}

static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);

return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_freq);
}

static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
const char *buf, size_t count)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
unsigned int val;
struct sugov_policy *sg_policy;
unsigned long hs_util;
unsigned long flags;

if (kstrtouint(buf, 10, &val))
return -EINVAL;

tunables->hispeed_freq = val;
list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
hs_util = freq_to_util(sg_policy,
sg_policy->tunables->hispeed_freq);
hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
sg_policy->hispeed_util = hs_util;
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
}

return count;
}

static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
{
struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
@@ -762,16 +585,12 @@ static ssize_t iowait_boost_enable_store(struct gov_attr_set *attr_set,

static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
static struct governor_attr pl = __ATTR_RW(pl);
static struct governor_attr iowait_boost_enable = __ATTR_RW(iowait_boost_enable);

static struct attribute *sugov_attributes[] = {
&up_rate_limit_us.attr,
&down_rate_limit_us.attr,
&hispeed_load.attr,
&hispeed_freq.attr,
&pl.attr,
&iowait_boost_enable.attr,
NULL
@@ -886,8 +705,6 @@ static void sugov_tunables_save(struct cpufreq_policy *policy,
}

cached->pl = tunables->pl;
cached->hispeed_load = tunables->hispeed_load;
cached->hispeed_freq = tunables->hispeed_freq;
cached->up_rate_limit_us = tunables->up_rate_limit_us;
cached->down_rate_limit_us = tunables->down_rate_limit_us;
}
@@ -910,8 +727,6 @@ static void sugov_tunables_restore(struct cpufreq_policy *policy)
return;

tunables->pl = cached->pl;
tunables->hispeed_load = cached->hispeed_load;
tunables->hispeed_freq = cached->hispeed_freq;
tunables->up_rate_limit_us = cached->up_rate_limit_us;
tunables->down_rate_limit_us = cached->down_rate_limit_us;
sg_policy->up_rate_delay_ns = cached->up_rate_limit_us;
@@ -964,8 +779,6 @@ static int sugov_init(struct cpufreq_policy *policy)

tunables->up_rate_limit_us = LATENCY_MULTIPLIER;
tunables->down_rate_limit_us = LATENCY_MULTIPLIER;
tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
tunables->hispeed_freq = 0;
lat = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
if (lat) {
tunables->up_rate_limit_us *= lat;
@@ -1092,14 +905,9 @@ static void sugov_stop(struct cpufreq_policy *policy)
static void sugov_limits(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy = policy->governor_data;
unsigned long flags;

if (!policy->fast_switch_enabled) {
mutex_lock(&sg_policy->work_lock);
raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
sugov_track_cycles(sg_policy, sg_policy->policy->cur,
sched_ktime_clock());
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
cpufreq_policy_apply_limits(policy);
mutex_unlock(&sg_policy->work_lock);
}
