Remove the last of the MP_DECISION_PATCH

Guard the mpdecision-only cpu_utilization plumbing behind
__MP_DECISION_PATCH__ so it compiles out by default, drop the
CONFIG_EARLYSUSPEND screen-state hooks, retire the immediate-effect
update_sampling_rate() helper and the cpus_timer_done per-policy
bookkeeping in store_powersave_bias(), and retune the defaults:
up_threshold 80 -> 63, down_differential 10 -> 26.
/drivers/cpufreq/cpufreq_ondemand.c
blob:0196b2b8e89de05bb09461280a06c014346b8ade -> blob:5e7c2f74e3379e424a804a2211c45f74f9dabbed
--- drivers/cpufreq/cpufreq_ondemand.c
+++ drivers/cpufreq/cpufreq_ondemand.c
@@ -25,17 +25,14 @@
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
-#ifdef CONFIG_EARLYSUSPEND
-#include <linux/earlysuspend.h>
-#endif
/*
* dbs is used in this file as a shortform for demandbased switching
* It helps to keep variable names smaller, simpler
*/
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
-#define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (26)
+#define DEF_FREQUENCY_UP_THRESHOLD (63)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
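The retune moves both trip points at once. With up_threshold at 63, a sampling window that is more than 63% busy triggers a ramp-up; the scale-down target becomes up_threshold - down_differential = 37% load, i.e. 63% idle, which is where the revised comment text later in this diff comes from. A minimal userspace sketch of the two checks, using a hypothetical current frequency and the stock ondemand scale-down formula (the formula itself is unchanged by this patch and not shown in it):

    #include <stdio.h>

    #define DEF_FREQUENCY_UP_THRESHOLD      (63)
    #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (26)

    int main(void)
    {
        unsigned int cur_freq = 1026000;              /* kHz, hypothetical */
        unsigned int load = 20;                       /* % busy last sample */
        unsigned int max_load_freq = load * cur_freq;
        unsigned int down_pct = DEF_FREQUENCY_UP_THRESHOLD -
                                DEF_FREQUENCY_DOWN_DIFFERENTIAL;  /* 37 */

        if (max_load_freq > DEF_FREQUENCY_UP_THRESHOLD * cur_freq)
            printf("load %u%%: ramp up\n", load);
        else if (max_load_freq < down_pct * cur_freq)
            /* freq_next = max_load_freq / (up_threshold - down_differential) */
            printf("load %u%%: scale down to ~%u kHz\n",
                   load, max_load_freq / down_pct);
        return 0;
    }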
@@ -59,10 +56,6 @@
#define MIN_SAMPLING_RATE_RATIO (2)
static unsigned int min_sampling_rate;
-#ifdef CONFIG_EARLYSUSPEND
-bool screen_is_on = true;
-static unsigned long stored_sampling_rate;
-#endif
#define LATENCY_MULTIPLIER (1000)
#define MIN_LATENCY_MULTIPLIER (100)
@@ -133,7 +126,9 @@ static struct dbs_tuners {
unsigned int sampling_down_factor;
int powersave_bias;
unsigned int io_is_busy;
+#if defined(__MP_DECISION_PATCH__)
unsigned int cpu_utilization;
+#endif
/* msm8960 tuners */
unsigned int freq_step;
} dbs_tuners_ins = {
@@ -142,7 +137,9 @@ static struct dbs_tuners {
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
+#if defined(__MP_DECISION_PATCH__)
.cpu_utilization = 0,
+#endif
.freq_step = DEF_FREQ_STEP,
};
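These two hunks carry the point of the commit: the mpdecision-only tuner field and its initializer now exist only when __MP_DECISION_PATCH__ is defined, so a default build compiles them out instead of carrying dead weight. #if defined(__MP_DECISION_PATCH__) behaves exactly like #ifdef __MP_DECISION_PATCH__; how the macro would be supplied (a -D CFLAG or a board header, presumably) is an assumption, not something this diff shows. A toy demonstration of the guard:

    #include <stdio.h>

    struct tuners_demo {
        unsigned int up_threshold;
    #if defined(__MP_DECISION_PATCH__)  /* same test as #ifdef */
        unsigned int cpu_utilization;   /* present only in mpdecision builds */
    #endif
    };

    int main(void)
    {
        /* cc demo.c                         -> field compiled out
         * cc -D__MP_DECISION_PATCH__ demo.c -> field present */
        printf("sizeof(struct tuners_demo) = %zu\n", sizeof(struct tuners_demo));
        return 0;
    }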
@@ -311,62 +308,6 @@ static ssize_t show_powersave_bias
return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias);
}
-/**
- * update_sampling_rate - update sampling rate effective immediately if needed.
- * @new_rate: new sampling rate
- *
- * If new rate is smaller than the old, simply updating
- * dbs_tuners_ins.sampling_rate might not be appropriate. For example,
- * if the original sampling_rate was 1 second and the requested new sampling
- * rate is 10 ms because the user needs immediate reaction from ondemand
- * governor, but not sure if higher frequency will be required or not,
- * then, the governor may change the sampling rate too late; up to 1 second
- * later. Thus, if we are reducing the sampling rate, we need to make the
- * new value effective immediately.
- */
-static void update_sampling_rate(unsigned int new_rate)
-{
- int cpu;
-
- dbs_tuners_ins.sampling_rate = new_rate
- = max(new_rate, min_sampling_rate);
-
- for_each_online_cpu(cpu) {
- struct cpufreq_policy *policy;
- struct cpu_dbs_info_s *dbs_info;
- unsigned long next_sampling, appointed_at;
-
- policy = cpufreq_cpu_get(cpu);
- if (!policy)
- continue;
- dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
- cpufreq_cpu_put(policy);
-
- mutex_lock(&dbs_info->timer_mutex);
-
- if (!delayed_work_pending(&dbs_info->work)) {
- mutex_unlock(&dbs_info->timer_mutex);
- continue;
- }
-
- next_sampling = jiffies + usecs_to_jiffies(new_rate);
- appointed_at = dbs_info->work.timer.expires;
-
-
- if (time_before(next_sampling, appointed_at)) {
-
- mutex_unlock(&dbs_info->timer_mutex);
- cancel_delayed_work_sync(&dbs_info->work);
- mutex_lock(&dbs_info->timer_mutex);
-
- schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
- usecs_to_jiffies(new_rate));
-
- }
- mutex_unlock(&dbs_info->timer_mutex);
- }
-}
-
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -375,7 +316,7 @@ static ssize_t store_sampling_rate(struc
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- update_sampling_rate(input);
+ dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
return count;
}
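The one-line replacement keeps the clamp against min_sampling_rate but drops the deleted helper's immediate-effect guarantee: a sample already queued under the old rate is left alone, so lowering the rate from 1 s to 10 ms can take up to a full second to bite, which is exactly the scenario the deleted comment warned about. A userspace sketch of the decision the helper made, with jiffies stubbed as plain arithmetic at an assumed HZ of 100:

    #include <stdio.h>

    #define HZ 100
    static unsigned long usecs_to_jiffies_stub(unsigned long us)
    {
        return us * HZ / 1000000;
    }

    int main(void)
    {
        unsigned long jiffies = 1000;  /* pretend "now" */
        unsigned long appointed_at  = jiffies + usecs_to_jiffies_stub(1000000);
        unsigned long next_sampling = jiffies + usecs_to_jiffies_stub(10000);

        /* the helper used time_before(); plain < works without wraparound */
        if (next_sampling < appointed_at)
            printf("requeue: next sample fires %lu jiffies sooner\n",
                   appointed_at - next_sampling);
        else
            printf("leave the pending work as scheduled\n");
        return 0;
    }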
@@ -482,12 +423,9 @@ static ssize_t store_powersave_bias(stru
{
int input = 0;
int bypass = 0;
- int ret, cpu, reenable_timer, j;
+ int ret, cpu, reenable_timer;
struct cpu_dbs_info_s *dbs_info;
- struct cpumask cpus_timer_done;
- cpumask_clear(&cpus_timer_done);
-
ret = sscanf(buf, "%d", &input);
if (ret != 1)
@@ -520,25 +458,10 @@ static ssize_t store_powersave_bias(stru
continue;
dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-
- for_each_cpu(j, &cpus_timer_done) {
- if (!dbs_info->cur_policy) {
- printk(KERN_ERR
- "%s Dbs policy is NULL\n",
- __func__);
- goto skip_this_cpu;
- }
- if (cpumask_test_cpu(j, dbs_info->
- cur_policy->cpus))
- goto skip_this_cpu;
- }
-
- cpumask_set_cpu(cpu, &cpus_timer_done);
if (dbs_info->cur_policy) {
/* restart dbs timer */
dbs_timer_init(dbs_info);
}
-skip_this_cpu:
unlock_policy_rwsem_write(cpu);
}
}
@@ -551,21 +474,6 @@ skip_this_cpu:
continue;
dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
-
- for_each_cpu(j, &cpus_timer_done) {
- if (!dbs_info->cur_policy) {
- printk(KERN_ERR
- "%s Dbs policy is NULL\n",
- __func__);
- goto skip_this_cpu_bypass;
- }
- if (cpumask_test_cpu(j, dbs_info->
- cur_policy->cpus))
- goto skip_this_cpu_bypass;
- }
-
- cpumask_set_cpu(cpu, &cpus_timer_done);
-
if (dbs_info->cur_policy) {
/* cpu using ondemand, cancel dbs timer */
mutex_lock(&dbs_info->timer_mutex);
@@ -578,7 +486,6 @@ skip_this_cpu:
mutex_unlock(&dbs_info->timer_mutex);
}
-skip_this_cpu_bypass:
unlock_policy_rwsem_write(cpu);
}
}
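Both loops above lose the cpus_timer_done bookkeeping. It existed because CPUs that share a cpufreq policy also share cur_policy and its dbs timer, so the mask ensured the timer was restarted (or, in the bypass branch, cancelled) once per policy rather than once per related CPU; after this change the loops simply touch every CPU that has a cur_policy. The removed idiom, reduced to a runnable sketch in which a plain bitmask stands in for struct cpumask and related[] for cur_policy->cpus (the topology is made up):

    #include <stdio.h>

    int main(void)
    {
        /* hypothetical: cpus 0-1 share one policy, cpus 2-3 another */
        unsigned int related[4] = { 0x3, 0x3, 0xc, 0xc };
        unsigned int timer_done = 0;  /* cpus whose policy is already handled */

        for (int cpu = 0; cpu < 4; cpu++) {
            if (timer_done & related[cpu])  /* a sibling got here first: skip */
                continue;
            timer_done |= 1u << cpu;
            printf("restart dbs timer once for the policy of cpu %d\n", cpu);
        }
        return 0;
    }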
@@ -627,10 +534,12 @@ static void dbs_freq_increase(struct cpu
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int max_load_freq;
+#if defined(__MP_DECISION_PATCH__)
/* Extrapolated load of this CPU */
unsigned int load_at_max_freq = 0;
/* Current load across this CPU */
unsigned int cur_load = 0;
+#endif
struct cpufreq_policy *policy;
unsigned int j;
@@ -640,10 +549,10 @@ static void dbs_check_cpu(struct cpu_dbs
/*
* Every sampling_rate, we check if current idle time is less
- * than 20% (default), then we try to increase frequency
+ * than 37% (default), then we try to increase frequency
* Every sampling_rate, we look for the lowest
* frequency which can sustain the load while keeping idle time over
- * 30%. If such a frequency exists, we try to decrease to this frequency.
+ * 63%. If such a frequency exists, we try to decrease to this frequency.
*
* Any frequency increase takes it to the maximum frequency.
* Frequency reduction happens at minimum steps of
@@ -657,7 +566,11 @@ static void dbs_check_cpu(struct cpu_dbs
struct cpu_dbs_info_s *j_dbs_info;
cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
unsigned int idle_time, wall_time, iowait_time;
+#if defined(__MP_DECISION_PATCH__)
unsigned int load_freq;
+#else
+ unsigned int load, load_freq;
+#endif
int freq_avg;
j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
@@ -707,22 +620,32 @@ static void dbs_check_cpu(struct cpu_dbs
if (unlikely(!wall_time || wall_time < idle_time))
continue;
+#if defined(__MP_DECISION_PATCH__)
cur_load = 100 * (wall_time - idle_time) / wall_time;
+#else
+ load = 100 * (wall_time - idle_time) / wall_time;
+#endif
freq_avg = __cpufreq_driver_getavg(policy, j);
if (freq_avg <= 0)
freq_avg = policy->cur;
+#if defined(__MP_DECISION_PATCH__)
load_freq = cur_load * freq_avg;
+#else
+ load_freq = load * freq_avg;
+#endif
if (load_freq > max_load_freq)
max_load_freq = load_freq;
}
+#if defined(__MP_DECISION_PATCH__)
/* calculate the scaled load across CPU */
load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;
/* add cpu_utilization */
dbs_tuners_ins.cpu_utilization = load_at_max_freq;
cpufreq_notify_utilization(policy, dbs_tuners_ins.cpu_utilization);
+#endif
/* Check for frequency increase (by freq_step) */
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
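This guarded block is the actual mpdecision feed: it rescales the sampled load to the CPU's maximum frequency and publishes it through cpufreq_notify_utilization(), so the hotplug decision code sees utilization normalized to full capacity rather than to the current P-state. (As written, cur_load holds the value of whichever CPU the for_each_cpu() loop visited last.) The arithmetic, checked in userspace with hypothetical frequencies:

    #include <stdio.h>

    int main(void)
    {
        unsigned int cur_load = 50;       /* % busy at the current frequency */
        unsigned int cur_freq = 1000000;  /* kHz, policy->cur */
        unsigned int max_freq = 1500000;  /* kHz, policy->cpuinfo.max_freq */

        /* load_at_max_freq = (cur_load * policy->cur) / cpuinfo.max_freq */
        unsigned int load_at_max_freq = (cur_load * cur_freq) / max_freq;
        printf("50%% busy at 1.0 GHz == %u%% of a 1.5 GHz core\n",
               load_at_max_freq);
        return 0;
    }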
@@ -1043,31 +966,6 @@ static int cpufreq_governor_dbs(struct c
return 0;
}
-#ifdef CONFIG_EARLYSUSPEND
-static void cpufreq_ondemand_early_suspend(struct early_suspend *h)
-{
- mutex_lock(&dbs_mutex);
- screen_is_on = false;
- stored_sampling_rate = min_sampling_rate;
- min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE * 6;
- mutex_unlock(&dbs_mutex);
-}
-
-static void cpufreq_ondemand_late_resume(struct early_suspend *h)
-{
- mutex_lock(&dbs_mutex);
- min_sampling_rate = stored_sampling_rate;
- screen_is_on = true;
- mutex_unlock(&dbs_mutex);
-}
-
-static struct early_suspend cpufreq_ondemand_early_suspend_info = {
- .suspend = cpufreq_ondemand_early_suspend,
- .resume = cpufreq_ondemand_late_resume,
- .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
-};
-#endif
-
static int __init cpufreq_gov_dbs_init(void)
{
cputime64_t wall;
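The deleted earlysuspend pair (together with the screen_is_on and stored_sampling_rate globals removed near the top of the file) throttled the governor while the display was blanked: on suspend it saved the sampling floor and raised it to six times MICRO_FREQUENCY_MIN_SAMPLE_RATE, and on resume it put the saved floor back. screen_is_on is deliberately non-static, so other code in the tree presumably consumed it. Numerically, assuming this kernel keeps the mainline value of 10000 us for MICRO_FREQUENCY_MIN_SAMPLE_RATE:

    #include <stdio.h>

    #define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)  /* us, mainline default */

    int main(void)
    {
        unsigned long awake_floor  = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        unsigned long asleep_floor = MICRO_FREQUENCY_MIN_SAMPLE_RATE * 6;
        printf("screen on : min sampling %lu us\n", awake_floor);
        printf("screen off: min sampling %lu us (fewer idle wakeups)\n",
               asleep_floor);
        return 0;
    }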
@@ -1103,18 +1001,12 @@ static int __init cpufreq_gov_dbs_init(v
INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback);
}
-#ifdef CONFIG_EARLYSUSPEND
- register_early_suspend(&cpufreq_ondemand_early_suspend_info);
-#endif
return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
-#ifdef CONFIG_EARLYSUSPEND
- unregister_early_suspend(&cpufreq_ondemand_early_suspend_info);
-#endif
destroy_workqueue(input_wq);
}