Initial SPH-L710 JB Source
/drivers/cpufreq/cpufreq_ondemand.c
blob:a27a9c62175646dfc6419cee121db6bb9ec8a9dc -> blob:0196b2b8e89de05bb09461280a06c014346b8ade
--- drivers/cpufreq/cpufreq_ondemand.c
+++ drivers/cpufreq/cpufreq_ondemand.c
@@ -25,6 +25,9 @@
#include <linux/input.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
+#ifdef CONFIG_EARLYSUSPEND
+#include <linux/earlysuspend.h>
+#endif
/*
* dbs is used in this file as a shortform for demandbased switching
@@ -56,6 +59,10 @@
#define MIN_SAMPLING_RATE_RATIO (2)
static unsigned int min_sampling_rate;
+#ifdef CONFIG_EARLYSUSPEND
+bool screen_is_on = true;
+static unsigned long stored_sampling_rate;
+#endif
#define LATENCY_MULTIPLIER (1000)
#define MIN_LATENCY_MULTIPLIER (100)
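
Note: screen_is_on is deliberately left non-static, so code elsewhere in the kernel can check display state, while stored_sampling_rate holds the original sampling floor for the duration of a screen-off period. A minimal sketch of a hypothetical consumer of the flag (only the extern name comes from this patch; the helper is made up for illustration):

	/* hypothetical consumer elsewhere in the kernel */
	extern bool screen_is_on;

	static inline bool boost_allowed(void)
	{
		/* e.g. skip input-event frequency boosts while the display is off */
		return screen_is_on;
	}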
@@ -126,9 +133,7 @@ static struct dbs_tuners {
unsigned int sampling_down_factor;
int powersave_bias;
unsigned int io_is_busy;
-#if defined(__MP_DECISION_PATCH__)
unsigned int cpu_utilization;
-#endif
/* msm8960 tuners */
unsigned int freq_step;
} dbs_tuners_ins = {
@@ -137,9 +142,7 @@ static struct dbs_tuners {
.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
.ignore_nice = 0,
.powersave_bias = 0,
-#if defined(__MP_DECISION_PATCH__)
.cpu_utilization = 0,
-#endif
.freq_step = DEF_FREQ_STEP,
};
@@ -308,6 +311,62 @@ static ssize_t show_powersave_bias
return snprintf(buf, PAGE_SIZE, "%d\n", dbs_tuners_ins.powersave_bias);
}
+/**
+ * update_sampling_rate - update sampling rate effective immediately if needed.
+ * @new_rate: new sampling rate
+ *
+ * If the new rate is smaller than the old, simply updating
+ * dbs_tuners_ins.sampling_rate might not be appropriate. For example,
+ * if the original sampling_rate was 1 second and the requested new sampling
+ * rate is 10 ms because the user wants an immediate reaction from the
+ * ondemand governor, but is not sure whether a higher frequency will be
+ * required, the governor may change the sampling rate too late; up to 1
+ * second later. Thus, if we are reducing the sampling rate, we need to
+ * make the new value effective immediately.
+ */
+static void update_sampling_rate(unsigned int new_rate)
+{
+ int cpu;
+
+ dbs_tuners_ins.sampling_rate = new_rate
+ = max(new_rate, min_sampling_rate);
+
+ for_each_online_cpu(cpu) {
+ struct cpufreq_policy *policy;
+ struct cpu_dbs_info_s *dbs_info;
+ unsigned long next_sampling, appointed_at;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
+ cpufreq_cpu_put(policy);
+
+ mutex_lock(&dbs_info->timer_mutex);
+
+ if (!delayed_work_pending(&dbs_info->work)) {
+ mutex_unlock(&dbs_info->timer_mutex);
+ continue;
+ }
+
+ next_sampling = jiffies + usecs_to_jiffies(new_rate);
+ appointed_at = dbs_info->work.timer.expires;
+
+ if (time_before(next_sampling, appointed_at)) {
+ mutex_unlock(&dbs_info->timer_mutex);
+ cancel_delayed_work_sync(&dbs_info->work);
+ mutex_lock(&dbs_info->timer_mutex);
+
+ schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work,
+ usecs_to_jiffies(new_rate));
+ }
+ mutex_unlock(&dbs_info->timer_mutex);
+ }
+}
+
static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
@@ -316,7 +375,7 @@ static ssize_t store_sampling_rate(struc
ret = sscanf(buf, "%u", &input);
if (ret != 1)
return -EINVAL;
- dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
+ update_sampling_rate(input);
return count;
}
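
With update_sampling_rate() in place, lowering the rate through sysfs takes effect immediately: any pending governor work whose next run lies beyond the new period is cancelled and rescheduled. Note that timer_mutex is dropped around cancel_delayed_work_sync(), presumably because the work handler takes the same mutex and waiting for it while holding the lock could deadlock. A rough userspace sketch of exercising the tunable (the sysfs path assumes the stock global ondemand layout on this kernel):

	#include <stdio.h>

	int main(void)
	{
		/* assumed path for the global ondemand tunables */
		const char *path =
			"/sys/devices/system/cpu/cpufreq/ondemand/sampling_rate";
		FILE *f = fopen(path, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* request a 10 ms sampling period; with this patch the governor
		 * reschedules pending work so the new period applies at once */
		fprintf(f, "%u\n", 10000);
		return fclose(f) ? 1 : 0;
	}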
@@ -423,9 +482,12 @@ static ssize_t store_powersave_bias(stru
{
int input = 0;
int bypass = 0;
- int ret, cpu, reenable_timer;
+ int ret, cpu, reenable_timer, j;
struct cpu_dbs_info_s *dbs_info;
+ struct cpumask cpus_timer_done;
+ cpumask_clear(&cpus_timer_done);
+
ret = sscanf(buf, "%d", &input);
if (ret != 1)
@@ -458,10 +520,25 @@ static ssize_t store_powersave_bias(stru
continue;
dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+
+ for_each_cpu(j, &cpus_timer_done) {
+ if (!dbs_info->cur_policy) {
+ printk(KERN_ERR
+ "%s Dbs policy is NULL\n",
+ __func__);
+ goto skip_this_cpu;
+ }
+ if (cpumask_test_cpu(j, dbs_info->
+ cur_policy->cpus))
+ goto skip_this_cpu;
+ }
+
+ cpumask_set_cpu(cpu, &cpus_timer_done);
if (dbs_info->cur_policy) {
/* restart dbs timer */
dbs_timer_init(dbs_info);
}
+skip_this_cpu:
unlock_policy_rwsem_write(cpu);
}
}
@@ -474,6 +551,21 @@ static ssize_t store_powersave_bias(stru
continue;
dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+
+ for_each_cpu(j, &cpus_timer_done) {
+ if (!dbs_info->cur_policy) {
+ printk(KERN_ERR
+ "%s Dbs policy is NULL\n",
+ __func__);
+ goto skip_this_cpu_bypass;
+ }
+ if (cpumask_test_cpu(j, dbs_info->
+ cur_policy->cpus))
+ goto skip_this_cpu_bypass;
+ }
+
+ cpumask_set_cpu(cpu, &cpus_timer_done);
+
if (dbs_info->cur_policy) {
/* cpu using ondemand, cancel dbs timer */
mutex_lock(&dbs_info->timer_mutex);
@@ -486,6 +578,7 @@ static ssize_t store_powersave_bias(stru
mutex_unlock(&dbs_info->timer_mutex);
}
+skip_this_cpu_bypass:
unlock_policy_rwsem_write(cpu);
}
}
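
Both loops in store_powersave_bias() now record handled CPUs in cpus_timer_done, so the dbs timer is restarted (or, on the bypass path, cancelled) only once per cpufreq policy even when several CPUs share one policy. Distilled to its core, the pattern looks like the sketch below (the helper name is hypothetical; only the cpumask calls mirror the patch):

	/* illustration only: handle each group of CPUs sharing a policy once */
	static void visit_each_policy_once(void)
	{
		struct cpumask cpus_done;
		unsigned int cpu;
		int j;

		cpumask_clear(&cpus_done);
		for_each_online_cpu(cpu) {
			struct cpu_dbs_info_s *dbs_info =
				&per_cpu(od_cpu_dbs_info, cpu);

			if (!dbs_info->cur_policy)
				continue;	/* governor not active here */

			/* skip CPUs whose policy was already handled */
			for_each_cpu(j, &cpus_done)
				if (cpumask_test_cpu(j, dbs_info->cur_policy->cpus))
					goto next_cpu;

			cpumask_set_cpu(cpu, &cpus_done);
			/* ... restart or cancel the dbs timer for this policy ... */
	next_cpu:
			;
		}
	}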
@@ -534,12 +627,10 @@ static void dbs_freq_increase(struct cpu
static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int max_load_freq;
-#if defined(__MP_DECISION_PATCH__)
/* Extrapolated load of this CPU */
unsigned int load_at_max_freq = 0;
/* Current load across this CPU */
unsigned int cur_load = 0;
-#endif
struct cpufreq_policy *policy;
unsigned int j;
@@ -566,11 +657,7 @@ static void dbs_check_cpu(struct cpu_dbs
struct cpu_dbs_info_s *j_dbs_info;
cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
unsigned int idle_time, wall_time, iowait_time;
-#if defined(__MP_DECISION_PATCH__)
unsigned int load_freq;
-#else
- unsigned int load, load_freq;
-#endif
int freq_avg;
j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
@@ -620,32 +707,22 @@ static void dbs_check_cpu(struct cpu_dbs
if (unlikely(!wall_time || wall_time < idle_time))
continue;
-#if defined(__MP_DECISION_PATCH__)
cur_load = 100 * (wall_time - idle_time) / wall_time;
-#else
- load = 100 * (wall_time - idle_time) / wall_time;
-#endif
freq_avg = __cpufreq_driver_getavg(policy, j);
if (freq_avg <= 0)
freq_avg = policy->cur;
-#if defined(__MP_DECISION_PATCH__)
load_freq = cur_load * freq_avg;
-#else
- load_freq = load * freq_avg;
-#endif
if (load_freq > max_load_freq)
max_load_freq = load_freq;
}
-#if defined(__MP_DECISION_PATCH__)
/* calculate the scaled load across CPU */
load_at_max_freq = (cur_load * policy->cur)/policy->cpuinfo.max_freq;
/* add cpu_utilization */
dbs_tuners_ins.cpu_utilization = load_at_max_freq;
cpufreq_notify_utilization(policy, dbs_tuners_ins.cpu_utilization);
-#endif
/* Check for frequency increase *//*increasing by freq_step*/
if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
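
With the __MP_DECISION_PATCH__ guards removed, the per-sample utilization is always computed and pushed to userspace (mpdecision) through cpufreq_notify_utilization(). The reported value is the raw load scaled by how far below the maximum frequency the CPU is currently running; a quick worked example with made-up numbers:

	/* hypothetical sample: 80% busy at 1.026 GHz on a 1.512 GHz part */
	static unsigned int example_load_at_max_freq(void)
	{
		unsigned int cur_load = 80;		/* % busy this sample       */
		unsigned int cur_khz  = 1026000;	/* policy->cur              */
		unsigned int max_khz  = 1512000;	/* policy->cpuinfo.max_freq */

		return (cur_load * cur_khz) / max_khz;	/* = 54 */
	}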
@@ -966,6 +1043,31 @@ static int cpufreq_governor_dbs(struct c
return 0;
}
+#ifdef CONFIG_EARLYSUSPEND
+static void cpufreq_ondemand_early_suspend(struct early_suspend *h)
+{
+ mutex_lock(&dbs_mutex);
+ screen_is_on = false;
+ stored_sampling_rate = min_sampling_rate;
+ min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE * 6;
+ mutex_unlock(&dbs_mutex);
+}
+
+static void cpufreq_ondemand_late_resume(struct early_suspend *h)
+{
+ mutex_lock(&dbs_mutex);
+ min_sampling_rate = stored_sampling_rate;
+ screen_is_on = true;
+ mutex_unlock(&dbs_mutex);
+}
+
+static struct early_suspend cpufreq_ondemand_early_suspend_info = {
+ .suspend = cpufreq_ondemand_early_suspend,
+ .resume = cpufreq_ondemand_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
+};
+#endif
+
static int __init cpufreq_gov_dbs_init(void)
{
cputime64_t wall;
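
The suspend hook raises only the sampling floor: assuming the stock MICRO_FREQUENCY_MIN_SAMPLE_RATE of 10000 us, screen-off gives min_sampling_rate = 10000 * 6 = 60000 us, i.e. at most one sample every 60 ms, and the late-resume hook restores the saved floor. The live dbs_tuners_ins.sampling_rate is not rewritten by these hooks, so the higher floor is only enforced the next time sampling_rate is set (via sysfs or a governor restart).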
@@ -1001,12 +1103,18 @@ static int __init cpufreq_gov_dbs_init(v
INIT_WORK(&per_cpu(dbs_refresh_work, i), dbs_refresh_callback);
}
+#ifdef CONFIG_EARLYSUSPEND
+ register_early_suspend(&cpufreq_ondemand_early_suspend_info);
+#endif
return cpufreq_register_governor(&cpufreq_gov_ondemand);
}
static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+#ifdef CONFIG_EARLYSUSPEND
+ unregister_early_suspend(&cpufreq_ondemand_early_suspend_info);
+#endif
destroy_workqueue(input_wq);
}