PATCH: apply some patches from my CM kernel (they originally came from CAF, not CM). Together they add per-CPU min/max frequency limits behind an exported msm_cpufreq_set_freq_limits(), boost the calling task to SCHED_FIFO while the clock ramps up, keep the per-CPU suspend flag in sync across hotplug, and wire up a .get callback so the cpufreq core reports the live acpuclk rate.
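
For context, a minimal sketch of how the new export might be called. throttle_cpu()/unthrottle_cpu() are hypothetical names for illustration; only msm_cpufreq_set_freq_limits() and MSM_CPUFREQ_NO_LIMIT (from the <mach/cpufreq.h> header these patches start including) come from the patch itself. Frequencies are in kHz, matching the cpufreq table, and a new clamp takes effect on the next transition through set_cpu_freq():

	#include <linux/types.h>
	#include <mach/cpufreq.h>

	/* Hypothetical thermal-mitigation hooks, illustration only. */
	static void throttle_cpu(uint32_t cpu, uint32_t cap_khz)
	{
		/* cap the max; NO_LIMIT leaves the min at the table minimum */
		msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT, cap_khz);
	}

	static void unthrottle_cpu(uint32_t cpu)
	{
		/* NO_LIMIT on both sides restores the table-derived bounds */
		msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT,
					    MSM_CPUFREQ_NO_LIMIT);
	}
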
/arch/arm/mach-msm/cpufreq.c
blob:8fc50e1c19b79c2be04cf26a8a3b29f400ab0899 -> blob:1d832be4a6b7e70584620289b1d2f90f640a0214
--- arch/arm/mach-msm/cpufreq.c
+++ arch/arm/mach-msm/cpufreq.c
@@ -19,6 +19,7 @@
#include <linux/earlysuspend.h>
#include <linux/init.h>
+#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
@@ -27,6 +28,7 @@
#include <linux/sched.h>
#include <linux/suspend.h>
#include <mach/socinfo.h>
+#include <mach/cpufreq.h>
#include "acpuclock.h"
@@ -52,10 +54,36 @@ static DEFINE_PER_CPU(struct cpufreq_sus
static int override_cpu;
+struct cpu_freq {
+ uint32_t max;
+ uint32_t min;
+ uint32_t allowed_max;
+ uint32_t allowed_min;
+ uint32_t limits_init;
+};
+
+static DEFINE_PER_CPU(struct cpu_freq, cpu_freq_info);
+
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
int ret = 0;
+ int saved_sched_policy = -EINVAL;
+ int saved_sched_rt_prio = -EINVAL;
struct cpufreq_freqs freqs;
+ struct cpu_freq *limit = &per_cpu(cpu_freq_info, policy->cpu);
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ if (limit->limits_init) {
+ if (new_freq > limit->allowed_max) {
+ new_freq = limit->allowed_max;
+ pr_debug("max: limiting freq to %d\n", new_freq);
+ }
+
+ if (new_freq < limit->allowed_min) {
+ new_freq = limit->allowed_min;
+ pr_debug("min: limiting freq to %d\n", new_freq);
+ }
+ }
freqs.old = policy->cur;
if (override_cpu) {
@@ -66,15 +94,61 @@ static int set_cpu_freq(struct cpufreq_p
} else
freqs.new = new_freq;
freqs.cpu = policy->cpu;
+
+ /*
+ * Put the caller into SCHED_FIFO priority to avoid cpu starvation
+ * in the acpuclk_set_rate path while increasing frequencies
+ */
+
+ if (freqs.new > freqs.old && current->policy != SCHED_FIFO) {
+ saved_sched_policy = current->policy;
+ saved_sched_rt_prio = current->rt_priority;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+ }
+
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ);
if (!ret)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ /* Restore priority after clock ramp-up */
+ if (freqs.new > freqs.old && saved_sched_policy >= 0) {
+ param.sched_priority = saved_sched_rt_prio;
+ sched_setscheduler_nocheck(current, saved_sched_policy, &param);
+ }
return ret;
}
#ifdef CONFIG_SMP
+static int __cpuinit msm_cpufreq_cpu_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
+ per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
+ mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
+ break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
+ .notifier_call = msm_cpufreq_cpu_callback,
+};
+
static void set_cpu_work(struct work_struct *work)
{
struct cpufreq_work_struct *cpu_work =
@@ -166,6 +240,72 @@ static int msm_cpufreq_verify(struct cpu
return 0;
}
+static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
+{
+ return acpuclk_get_rate(cpu);
+}
+
+static inline int msm_cpufreq_limits_init(void)
+{
+ int cpu = 0;
+ int i = 0;
+ struct cpufreq_frequency_table *table = NULL;
+ uint32_t min = (uint32_t) -1;
+ uint32_t max = 0;
+ struct cpu_freq *limit = NULL;
+
+ for_each_possible_cpu(cpu) {
+ /* reset per cpu so one cpu's table can't widen another's bounds */
+ min = (uint32_t) -1;
+ max = 0;
+ limit = &per_cpu(cpu_freq_info, cpu);
+ table = cpufreq_frequency_get_table(cpu);
+ if (table == NULL) {
+ pr_err("%s: error reading cpufreq table for cpu %d\n",
+ __func__, cpu);
+ continue;
+ }
+ for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
+ if (table[i].frequency > max)
+ max = table[i].frequency;
+ if (table[i].frequency < min)
+ min = table[i].frequency;
+ }
+ limit->allowed_min = min;
+ limit->allowed_max = max;
+ limit->min = min;
+ limit->max = max;
+ limit->limits_init = 1;
+ }
+
+ return 0;
+}
+
+int msm_cpufreq_set_freq_limits(uint32_t cpu, uint32_t min, uint32_t max)
+{
+ struct cpu_freq *limit = &per_cpu(cpu_freq_info, cpu);
+
+ if (!limit->limits_init)
+ msm_cpufreq_limits_init();
+
+ if ((min != MSM_CPUFREQ_NO_LIMIT) &&
+ min >= limit->min && min <= limit->max)
+ limit->allowed_min = min;
+ else
+ limit->allowed_min = limit->min;
+
+ if ((max != MSM_CPUFREQ_NO_LIMIT) &&
+ max <= limit->max && max >= limit->min)
+ limit->allowed_max = max;
+ else
+ limit->allowed_max = limit->max;
+
+ pr_debug("%s: Limiting cpu %d min = %d, max = %d\n",
+ __func__, cpu,
+ limit->allowed_min, limit->allowed_max);
+
+ return 0;
+}
+EXPORT_SYMBOL(msm_cpufreq_set_freq_limits);
+
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
int cur_freq;
@@ -301,6 +441,7 @@ static struct cpufreq_driver msm_cpufreq
.init = msm_cpufreq_init,
.verify = msm_cpufreq_verify,
.target = msm_cpufreq_target,
+ .get = msm_cpufreq_get_freq,
.name = "msm",
.attr = msm_freq_attr,
};
@@ -327,6 +468,8 @@ static int __init msm_cpufreq_register(v
msm_cpufreq_wq = create_workqueue("msm-cpufreq");
if (!msm_cpufreq_wq)
return -1;
+
+ register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
#endif
register_pm_notifier(&msm_cpufreq_pm_notifier);
@@ -334,4 +477,3 @@ static int __init msm_cpufreq_register(v
}
late_initcall(msm_cpufreq_register);
-
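
Note on the new .get hook: with it in place the cpufreq core can report the live acpuclk rate (e.g. through the cpuinfo_cur_freq sysfs attribute) rather than just the last requested target. A minimal in-kernel reader, assuming nothing beyond the standard cpufreq API:

	#include <linux/cpufreq.h>

	/* Current rate in kHz for `cpu`; with these patches the core lands in
	 * msm_cpufreq_get_freq() -> acpuclk_get_rate(). Returns 0 if the
	 * policy isn't available. */
	static unsigned int read_cur_khz(unsigned int cpu)
	{
		return cpufreq_get(cpu);
	}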