PATCH: apply some patches from my CM kernel (that came from CAF, not CM)
/drivers/cpufreq/cpufreq.c
blob:1283d333349c2443bdfc2cd3949e074fab9f9498 -> blob:5281c533746f977590e0e35f37aa6d3b5f789204
--- drivers/cpufreq/cpufreq.c
+++ drivers/cpufreq/cpufreq.c
@@ -28,11 +28,36 @@
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
+#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <trace/events/power.h>
#include <linux/semaphore.h>
+#if !defined(__MP_DECISION_PATCH__)
+#error "__MP_DECISION_PATCH__ must be defined in cpufreq.c"
+#endif
+/* Description of __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ *
+ * If the ref count of cpufreq's kobject drops to zero inside a show/store
+ * function, cpufreq_cpu_put() deadlocks, because the active count of the
+ * file being accessed was incremented just before show/store was invoked
+ * by fill_read(write)_buffer.
+ * (This happens when show/store is entered and then cpu_down() runs
+ * before the show/store function has finished.)
+ * So cpufreq_cpu_put() in show/store must never release the final
+ * reference to cpufreq's kobject. To guarantee that the kobj ref count
+ * of cpufreq is never 0 in this case, a per-cpu mutex is used.
+ * This per-cpu mutex wraps the whole show/store function and the
+ * kobject_put() call in __cpufreq_remove_dev().
+ */
+#define __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+
+
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+static DEFINE_PER_CPU(struct mutex, cpufreq_remove_mutex);
+#endif
+
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
@@ -201,6 +226,27 @@ static void cpufreq_cpu_put_sysfs(struct
__cpufreq_cpu_put(data, 1);
}
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+/* just peek to see if the cpufreq policy is available.
+ * The caller must hold cpufreq_driver_lock
+ */
+struct cpufreq_policy *cpufreq_cpu_peek(unsigned int cpu)
+{
+ struct cpufreq_policy *data;
+
+ if (cpu >= nr_cpu_ids)
+ return NULL;
+
+ if (!cpufreq_driver)
+ return NULL;
+
+ /* get the CPU */
+ data = per_cpu(cpufreq_cpu_data, cpu);
+
+ return data;
+}
+#endif
+
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -293,31 +339,30 @@ void cpufreq_notify_transition(struct cp
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
- if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
+ if (likely(policy) && likely(policy->cpu == freqs->cpu))
policy->cur = freqs->new;
- sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq");
- }
break;
}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
-/**
- * cpufreq_notify_utilization - notify CPU userspace about CPU utilization
+
+#if defined(__MP_DECISION_PATCH__)
+/*
+ * cpufreq_notify_utilization - notify userspace about CPU utilization
* change
*
- * This function is called everytime the CPU load is evaluated by the
- * ondemand governor. It notifies userspace of cpu load changes via sysfs.
+ * This function raises a sysfs notification on the "cpu_utilization"
+ * attribute. It is called on every ondemand governor load evaluation.
*/
void cpufreq_notify_utilization(struct cpufreq_policy *policy,
-					unsigned int util)
+					unsigned int utils)
{
	if (policy)
-		policy->util = util;
-
-	if (policy->util >= MIN_CPU_UTIL_NOTIFY)
-		sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
+		policy->utils = utils;
+	if (policy) sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
}
+#endif
/*********************************************************************
* SYSFS INTERFACE *
@@ -405,7 +450,9 @@ show_one(cpuinfo_transition_latency, cpu
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-show_one(cpu_utilization, util);
+#if defined(__MP_DECISION_PATCH__)
+show_one(cpu_utilization, utils);
+#endif
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
@@ -517,9 +564,6 @@ static ssize_t store_scaling_governor(st
unsigned int ret = -EINVAL;
char str_governor[16];
struct cpufreq_policy new_policy;
- char *envp[3];
- char buf1[64];
- char buf2[64];
ret = cpufreq_get_policy(&new_policy, policy->cpu);
if (ret)
@@ -540,15 +584,6 @@ static ssize_t store_scaling_governor(st
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
- sysfs_notify(&policy->kobj, NULL, "scaling_governor");
-
- snprintf(buf1, sizeof(buf1), "GOV=%s", policy->governor->name);
- snprintf(buf2, sizeof(buf2), "CPU=%u", policy->cpu);
- envp[0] = buf1;
- envp[1] = buf2;
- envp[2] = NULL;
- kobject_uevent_env(cpufreq_global_kobject, KOBJ_ADD, envp);
-
if (ret)
return ret;
else
@@ -664,59 +699,6 @@ static ssize_t show_bios_limit(struct cp
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
-#ifdef CONFIG_VDD_USERSPACE
-extern ssize_t acpuclk_get_vdd_levels_str(char *buf);
-static ssize_t show_vdd_levels(struct kobject *a, struct attribute *b, char *buf)
-{
- return acpuclk_get_vdd_levels_str(buf);
-}
-
-extern void acpuclk_set_vdd(unsigned acpu_khz, int vdd);
-static ssize_t store_vdd_levels(struct kobject *a, struct attribute *b, const char *buf, size_t count)
-{
- int i = 0, j;
- int pair[2] = { 0, 0 };
- int sign = 0;
- if (count < 1)
- return 0;
- if (buf[0] == '-') {
- sign = -1;
- i++;
- }
- else if (buf[0] == '+') {
- sign = 1;
- i++;
- }
- for (j = 0; i < count; i++) {
- char c = buf[i];
- if ((c >= '0') && (c <= '9')) {
- pair[j] *= 10;
- pair[j] += (c - '0');
- }
- else if ((c == ' ') || (c == '\t')) {
- if (pair[j] != 0) {
- j++;
- if ((sign != 0) || (j > 1))
- break;
- }
- }
- else
- break;
- }
- if (sign != 0) {
- if (pair[0] > 0)
- acpuclk_set_vdd(0, sign * pair[0]);
- }
- else {
- if ((pair[0] > 0) && (pair[1] > 0))
- acpuclk_set_vdd((unsigned)pair[0], pair[1]);
- else
- return -EINVAL;
- }
- return count;
-}
-#endif /* CONFIG_VDD_USERSPACE */
-
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
@@ -727,24 +709,24 @@ cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
+#if defined(__MP_DECISION_PATCH__)
cpufreq_freq_attr_ro(cpu_utilization);
+#endif
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
-#ifdef CONFIG_VDD_USERSPACE
-define_one_global_rw(vdd_levels);
-#endif
-
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
- &affected_cpus.attr,
+#if defined(__MP_DECISION_PATCH__)
&cpu_utilization.attr,
+#endif
+ &affected_cpus.attr,
&related_cpus.attr,
&scaling_governor.attr,
&scaling_driver.attr,
@@ -753,18 +735,6 @@ static struct attribute *default_attrs[]
NULL
};
-#ifdef CONFIG_VDD_USERSPACE
-static struct attribute *vddtbl_attrs[] = {
- &vdd_levels.attr,
- NULL
-};
-
-static struct attribute_group vddtbl_attr_group = {
- .attrs = vddtbl_attrs,
- .name = "vdd_table",
-};
-#endif /* CONFIG_VDD_USERSPACE */
-
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
@@ -776,6 +746,27 @@ static ssize_t show(struct kobject *kobj
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ unsigned int cpu;
+ unsigned long flags;
+#endif
+
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = cpufreq_cpu_peek(policy->cpu);
+ if (!policy) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return -EINVAL;
+ }
+ cpu = policy->cpu;
+ if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ pr_info("!WARN %s failed because cpu%u is going down\n",
+ __func__, cpu);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -792,6 +783,9 @@ static ssize_t show(struct kobject *kobj
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
return ret;
}
@@ -801,6 +795,27 @@ static ssize_t store(struct kobject *kob
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ unsigned int cpu;
+ unsigned long flags;
+#endif
+
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = cpufreq_cpu_peek(policy->cpu);
+ if (!policy) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return -EINVAL;
+ }
+ cpu = policy->cpu;
+ if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ pr_info("!WARN %s failed because cpu%u is going down\n",
+ __func__, cpu);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -817,6 +832,9 @@ static ssize_t store(struct kobject *kob
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
return ret;
}
@@ -1118,9 +1136,6 @@ static int cpufreq_add_dev(struct sys_de
pr_debug("initialization failed\n");
goto err_unlock_policy;
}
-
- if (policy->max > 1512000) policy->max = 1512000;
-
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
@@ -1277,8 +1292,13 @@ static int __cpufreq_remove_dev(struct s
kobj = &data->kobj;
cmp = &data->kobj_unregister;
unlock_policy_rwsem_write(cpu);
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_lock(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
kobject_put(kobj);
-
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
@@ -1612,6 +1632,12 @@ int __cpufreq_driver_target(struct cpufr
target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
+ if (likely(retval != -EINVAL)) {
+ if (target_freq == policy->max)
+ cpu_nonscaling(policy->cpu);
+ else
+ cpu_scaling(policy->cpu);
+ }
return retval;
}
@@ -1855,7 +1881,6 @@ static int __cpufreq_set_policy(struct c
/* start new governor */
data->governor = policy->governor;
- if (!cpu_online(1)) cpu_up(1);
if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
/* new governor failed, so re-start old one */
pr_debug("starting governor %s failed\n",
@@ -1980,7 +2005,6 @@ no_policy:
return ret;
}
-
int cpufreq_set_limit(unsigned int flag, unsigned int value)
{
unsigned int max_value = 0;
@@ -2039,12 +2063,6 @@ int cpufreq_set_limit(unsigned int flag,
max_value = user_max_freq_limit;
}
- /* thermald */
- if (freq_limit_start_flag & USER_MAX_BIT) {
- if (max_value > user_max_freq_limit)
- max_value = user_max_freq_limit;
- }
-
/* set min freq */
if (freq_limit_start_flag & TOUCH_BOOSTER_FIRST_BIT)
min_value = TOUCH_BOOSTER_FIRST_FREQ_LIMIT;
@@ -2111,7 +2129,6 @@ int cpufreq_set_limit_defered(unsigned i
}
#endif
-
/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
@@ -2197,7 +2214,7 @@ static int __cpuinit cpufreq_cpu_callbac
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
+ .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
@@ -2260,14 +2277,14 @@ int cpufreq_register_driver(struct cpufr
}
}
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ pr_debug("driver %s up and running\n", driver_data->name);
+
#ifdef CONFIG_SEC_DVFS
cpufreq_queue_priv.wq = create_workqueue("cpufreq_queue");
INIT_WORK(&cpufreq_queue_priv.work, cpufreq_set_limit_work);
#endif
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- pr_debug("driver %s up and running\n", driver_data->name);
-
return 0;
err_sysdev_unreg:
sysdev_driver_unregister(&cpu_sysdev_class,
@@ -2319,13 +2336,13 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_dri
static int __init cpufreq_core_init(void)
{
int cpu;
-#ifdef CONFIG_VDD_USERSPACE
- int rc;
-#endif /* CONFIG_VDD_USERSPACE */
for_each_possible_cpu(cpu) {
per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_init(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
}
cpufreq_global_kobject = kobject_create_and_add("cpufreq",
@@ -2336,10 +2353,6 @@ static int __init cpufreq_core_init(void
#endif
register_syscore_ops(&cpufreq_syscore_ops);
-#ifdef CONFIG_VDD_USERSPACE
- rc = sysfs_create_group(cpufreq_global_kobject, &vddtbl_attr_group);
-#endif /* CONFIG_VDD_USERSPACE */
-
return 0;
}
core_initcall(cpufreq_core_init);