PATCH: apply several patches from my CM kernel (they originate from CAF, not from CM itself)
drivers/cpufreq/cpufreq.c
blob:1283d333349c2443bdfc2cd3949e074fab9f9498 -> blob:6837af6dc8e3840a333602575931bd6f98251e09
--- drivers/cpufreq/cpufreq.c
+++ drivers/cpufreq/cpufreq.c
@@ -33,6 +33,30 @@
#include <trace/events/power.h>
#include <linux/semaphore.h>
+#if !defined(__MP_DECISION_PATCH__)
+#error "__MP_DECISION_PATCH__ must be defined in cpufreq.c"
+#endif
+/* Description of __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ *
+ * If the ref count of cpufreq's kobject drops to zero inside a show/store
+ * function, cpufreq_cpu_put() deadlocks, because the active count of the
+ * file being accessed was incremented just before show/store was called
+ * from fill_read(write)_buffer.
+ * (This happens when show/store is invoked first and cpu_down is then
+ * called before that show/store function has finished.)
+ * So cpufreq_cpu_put() in show/store must never release the last reference
+ * to cpufreq's kobject. To guarantee that the kobject ref count cannot
+ * reach zero on this path, a per-CPU mutex is used; it wraps the whole
+ * show/store function as well as the kobject_put() call in
+ * __cpufreq_remove_dev().
+ */
+#define __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+
+
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+static DEFINE_PER_CPU(struct mutex, cpufreq_remove_mutex);
+#endif
+
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
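For orientation, here is the locking protocol this fix establishes, pulled out of the hunks below into a standalone sketch. The demo_ names are illustrative only, not part of the patch: the removal path holds the per-CPU mutex across the final kobject_put(), so a concurrent show()/store() either finishes before the kobject dies or fails its trylock and backs out.

#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/kobject.h>

/* one mutex per CPU; initialized for_each_possible_cpu() at init time,
 * exactly as the cpufreq_core_init() hunk at the end of this patch does */
static DEFINE_PER_CPU(struct mutex, demo_remove_mutex);

/* sysfs side: never blocks, never touches a dying kobject */
static ssize_t demo_show(unsigned int cpu)
{
	if (!mutex_trylock(&per_cpu(demo_remove_mutex, cpu)))
		return -EINVAL;		/* cpu_down() in progress */
	/* ... safe to dereference the policy kobject here ... */
	mutex_unlock(&per_cpu(demo_remove_mutex, cpu));
	return 0;
}

/* removal side: drops the last kobject reference under the mutex */
static void demo_remove_dev(unsigned int cpu, struct kobject *kobj)
{
	mutex_lock(&per_cpu(demo_remove_mutex, cpu));
	kobject_put(kobj);
	mutex_unlock(&per_cpu(demo_remove_mutex, cpu));
}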
@@ -201,6 +225,27 @@ static void cpufreq_cpu_put_sysfs(struct
__cpufreq_cpu_put(data, 1);
}
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+/* Just peek to see whether the cpufreq policy is available.
+ * The caller must hold cpufreq_driver_lock.
+ */
+struct cpufreq_policy *cpufreq_cpu_peek(unsigned int cpu)
+{
+ struct cpufreq_policy *data;
+
+ if (cpu >= nr_cpu_ids)
+ return NULL;
+
+ if (!cpufreq_driver)
+ return NULL;
+
+ /* get the CPU */
+ data = per_cpu(cpufreq_cpu_data, cpu);
+
+ return data;
+}
+#endif
+
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
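Unlike cpufreq_cpu_get(), cpufreq_cpu_peek() takes no reference on the policy, so the returned pointer is only valid while cpufreq_driver_lock is held. A minimal, hypothetical caller:

unsigned long flags;
struct cpufreq_policy *pol;

spin_lock_irqsave(&cpufreq_driver_lock, flags);
pol = cpufreq_cpu_peek(cpu);
if (pol)
	pr_info("cpu%u current freq: %u kHz\n", cpu, pol->cur);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);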
@@ -293,31 +338,30 @@ void cpufreq_notify_transition(struct cp
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
- if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
+ if (likely(policy) && likely(policy->cpu == freqs->cpu))
policy->cur = freqs->new;
- sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq");
- }
break;
}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
-/**
- * cpufreq_notify_utilization - notify CPU userspace about CPU utilization
+
+#if defined(__MP_DECISION_PATCH__)
+/*
+ * cpufreq_notify_utilization - notify CPU userspace about CPU utilization
* change
*
- * This function is called everytime the CPU load is evaluated by the
- * ondemand governor. It notifies userspace of cpu load changes via sysfs.
+ * This function invokes the sysfs notifier. It is called on every
+ * ondemand governor load evaluation to report the computed CPU load.
*/
void cpufreq_notify_utilization(struct cpufreq_policy *policy,
- unsigned int util)
+ unsigned int utils)
{
-	if (policy)
-		policy->util = util;
-
-	if (policy->util >= MIN_CPU_UTIL_NOTIFY)
-		sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
+	if (!policy)
+		return;		/* avoid a NULL deref in sysfs_notify() below */
+
+	policy->utils = utils;
+	sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
}
+#endif
/*********************************************************************
* SYSFS INTERFACE *
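A note on the cpufreq_notify_utilization() change above: the point of pushing load samples through sysfs_notify() is that userspace (the mpdecision daemon on CAF devices, as far as I know) can sleep in poll() on the cpu_utilization attribute instead of busy-sampling it. A sketch of such a consumer, assuming the usual sysfs poll protocol (read to arm, poll for POLLPRI, re-read):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16];
	ssize_t n;
	int fd = open("/sys/devices/system/cpu/cpu0/cpufreq/cpu_utilization",
		      O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

	if (fd < 0)
		return 1;
	for (;;) {
		pread(fd, buf, sizeof(buf) - 1, 0);	/* arm the notification */
		if (poll(&pfd, 1, -1) < 0)		/* sleep until sysfs_notify() */
			break;
		n = pread(fd, buf, sizeof(buf) - 1, 0);
		if (n > 0) {
			buf[n] = '\0';
			printf("cpu0 load: %s", buf);
		}
	}
	return 0;
}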
@@ -405,7 +449,9 @@ show_one(cpuinfo_transition_latency, cpu
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-show_one(cpu_utilization, util);
+#if defined(__MP_DECISION_PATCH__)
+show_one(cpu_utilization, utils);
+#endif
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
@@ -517,9 +563,6 @@ static ssize_t store_scaling_governor(st
unsigned int ret = -EINVAL;
char str_governor[16];
struct cpufreq_policy new_policy;
- char *envp[3];
- char buf1[64];
- char buf2[64];
ret = cpufreq_get_policy(&new_policy, policy->cpu);
if (ret)
@@ -540,15 +583,6 @@ static ssize_t store_scaling_governor(st
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
- sysfs_notify(&policy->kobj, NULL, "scaling_governor");
-
- snprintf(buf1, sizeof(buf1), "GOV=%s", policy->governor->name);
- snprintf(buf2, sizeof(buf2), "CPU=%u", policy->cpu);
- envp[0] = buf1;
- envp[1] = buf2;
- envp[2] = NULL;
- kobject_uevent_env(cpufreq_global_kobject, KOBJ_ADD, envp);
-
if (ret)
return ret;
else
@@ -727,7 +761,9 @@ cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
+#if defined(__MP_DECISION_PATCH__)
cpufreq_freq_attr_ro(cpu_utilization);
+#endif
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
@@ -743,8 +779,10 @@ static struct attribute *default_attrs[]
&cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
- &affected_cpus.attr,
+#if defined(__MP_DECISION_PATCH__)
&cpu_utilization.attr,
+#endif
+ &affected_cpus.attr,
&related_cpus.attr,
&scaling_governor.attr,
&scaling_driver.attr,
@@ -776,6 +814,27 @@ static ssize_t show(struct kobject *kobj
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ unsigned int cpu;
+ unsigned long flags;
+#endif
+
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = cpufreq_cpu_peek(policy->cpu);
+ if (!policy) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return -EINVAL;
+ }
+ cpu = policy->cpu;
+ if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+		pr_warn("%s: aborted because cpu%u is going down\n",
+			__func__, cpu);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -792,6 +851,9 @@ static ssize_t show(struct kobject *kobj
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
return ret;
}
@@ -801,6 +863,27 @@ static ssize_t store(struct kobject *kob
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ unsigned int cpu;
+ unsigned long flags;
+#endif
+
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ spin_lock_irqsave(&cpufreq_driver_lock, flags);
+ policy = cpufreq_cpu_peek(policy->cpu);
+ if (!policy) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ return -EINVAL;
+ }
+ cpu = policy->cpu;
+ if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+		pr_warn("%s: aborted because cpu%u is going down\n",
+			__func__, cpu);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -817,6 +900,9 @@ static ssize_t store(struct kobject *kob
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
return ret;
}
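One subtlety in the show()/store() hunks above: mutex_trylock() is called while cpufreq_driver_lock, a spinlock, is held with interrupts off. That is only legal because mutex_trylock(), unlike mutex_lock(), never sleeps; it either takes the mutex immediately or fails. The skeleton of the pattern:

spin_lock_irqsave(&cpufreq_driver_lock, flags);	/* pins the policy */
if (!mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu))) {
	/* removal already holds the mutex: back out, never sleep here */
	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
	return -EINVAL;
}
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* ... handler body runs mutex-protected; unlocked on the exit path ... */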
@@ -1277,8 +1363,13 @@ static int __cpufreq_remove_dev(struct s
kobj = &data->kobj;
cmp = &data->kobj_unregister;
unlock_policy_rwsem_write(cpu);
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_lock(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
kobject_put(kobj);
-
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
@@ -1980,7 +2071,6 @@ no_policy:
return ret;
}
-
int cpufreq_set_limit(unsigned int flag, unsigned int value)
{
unsigned int max_value = 0;
@@ -2039,13 +2129,7 @@ int cpufreq_set_limit(unsigned int flag,
max_value = user_max_freq_limit;
}
- /* thermald */
- if (freq_limit_start_flag & USER_MAX_BIT) {
- if (max_value > user_max_freq_limit)
- max_value = user_max_freq_limit;
- }
-
	/* set min freq */
+#if 0
if (freq_limit_start_flag & TOUCH_BOOSTER_FIRST_BIT)
min_value = TOUCH_BOOSTER_FIRST_FREQ_LIMIT;
else if (freq_limit_start_flag & TOUCH_BOOSTER_SECOND_BIT)
@@ -2053,13 +2137,13 @@ int cpufreq_set_limit(unsigned int flag,
else if (freq_limit_start_flag & TOUCH_BOOSTER_BIT)
min_value = TOUCH_BOOSTER_FREQ_LIMIT;
else
		min_value = MIN_FREQ_LIMIT;
+#endif
	/* cpufreq_min_limit */
+#if 0
if (freq_limit_start_flag & APPS_MIN_BIT) {
if (min_value < app_min_freq_limit)
min_value = app_min_freq_limit;
	}
+#endif
/* user */
if (freq_limit_start_flag & USER_MIN_BIT) {
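With the two blocks above wrapped in #if 0, the touch-booster and per-app floors no longer contribute; only the USER_MIN_BIT block (cut off at the hunk boundary above) can still raise min_value. For intuition, the pre-patch resolution was a simple bitmask-priority fold in which each requester may raise the floor but never lower it; a standalone sketch with made-up bit assignments and frequencies:

#include <stdio.h>

#define USER_MIN_BIT	(1u << 0)	/* hypothetical bit values */
#define APPS_MIN_BIT	(1u << 1)
#define MIN_FREQ_LIMIT	384000u		/* hypothetical kHz values */

static unsigned int resolve_min(unsigned int flags,
				unsigned int user_min, unsigned int app_min)
{
	unsigned int min = MIN_FREQ_LIMIT;

	if ((flags & APPS_MIN_BIT) && min < app_min)
		min = app_min;		/* this clause is what the patch disables */
	if ((flags & USER_MIN_BIT) && min < user_min)
		min = user_min;
	return min;
}

int main(void)
{
	printf("%u kHz\n", resolve_min(USER_MIN_BIT, 1026000u, 702000u));
	return 0;
}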
@@ -2111,7 +2195,6 @@ int cpufreq_set_limit_defered(unsigned i
}
#endif
-
/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
@@ -2197,7 +2280,7 @@ static int __cpuinit cpufreq_cpu_callbac
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
+ .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
@@ -2260,14 +2343,14 @@ int cpufreq_register_driver(struct cpufr
}
}
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ pr_debug("driver %s up and running\n", driver_data->name);
+
#ifdef CONFIG_SEC_DVFS
cpufreq_queue_priv.wq = create_workqueue("cpufreq_queue");
INIT_WORK(&cpufreq_queue_priv.work, cpufreq_set_limit_work);
#endif
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- pr_debug("driver %s up and running\n", driver_data->name);
-
return 0;
err_sysdev_unreg:
sysdev_driver_unregister(&cpu_sysdev_class,
@@ -2326,6 +2409,9 @@ static int __init cpufreq_core_init(void
for_each_possible_cpu(cpu) {
per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
+#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
+ mutex_init(&per_cpu(cpufreq_remove_mutex, cpu));
+#endif
}
cpufreq_global_kobject = kobject_create_and_add("cpufreq",