PATCH: apply some patches from my CM kernel (the patches originally came from CAF, not CM)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1bccb3f0c9010a659efcc039fac178d32f8b95e3..1283d333349c2443bdfc2cd3949e074fab9f9498
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -33,32 +33,6 @@
#include <trace/events/power.h>
#include <linux/semaphore.h>
-unsigned int thermal_max = 1512000;
-
-#if !defined(__MP_DECISION_PATCH__)
-#error "__MP_DECISION_PATCH__ must be defined in cpufreq.c"
-#endif
-/* Description of __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- *
- * When the kobject of cpufreq's ref count is zero in show/store function,
- * cpufreq_cpu_put() causes a deadlock because the active count of the
- * accessing file is incremented just before calling show/store at
- * fill_read(write)_buffer.
- * (This happens when show/store is called first and then the cpu_down is called
- * before the show/store function is finished)
- * So basically, cpufreq_cpu_put() in show/store must not release the kobject
- * of cpufreq. To make sure that kobj ref count of the cpufreq is not 0 in this
- * case, a per cpu mutex is used.
- * This per cpu mutex wraps the whole show/store function and kobject_put()
- * function in __cpufreq_remove_dev().
- */
- #define __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-static DEFINE_PER_CPU(struct mutex, cpufreq_remove_mutex);
-#endif
-
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
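For reference, the __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX guard removed above
reduces to the following pattern (a condensed sketch; the surrounding
cpufreq plumbing is elided):

    static DEFINE_PER_CPU(struct mutex, cpufreq_remove_mutex);

    static ssize_t guarded_sysfs_access(unsigned int cpu)
    {
            ssize_t ret;

            /* trylock, not lock: a failed trylock means the cpu is
             * going down and __cpufreq_remove_dev() holds this mutex
             * around its kobject_put(), so blocking here could
             * complete the deadlock the deleted comment describes */
            if (!mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)))
                    return -EINVAL;

            ret = 0;        /* ... the real show()/store() body ... */

            mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
            return ret;
    }

Because __cpufreq_remove_dev() took the same mutex around kobject_put(),
the policy kobject could not lose its last reference while a
show()/store() call was still in flight.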
@@ -227,27 +201,6 @@ static void cpufreq_cpu_put_sysfs(struct
__cpufreq_cpu_put(data, 1);
}
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-/* just peek to see if the cpufreq policy is available.
- * The caller must hold cpufreq_driver_lock
- */
-struct cpufreq_policy *cpufreq_cpu_peek(unsigned int cpu)
-{
- struct cpufreq_policy *data;
-
- if (cpu >= nr_cpu_ids)
- return NULL;
-
- if (!cpufreq_driver)
- return NULL;
-
- /* get the CPU */
- data = per_cpu(cpufreq_cpu_data, cpu);
-
- return data;
-}
-#endif
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -340,30 +293,31 @@ void cpufreq_notify_transition(struct cp
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
- if (likely(policy) && likely(policy->cpu == freqs->cpu))
+ if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
policy->cur = freqs->new;
+ sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq");
+ }
break;
}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
-
-#if defined(__MP_DECISION_PATCH__)
-/*
- * cpufreq_notify_utilization - notify CPU userspace abt CPU utilization
+/**
+ * cpufreq_notify_utilization - notify userspace about CPU utilization
* change
*
- * This function calls the sysfs notifiers function.
- * It is called every ondemand load evaluation to compute CPU loading.
+ * This function is called every time the CPU load is evaluated by the
+ * ondemand governor. It notifies userspace of CPU load changes via sysfs.
*/
void cpufreq_notify_utilization(struct cpufreq_policy *policy,
- unsigned int utils)
+ unsigned int util)
{
if (policy)
- policy->utils = utils;
+ policy->util = util;
+
+	if (policy && policy->util >= MIN_CPU_UTIL_NOTIFY)
+ sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
- sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
}
-#endif
/*********************************************************************
* SYSFS INTERFACE *
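The sysfs_notify() calls added above follow the standard sysfs poll
protocol: a sleeping waiter is woken with POLLPRI on the attribute.
A minimal userspace consumer might look like this (a sketch; the cpu0
path is the usual cpufreq sysfs location, and MIN_CPU_UTIL_NOTIFY gates
how often cpu_utilization actually fires):

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[16];
            struct pollfd pfd = { .events = POLLPRI | POLLERR };

            pfd.fd = open("/sys/devices/system/cpu/cpu0/cpufreq/"
                          "cpu_utilization", O_RDONLY);
            if (pfd.fd < 0)
                    return 1;

            for (;;) {
                    /* read first to arm the notification, then poll */
                    pread(pfd.fd, buf, sizeof(buf), 0);
                    if (poll(&pfd, 1, -1) > 0) {
                            ssize_t n = pread(pfd.fd, buf, sizeof(buf), 0);
                            if (n > 0)
                                    fwrite(buf, 1, n, stdout);
                    }
            }
    }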
@@ -445,23 +399,13 @@ static ssize_t show_##file_name \
return sprintf(buf, "%u\n", policy->object); \
}
-#define findmax( a, b ) ( ((a) > (b)) ? (a) : (b) )
-
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-
-static ssize_t show_thermal_max_freq(struct cpufreq_policy *policy, char *buf)
-{
- return sprintf(buf, "%u\n", thermal_max);
-}
-
-#if defined(__MP_DECISION_PATCH__)
-show_one(cpu_utilization, utils);
-#endif
+show_one(cpu_utilization, util);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
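For reference, show_one(cpu_utilization, util) expands into roughly this
accessor (the signature is reconstructed from the visible macro lines in
the hunk above):

    static ssize_t show_cpu_utilization(struct cpufreq_policy *policy,
                                        char *buf)
    {
            return sprintf(buf, "%u\n", policy->util);
    }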
@@ -528,27 +472,12 @@ static ssize_t store_scaling_max_freq
cpufreq_set_limit_defered(USER_MAX_START, value);
}
- thermal_max = findmax(policy->max, thermal_max);
-
return count;
}
#else
store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);
#endif
-static ssize_t store_thermal_max_freq
- (struct cpufreq_policy *policy, const char *buf, size_t count)
-{
- unsigned int ret = -EINVAL;
- unsigned int value = 0;
-
- ret = sscanf(buf, "%u", &value);
- if (ret != 1)
- return -EINVAL;
-
- thermal_max = value;
- return count;
-}
/**
* show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
@@ -588,6 +517,9 @@ static ssize_t store_scaling_governor(st
unsigned int ret = -EINVAL;
char str_governor[16];
struct cpufreq_policy new_policy;
+ char *envp[3];
+ char buf1[64];
+ char buf2[64];
ret = cpufreq_get_policy(&new_policy, policy->cpu);
if (ret)
@@ -608,6 +540,15 @@ static ssize_t store_scaling_governor(st
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
+ sysfs_notify(&policy->kobj, NULL, "scaling_governor");
+
+ snprintf(buf1, sizeof(buf1), "GOV=%s", policy->governor->name);
+ snprintf(buf2, sizeof(buf2), "CPU=%u", policy->cpu);
+ envp[0] = buf1;
+ envp[1] = buf2;
+ envp[2] = NULL;
+ kobject_uevent_env(cpufreq_global_kobject, KOBJ_ADD, envp);
+
if (ret)
return ret;
else
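The kobject_uevent_env() call added here is visible to userspace as a
regular uevent on the kernel netlink socket. A minimal listener (a
sketch; in practice udev or the init system consumes this socket) would
be:

    #include <linux/netlink.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_nl addr = {
                    .nl_family = AF_NETLINK,
                    .nl_groups = 1,   /* kernel uevent broadcast group */
            };
            char buf[4096];
            int fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_KOBJECT_UEVENT);

            if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
                    return 1;

            for (;;) {
                    ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
                    if (len <= 0)
                            continue;
                    buf[len] = '\0';
                    /* payload is "add@<path>" followed by NUL-separated
                     * KEY=value strings; pick out the GOV=/CPU= pairs */
                    for (char *p = buf; p < buf + len; p += strlen(p) + 1)
                            if (!strncmp(p, "GOV=", 4) ||
                                !strncmp(p, "CPU=", 4))
                                    printf("%s\n", p);
            }
    }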
@@ -786,30 +727,24 @@ cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
-#if defined(__MP_DECISION_PATCH__)
cpufreq_freq_attr_ro(cpu_utilization);
-#endif
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
#ifdef CONFIG_VDD_USERSPACE
-cpufreq_freq_attr_rw(thermal_max_freq);
define_one_global_rw(vdd_levels);
#endif
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
- &thermal_max_freq.attr,
&cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
-#if defined(__MP_DECISION_PATCH__)
- &cpu_utilization.attr,
-#endif
&affected_cpus.attr,
+ &cpu_utilization.attr,
&related_cpus.attr,
&scaling_governor.attr,
&scaling_driver.attr,
@@ -841,27 +776,6 @@ static ssize_t show(struct kobject *kobj
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- unsigned int cpu;
- unsigned long flags;
-#endif
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_peek(policy->cpu);
- if (!policy) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EINVAL;
- }
- cpu = policy->cpu;
- if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_info("!WARN %s failed because cpu%u is going down\n",
- __func__, cpu);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -878,9 +792,6 @@ static ssize_t show(struct kobject *kobj
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
return ret;
}
@@ -890,27 +801,6 @@ static ssize_t store(struct kobject *kob
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- unsigned int cpu;
- unsigned long flags;
-#endif
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_peek(policy->cpu);
- if (!policy) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EINVAL;
- }
- cpu = policy->cpu;
- if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_info("!WARN %s failed because cpu%u is going down\n",
- __func__, cpu);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -927,9 +817,6 @@ static ssize_t store(struct kobject *kob
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
return ret;
}
@@ -1390,13 +1277,8 @@ static int __cpufreq_remove_dev(struct s
kobj = &data->kobj;
cmp = &data->kobj_unregister;
unlock_policy_rwsem_write(cpu);
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_lock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
kobject_put(kobj);
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
+
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
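The wait the comment above refers to is the usual kobject teardown
handshake: the kobject's release() hook fires a completion once the last
reference is dropped, and the remover blocks on it (condensed sketch of
the pattern; cmp points at data->kobj_unregister):

    static void cpufreq_sysfs_release(struct kobject *kobj)
    {
            struct cpufreq_policy *policy = to_policy(kobj);

            /* last reference is gone: wake __cpufreq_remove_dev() */
            complete(&policy->kobj_unregister);
    }

    /* ...back in __cpufreq_remove_dev(): */
    kobject_put(kobj);              /* drop our reference        */
    wait_for_completion(cmp);       /* wait for release() above  */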
@@ -2098,6 +1980,7 @@ no_policy:
return ret;
}
+
int cpufreq_set_limit(unsigned int flag, unsigned int value)
{
unsigned int max_value = 0;
@@ -2156,6 +2039,12 @@ int cpufreq_set_limit(unsigned int flag,
max_value = user_max_freq_limit;
}
+	/* thermald: clamp max_value to the userspace max freq limit */
+ if (freq_limit_start_flag & USER_MAX_BIT) {
+ if (max_value > user_max_freq_limit)
+ max_value = user_max_freq_limit;
+ }
+
/* set min freq */
if (freq_limit_start_flag & TOUCH_BOOSTER_FIRST_BIT)
min_value = TOUCH_BOOSTER_FIRST_FREQ_LIMIT;
@@ -2222,6 +2111,7 @@ int cpufreq_set_limit_defered(unsigned i
}
#endif
+
/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
@@ -2307,7 +2197,7 @@ static int __cpuinit cpufreq_cpu_callbac
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
+ .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
@@ -2370,14 +2260,14 @@ int cpufreq_register_driver(struct cpufr
}
}
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- pr_debug("driver %s up and running\n", driver_data->name);
-
#ifdef CONFIG_SEC_DVFS
cpufreq_queue_priv.wq = create_workqueue("cpufreq_queue");
INIT_WORK(&cpufreq_queue_priv.work, cpufreq_set_limit_work);
#endif
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ pr_debug("driver %s up and running\n", driver_data->name);
+
return 0;
err_sysdev_unreg:
sysdev_driver_unregister(&cpu_sysdev_class,
@@ -2436,9 +2326,6 @@ static int __init cpufreq_core_init(void
for_each_possible_cpu(cpu) {
per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_init(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
}
cpufreq_global_kobject = kobject_create_and_add("cpufreq",