Initial SPH-L710 JB Source
/drivers/cpufreq/cpufreq.c
blob:a522683c579457f51eb8452b3eb804d32181f44c -> blob:bbde88eedb91db2c78b5d23a1ea63466dea28611
--- drivers/cpufreq/cpufreq.c
+++ drivers/cpufreq/cpufreq.c
@@ -33,30 +33,6 @@
#include <trace/events/power.h>
#include <linux/semaphore.h>
-#if !defined(__MP_DECISION_PATCH__)
-#error "__MP_DECISION_PATCH__ must be defined in cpufreq.c"
-#endif
-/* Description of __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- *
- * When the kobject of cpufreq's ref count is zero in show/store function,
- * cpufreq_cpu_put() causes a deadlock because the active count of the
- * accessing file is incremented just before calling show/store at
- * fill_read(write)_buffer.
- * (This happens when show/store is called first and then the cpu_down is called
- * before the show/store function is finished)
- * So basically, cpufreq_cpu_put() in show/store must not release the kobject
- * of cpufreq. To make sure that kobj ref count of the cpufreq is not 0 in this
- * case, a per cpu mutex is used.
- * This per cpu mutex wraps the whole show/store function and kobject_put()
- * function in __cpufreq_remove_dev().
- */
- #define __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-static DEFINE_PER_CPU(struct mutex, cpufreq_remove_mutex);
-#endif
-
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
@@ -225,27 +201,6 @@ static void cpufreq_cpu_put_sysfs(struct
__cpufreq_cpu_put(data, 1);
}
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-/* just peek to see if the cpufreq policy is available.
- * The caller must hold cpufreq_driver_lock
- */
-struct cpufreq_policy *cpufreq_cpu_peek(unsigned int cpu)
-{
- struct cpufreq_policy *data;
-
- if (cpu >= nr_cpu_ids)
- return NULL;
-
- if (!cpufreq_driver)
- return NULL;
-
- /* get the CPU */
- data = per_cpu(cpufreq_cpu_data, cpu);
-
- return data;
-}
-#endif
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -338,30 +293,31 @@ void cpufreq_notify_transition(struct cp
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
- if (likely(policy) && likely(policy->cpu == freqs->cpu))
+ if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
policy->cur = freqs->new;
+ sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq");
+ }
break;
}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
-
-#if defined(__MP_DECISION_PATCH__)
-/*
- * cpufreq_notify_utilization - notify CPU userspace abt CPU utilization
+/**
+ * cpufreq_notify_utilization - notify CPU userspace about CPU utilization
* change
*
- * This function calls the sysfs notifiers function.
- * It is called every ondemand load evaluation to compute CPU loading.
+ * This function is called every time the CPU load is evaluated by the
+ * ondemand governor. It notifies userspace of cpu load changes via sysfs.
*/
void cpufreq_notify_utilization(struct cpufreq_policy *policy,
- unsigned int utils)
+ unsigned int util)
{
if (policy)
- policy->utils = utils;
+ policy->util = util;
+
+	if (policy && policy->util >= MIN_CPU_UTIL_NOTIFY)
+ sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
- sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
}
-#endif
/*********************************************************************
* SYSFS INTERFACE *
@@ -449,9 +405,7 @@ show_one(cpuinfo_transition_latency, cpu
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-#if defined(__MP_DECISION_PATCH__)
-show_one(cpu_utilization, utils);
-#endif
+show_one(cpu_utilization, util);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
@@ -563,6 +517,9 @@ static ssize_t store_scaling_governor(st
unsigned int ret = -EINVAL;
char str_governor[16];
struct cpufreq_policy new_policy;
+ char *envp[3];
+ char buf1[64];
+ char buf2[64];
ret = cpufreq_get_policy(&new_policy, policy->cpu);
if (ret)
@@ -583,6 +540,15 @@ static ssize_t store_scaling_governor(st
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
+ sysfs_notify(&policy->kobj, NULL, "scaling_governor");
+
+ snprintf(buf1, sizeof(buf1), "GOV=%s", policy->governor->name);
+ snprintf(buf2, sizeof(buf2), "CPU=%u", policy->cpu);
+ envp[0] = buf1;
+ envp[1] = buf2;
+ envp[2] = NULL;
+ kobject_uevent_env(cpufreq_global_kobject, KOBJ_ADD, envp);
+
if (ret)
return ret;
else
@@ -698,6 +664,59 @@ static ssize_t show_bios_limit(struct cp
return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}
+#ifdef CONFIG_VDD_USERSPACE
+extern ssize_t acpuclk_get_vdd_levels_str(char *buf);
+static ssize_t show_vdd_levels(struct kobject *a, struct attribute *b, char *buf)
+{
+ return acpuclk_get_vdd_levels_str(buf);
+}
+
+extern void acpuclk_set_vdd(unsigned acpu_khz, int vdd);
+static ssize_t store_vdd_levels(struct kobject *a, struct attribute *b, const char *buf, size_t count)
+{
+ int i = 0, j;
+ int pair[2] = { 0, 0 };
+ int sign = 0;
+ if (count < 1)
+ return 0;
+ if (buf[0] == '-') {
+ sign = -1;
+ i++;
+ }
+ else if (buf[0] == '+') {
+ sign = 1;
+ i++;
+ }
+ for (j = 0; i < count; i++) {
+ char c = buf[i];
+ if ((c >= '0') && (c <= '9')) {
+ pair[j] *= 10;
+ pair[j] += (c - '0');
+ }
+ else if ((c == ' ') || (c == '\t')) {
+ if (pair[j] != 0) {
+ j++;
+ if ((sign != 0) || (j > 1))
+ break;
+ }
+ }
+ else
+ break;
+ }
+ if (sign != 0) {
+ if (pair[0] > 0)
+ acpuclk_set_vdd(0, sign * pair[0]);
+ }
+ else {
+ if ((pair[0] > 0) && (pair[1] > 0))
+ acpuclk_set_vdd((unsigned)pair[0], pair[1]);
+ else
+ return -EINVAL;
+ }
+ return count;
+}
+#endif /* CONFIG_VDD_USERSPACE */
+
cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
@@ -708,24 +727,24 @@ cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
-#if defined(__MP_DECISION_PATCH__)
cpufreq_freq_attr_ro(cpu_utilization);
-#endif
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);
+#ifdef CONFIG_VDD_USERSPACE
+define_one_global_rw(vdd_levels);
+#endif
+
static struct attribute *default_attrs[] = {
&cpuinfo_min_freq.attr,
&cpuinfo_max_freq.attr,
&cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
-#if defined(__MP_DECISION_PATCH__)
- &cpu_utilization.attr,
-#endif
&affected_cpus.attr,
+ &cpu_utilization.attr,
&related_cpus.attr,
&scaling_governor.attr,
&scaling_driver.attr,
@@ -734,6 +753,18 @@ static struct attribute *default_attrs[]
NULL
};
+#ifdef CONFIG_VDD_USERSPACE
+static struct attribute *vddtbl_attrs[] = {
+ &vdd_levels.attr,
+ NULL
+};
+
+static struct attribute_group vddtbl_attr_group = {
+ .attrs = vddtbl_attrs,
+ .name = "vdd_table",
+};
+#endif /* CONFIG_VDD_USERSPACE */
+
struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);
@@ -745,27 +776,6 @@ static ssize_t show(struct kobject *kobj
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- unsigned int cpu;
- unsigned long flags;
-#endif
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_peek(policy->cpu);
- if (!policy) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EINVAL;
- }
- cpu = policy->cpu;
- if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_info("!WARN %s failed because cpu%u is going down\n",
- __func__, cpu);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -782,9 +792,6 @@ static ssize_t show(struct kobject *kobj
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
return ret;
}
@@ -794,27 +801,6 @@ static ssize_t store(struct kobject *kob
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- unsigned int cpu;
- unsigned long flags;
-#endif
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_peek(policy->cpu);
- if (!policy) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EINVAL;
- }
- cpu = policy->cpu;
- if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_info("!WARN %s failed because cpu%u is going down\n",
- __func__, cpu);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -831,9 +817,6 @@ static ssize_t store(struct kobject *kob
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
return ret;
}
@@ -1291,13 +1274,8 @@ static int __cpufreq_remove_dev(struct s
kobj = &data->kobj;
cmp = &data->kobj_unregister;
unlock_policy_rwsem_write(cpu);
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_lock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
kobject_put(kobj);
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
+
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
@@ -1874,6 +1852,7 @@ static int __cpufreq_set_policy(struct c
/* start new governor */
data->governor = policy->governor;
+	if (!cpu_online(1)) cpu_up(1); /* NOTE(review): forces CPU1 online before governor start -- confirm this hotplug hack is intended */
if (__cpufreq_governor(data, CPUFREQ_GOV_START)) {
/* new governor failed, so re-start old one */
pr_debug("starting governor %s failed\n",
@@ -1998,6 +1977,7 @@ no_policy:
return ret;
}
+
int cpufreq_set_limit(unsigned int flag, unsigned int value)
{
unsigned int max_value = 0;
@@ -2056,6 +2036,12 @@ int cpufreq_set_limit(unsigned int flag,
max_value = user_max_freq_limit;
}
+ /* thermald */
+ if (freq_limit_start_flag & USER_MAX_BIT) {
+ if (max_value > user_max_freq_limit)
+ max_value = user_max_freq_limit;
+ }
+
/* set min freq */
if (freq_limit_start_flag & TOUCH_BOOSTER_FIRST_BIT)
min_value = TOUCH_BOOSTER_FIRST_FREQ_LIMIT;
@@ -2120,8 +2106,10 @@ int cpufreq_set_limit_defered(unsigned i
return ret;
}
+EXPORT_SYMBOL(cpufreq_set_limit_defered);
#endif
+
/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
@@ -2207,7 +2195,7 @@ static int __cpuinit cpufreq_cpu_callbac
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
+ .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
@@ -2270,14 +2258,14 @@ int cpufreq_register_driver(struct cpufr
}
}
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- pr_debug("driver %s up and running\n", driver_data->name);
-
#ifdef CONFIG_SEC_DVFS
cpufreq_queue_priv.wq = create_workqueue("cpufreq_queue");
INIT_WORK(&cpufreq_queue_priv.work, cpufreq_set_limit_work);
#endif
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ pr_debug("driver %s up and running\n", driver_data->name);
+
return 0;
err_sysdev_unreg:
sysdev_driver_unregister(&cpu_sysdev_class,
@@ -2329,13 +2317,13 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_dri
static int __init cpufreq_core_init(void)
{
int cpu;
+#ifdef CONFIG_VDD_USERSPACE
+ int rc;
+#endif /* CONFIG_VDD_USERSPACE */
for_each_possible_cpu(cpu) {
per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_init(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
}
cpufreq_global_kobject = kobject_create_and_add("cpufreq",
@@ -2346,6 +2334,10 @@ static int __init cpufreq_core_init(void
#endif
register_syscore_ops(&cpufreq_syscore_ops);
+#ifdef CONFIG_VDD_USERSPACE
+ rc = sysfs_create_group(cpufreq_global_kobject, &vddtbl_attr_group);
+#endif /* CONFIG_VDD_USERSPACE */
+
return 0;
}
core_initcall(cpufreq_core_init);