Implement sysfs interface for vdd
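
Within drivers/cpufreq/cpufreq.c this change:

 - drops the __MP_DECISION_PATCH__ and __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
   guards together with the per-cpu cpufreq_remove_mutex and the
   cpufreq_cpu_peek() helper they required
 - renames policy->utils to policy->util and always builds the
   cpu_utilization attribute
 - emits sysfs_notify() on scaling_cur_freq after a frequency transition,
   on cpu_utilization when the load reported by the governor reaches
   MIN_CPU_UTIL_NOTIFY, and on scaling_governor when the governor changes
   (plus a GOV=/CPU= uevent on the global cpufreq kobject)
 - removes the hard 1512000 kHz cap in cpufreq_add_dev() and the
   cpu_scaling()/cpu_nonscaling() calls in __cpufreq_driver_target(),
   applies the user max limit (thermald) in cpufreq_set_limit(), and
   exports cpufreq_set_limit_defered()

policy->util and MIN_CPU_UTIL_NOTIFY are presumably declared in the
matching include/linux/cpufreq.h change (not shown here).

Userspace can pick up the sysfs_notify() events by poll()ing the
attribute files. A minimal sketch of a listener follows; the path, CPU
number and buffer size are illustrative assumptions, and error handling
is trimmed:

/*
 * Minimal sketch of a userspace listener for the notified attributes.
 * Path, CPU number and buffer size are assumptions for illustration.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int fd = open("/sys/devices/system/cpu/cpu0/cpufreq/cpu_utilization",
		      O_RDONLY);
	if (fd < 0)
		return 1;

	/* an initial read arms the sysfs poll support */
	read(fd, buf, sizeof(buf) - 1);

	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };

		/* sysfs_notify() wakes pollers with POLLPRI | POLLERR */
		if (poll(&pfd, 1, -1) <= 0)
			break;

		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);
		if (n <= 0)
			break;
		buf[n] = '\0';
		printf("cpu_utilization: %s", buf);
	}

	close(fd);
	return 0;
}

The GOV=/CPU= uevent emitted on governor changes can likewise be caught
by a udev rule or any netlink uevent listener.
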
drivers/cpufreq/cpufreq.c
index 8cebe90f8c495377ac45bf5a6d887fe8b1f15754..bbde88eedb91db2c78b5d23a1ea63466dea28611
--- drivers/cpufreq/cpufreq.c
+++ drivers/cpufreq/cpufreq.c
@@ -28,36 +28,11 @@
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
-#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <trace/events/power.h>
#include <linux/semaphore.h>
-#if !defined(__MP_DECISION_PATCH__)
-#error "__MP_DECISION_PATCH__ must be defined in cpufreq.c"
-#endif
-/* Description of __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- *
- * When the kobject of cpufreq's ref count is zero in show/store function,
- * cpufreq_cpu_put() causes a deadlock because the active count of the
- * accessing file is incremented just before calling show/store at
- * fill_read(write)_buffer.
- * (This happens when show/store is called first and then the cpu_down is called
- * before the show/store function is finished)
- * So basically, cpufreq_cpu_put() in show/store must not release the kobject
- * of cpufreq. To make sure that kobj ref count of the cpufreq is not 0 in this
- * case, a per cpu mutex is used.
- * This per cpu mutex wraps the whole show/store function and kobject_put()
- * function in __cpufreq_remove_dev().
- */
- #define __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-static DEFINE_PER_CPU(struct mutex, cpufreq_remove_mutex);
-#endif
-
/**
* The "cpufreq driver" - the arch- or hardware-dependent low
* level driver of CPUFreq support, and its spinlock. This lock
@@ -226,27 +201,6 @@ static void cpufreq_cpu_put_sysfs(struct
__cpufreq_cpu_put(data, 1);
}
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
-/* just peek to see if the cpufreq policy is available.
- * The caller must hold cpufreq_driver_lock
- */
-struct cpufreq_policy *cpufreq_cpu_peek(unsigned int cpu)
-{
- struct cpufreq_policy *data;
-
- if (cpu >= nr_cpu_ids)
- return NULL;
-
- if (!cpufreq_driver)
- return NULL;
-
- /* get the CPU */
- data = per_cpu(cpufreq_cpu_data, cpu);
-
- return data;
-}
-#endif
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -339,30 +293,31 @@ void cpufreq_notify_transition(struct cp
trace_cpu_frequency(freqs->new, freqs->cpu);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
- if (likely(policy) && likely(policy->cpu == freqs->cpu))
+ if (likely(policy) && likely(policy->cpu == freqs->cpu)) {
policy->cur = freqs->new;
+ sysfs_notify(&policy->kobj, NULL, "scaling_cur_freq");
+ }
break;
}
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
-
-#if defined(__MP_DECISION_PATCH__)
-/*
- * cpufreq_notify_utilization - notify CPU userspace abt CPU utilization
+/**
+ * cpufreq_notify_utilization - notify userspace about CPU utilization
* change
*
- * This function calls the sysfs notifiers function.
- * It is called every ondemand load evaluation to compute CPU loading.
+ * This function is called every time the CPU load is evaluated by the
+ * ondemand governor. It notifies userspace of CPU load changes via sysfs.
*/
void cpufreq_notify_utilization(struct cpufreq_policy *policy,
- unsigned int utils)
+ unsigned int util)
{
if (policy)
- policy->utils = utils;
+ policy->util = util;
+
+	if (policy && policy->util >= MIN_CPU_UTIL_NOTIFY)
+ sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
- sysfs_notify(&policy->kobj, NULL, "cpu_utilization");
}
-#endif
/*********************************************************************
* SYSFS INTERFACE *
@@ -450,9 +405,7 @@ show_one(cpuinfo_transition_latency, cpu
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-#if defined(__MP_DECISION_PATCH__)
-show_one(cpu_utilization, utils);
-#endif
+show_one(cpu_utilization, util);
static int __cpufreq_set_policy(struct cpufreq_policy *data,
struct cpufreq_policy *policy);
@@ -564,6 +517,9 @@ static ssize_t store_scaling_governor(st
unsigned int ret = -EINVAL;
char str_governor[16];
struct cpufreq_policy new_policy;
+ char *envp[3];
+ char buf1[64];
+ char buf2[64];
ret = cpufreq_get_policy(&new_policy, policy->cpu);
if (ret)
@@ -584,6 +540,15 @@ static ssize_t store_scaling_governor(st
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
+ sysfs_notify(&policy->kobj, NULL, "scaling_governor");
+
+ snprintf(buf1, sizeof(buf1), "GOV=%s", policy->governor->name);
+ snprintf(buf2, sizeof(buf2), "CPU=%u", policy->cpu);
+ envp[0] = buf1;
+ envp[1] = buf2;
+ envp[2] = NULL;
+ kobject_uevent_env(cpufreq_global_kobject, KOBJ_ADD, envp);
+
if (ret)
return ret;
else
@@ -762,9 +727,7 @@ cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
-#if defined(__MP_DECISION_PATCH__)
cpufreq_freq_attr_ro(cpu_utilization);
-#endif
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
@@ -780,10 +743,8 @@ static struct attribute *default_attrs[]
&cpuinfo_transition_latency.attr,
&scaling_min_freq.attr,
&scaling_max_freq.attr,
-#if defined(__MP_DECISION_PATCH__)
- &cpu_utilization.attr,
-#endif
&affected_cpus.attr,
+ &cpu_utilization.attr,
&related_cpus.attr,
&scaling_governor.attr,
&scaling_driver.attr,
@@ -815,27 +776,6 @@ static ssize_t show(struct kobject *kobj
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- unsigned int cpu;
- unsigned long flags;
-#endif
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_peek(policy->cpu);
- if (!policy) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EINVAL;
- }
- cpu = policy->cpu;
- if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_info("!WARN %s failed because cpu%u is going down\n",
- __func__, cpu);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -852,9 +792,6 @@ static ssize_t show(struct kobject *kobj
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
return ret;
}
@@ -864,27 +801,6 @@ static ssize_t store(struct kobject *kob
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- unsigned int cpu;
- unsigned long flags;
-#endif
-
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- spin_lock_irqsave(&cpufreq_driver_lock, flags);
- policy = cpufreq_cpu_peek(policy->cpu);
- if (!policy) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return -EINVAL;
- }
- cpu = policy->cpu;
- if (mutex_trylock(&per_cpu(cpufreq_remove_mutex, cpu)) == 0) {
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- pr_info("!WARN %s failed because cpu%u is going down\n",
- __func__, cpu);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
policy = cpufreq_cpu_get_sysfs(policy->cpu);
if (!policy)
goto no_policy;
@@ -901,9 +817,6 @@ static ssize_t store(struct kobject *kob
fail:
cpufreq_cpu_put_sysfs(policy);
no_policy:
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
return ret;
}
@@ -1205,9 +1118,6 @@ static int cpufreq_add_dev(struct sys_de
pr_debug("initialization failed\n");
goto err_unlock_policy;
}
-
- if (policy->max > 1512000) policy->max = 1512000;
-
policy->user_policy.min = policy->min;
policy->user_policy.max = policy->max;
@@ -1364,13 +1274,8 @@ static int __cpufreq_remove_dev(struct s
kobj = &data->kobj;
cmp = &data->kobj_unregister;
unlock_policy_rwsem_write(cpu);
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_lock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
kobject_put(kobj);
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_unlock(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
+
/* we need to make sure that the underlying kobj is actually
* not referenced anymore by anybody before we proceed with
* unloading.
@@ -1704,12 +1609,6 @@ int __cpufreq_driver_target(struct cpufr
target_freq, relation);
if (cpu_online(policy->cpu) && cpufreq_driver->target)
retval = cpufreq_driver->target(policy, target_freq, relation);
- if (likely(retval != -EINVAL)) {
- if (target_freq == policy->max)
- cpu_nonscaling(policy->cpu);
- else
- cpu_scaling(policy->cpu);
- }
return retval;
}
@@ -2078,6 +1977,7 @@ no_policy:
return ret;
}
+
int cpufreq_set_limit(unsigned int flag, unsigned int value)
{
unsigned int max_value = 0;
@@ -2136,6 +2036,12 @@ int cpufreq_set_limit(unsigned int flag,
max_value = user_max_freq_limit;
}
+ /* thermald */
+ if (freq_limit_start_flag & USER_MAX_BIT) {
+ if (max_value > user_max_freq_limit)
+ max_value = user_max_freq_limit;
+ }
+
/* set min freq */
if (freq_limit_start_flag & TOUCH_BOOSTER_FIRST_BIT)
min_value = TOUCH_BOOSTER_FIRST_FREQ_LIMIT;
@@ -2200,8 +2106,10 @@ int cpufreq_set_limit_defered(unsigned i
return ret;
}
+EXPORT_SYMBOL(cpufreq_set_limit_defered);
#endif
+
/**
* cpufreq_update_policy - re-evaluate an existing cpufreq policy
* @cpu: CPU which shall be re-evaluated
@@ -2287,7 +2195,7 @@ static int __cpuinit cpufreq_cpu_callbac
}
static struct notifier_block __refdata cpufreq_cpu_notifier = {
- .notifier_call = cpufreq_cpu_callback,
+ .notifier_call = cpufreq_cpu_callback,
};
/*********************************************************************
@@ -2350,14 +2258,14 @@ int cpufreq_register_driver(struct cpufr
}
}
- register_hotcpu_notifier(&cpufreq_cpu_notifier);
- pr_debug("driver %s up and running\n", driver_data->name);
-
#ifdef CONFIG_SEC_DVFS
cpufreq_queue_priv.wq = create_workqueue("cpufreq_queue");
INIT_WORK(&cpufreq_queue_priv.work, cpufreq_set_limit_work);
#endif
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
+ pr_debug("driver %s up and running\n", driver_data->name);
+
return 0;
err_sysdev_unreg:
sysdev_driver_unregister(&cpu_sysdev_class,
@@ -2416,9 +2324,6 @@ static int __init cpufreq_core_init(void
for_each_possible_cpu(cpu) {
per_cpu(cpufreq_policy_cpu, cpu) = -1;
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
-#ifdef __CPUFREQ_KOBJ_DEL_DEADLOCK_FIX
- mutex_init(&per_cpu(cpufreq_remove_mutex, cpu));
-#endif
}
cpufreq_global_kobject = kobject_create_and_add("cpufreq",