PATCH: apply patches from my CM kernel (originally from CAF, not CM)
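In short: this backs out the cpu-utilization tracking that CAF later added to msm_rq_stats.c (the per-CPU load bookkeeping, the cpufreq transition and CPU hotplug notifiers, and the cpu_normalized_load sysfs node) and switches the remaining run_queue_avg, run_queue_poll_ms and def_timer_ms attributes from static kobj_attribute definitions to dynamically allocated ones built by the new MSM_RQ_STATS_RO_ATTRIB/MSM_RQ_STATS_RW_ATTRIB macros. See the notes and usage sketch after the diff.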
/arch/arm/mach-msm/msm_rq_stats.c
blob:4112a317f3afbec7424bb64058cc9371a65b7785 -> blob:9dda257dfa4be74929ad72f34325cfb78459a995
--- arch/arm/mach-msm/msm_rq_stats.c
+++ arch/arm/mach-msm/msm_rq_stats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,7 +11,7 @@
*
*/
/*
- * Qualcomm MSM Runqueue Stats and cpu utilization Interface for Userspace
+ * Qualcomm MSM Runqueue Stats Interface for Userspace
*/
#include <linux/kernel.h>
#include <linux/init.h>
@@ -26,9 +26,6 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/rq_stats.h>
-#include <linux/cpufreq.h>
-#include <linux/kernel_stat.h>
-#include <linux/tick.h>
#ifdef CONFIG_SEC_DVFS_DUAL
#include <linux/cpufreq.h>
@@ -58,174 +55,6 @@ unsigned int get_rq_info(void)
EXPORT_SYMBOL(get_rq_info);
#endif
-struct notifier_block freq_transition;
-struct notifier_block cpu_hotplug;
-
-struct cpu_load_data {
- cputime64_t prev_cpu_idle;
- cputime64_t prev_cpu_wall;
- cputime64_t prev_cpu_iowait;
- unsigned int avg_load_maxfreq;
- unsigned int samples;
- unsigned int window_size;
- unsigned int cur_freq;
- unsigned int policy_max;
- cpumask_var_t related_cpus;
- struct mutex cpu_load_mutex;
-};
-
-static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
-
-static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
-{
- u64 idle_time;
- u64 cur_wall_time;
- u64 busy_time;
-
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-
- busy_time = kstat_cpu(cpu).cpustat.user;
- busy_time += kstat_cpu(cpu).cpustat.system;
- busy_time += kstat_cpu(cpu).cpustat.irq;
- busy_time += kstat_cpu(cpu).cpustat.softirq;
- busy_time += kstat_cpu(cpu).cpustat.steal;
- busy_time += kstat_cpu(cpu).cpustat.nice;
-
- idle_time = cur_wall_time - busy_time;
- if (wall)
- *wall = jiffies_to_usecs(cur_wall_time);
-
- return jiffies_to_usecs(idle_time);
-}
-
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
-{
- u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
-
- if (idle_time == -1ULL)
- return get_cpu_idle_time_jiffy(cpu, wall);
- else
- idle_time += get_cpu_iowait_time_us(cpu, wall);
-
- return idle_time;
-}
-
-static inline cputime64_t get_cpu_iowait_time(unsigned int cpu,
- cputime64_t *wall)
-{
- u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);
-
- if (iowait_time == -1ULL)
- return 0;
-
- return iowait_time;
-}
-
-static int update_average_load(unsigned int freq, unsigned int cpu)
-{
-
- struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
- cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
- unsigned int idle_time, wall_time, iowait_time;
- unsigned int cur_load, load_at_max_freq;
-
- cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
- cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);
-
- wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
- pcpu->prev_cpu_wall = cur_wall_time;
-
- idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
- pcpu->prev_cpu_idle = cur_idle_time;
-
- iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
- pcpu->prev_cpu_iowait = cur_iowait_time;
-
- if (idle_time >= iowait_time)
- idle_time -= iowait_time;
-
- if (unlikely(!wall_time || wall_time < idle_time))
- return 0;
-
- cur_load = 100 * (wall_time - idle_time) / wall_time;
-
- /* Calculate the scaled load across CPU */
- load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
-
- if (!pcpu->avg_load_maxfreq) {
- /* This is the first sample in this window*/
- pcpu->avg_load_maxfreq = load_at_max_freq;
- pcpu->window_size = wall_time;
- } else {
- /*
- * The is already a sample available in this window.
- * Compute weighted average with prev entry, so that we get
- * the precise weighted load.
- */
- pcpu->avg_load_maxfreq =
- ((pcpu->avg_load_maxfreq * pcpu->window_size) +
- (load_at_max_freq * wall_time)) /
- (wall_time + pcpu->window_size);
-
- pcpu->window_size += wall_time;
- }
-
- return 0;
-}
-
-static unsigned int report_load_at_max_freq(void)
-{
- int cpu;
- struct cpu_load_data *pcpu;
- unsigned int total_load = 0;
-
- for_each_online_cpu(cpu) {
- pcpu = &per_cpu(cpuload, cpu);
- mutex_lock(&pcpu->cpu_load_mutex);
- update_average_load(pcpu->cur_freq, cpu);
- total_load += pcpu->avg_load_maxfreq;
- pcpu->avg_load_maxfreq = 0;
- mutex_unlock(&pcpu->cpu_load_mutex);
- }
- return total_load;
-}
-
-static int cpufreq_transition_handler(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freqs = data;
- struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
- int j;
-
- switch (val) {
- case CPUFREQ_POSTCHANGE:
- for_each_cpu(j, this_cpu->related_cpus) {
- struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
- mutex_lock(&pcpu->cpu_load_mutex);
- update_average_load(freqs->old, freqs->cpu);
- pcpu->cur_freq = freqs->new;
- mutex_unlock(&pcpu->cpu_load_mutex);
- }
- break;
- }
- return 0;
-}
-
-static int cpu_hotplug_handler(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- unsigned int cpu = (unsigned long)data;
- struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
-
- switch (val) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- this_cpu->avg_load_maxfreq = 0;
- }
-
- return NOTIFY_OK;
-}
-
static void def_work_fn(struct work_struct *work)
{
int64_t diff;
@@ -329,7 +158,7 @@ void dual_boost(unsigned int boost_on)
}
#endif
-static ssize_t run_queue_avg_show(struct kobject *kobj,
+static ssize_t show_run_queue_avg(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
unsigned int val = 0;
@@ -349,8 +178,6 @@ static ssize_t run_queue_avg_show(struct
return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
}
-static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
-
static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -385,10 +212,6 @@ static ssize_t store_run_queue_poll_ms(s
return count;
}
-static struct kobj_attribute run_queue_poll_ms_attr =
- __ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
- store_run_queue_poll_ms);
-
static ssize_t show_def_timer_ms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -407,44 +230,67 @@ static ssize_t store_def_timer_ms(struct
return count;
}
-static struct kobj_attribute def_timer_ms_attr =
- __ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
- store_def_timer_ms);
+#define MSM_RQ_STATS_RO_ATTRIB(att) ({ \
+ struct attribute *attrib = NULL; \
+ struct kobj_attribute *ptr = NULL; \
+ ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
+ if (ptr) { \
+ ptr->attr.name = #att; \
+ ptr->attr.mode = S_IRUGO; \
+ ptr->show = show_##att; \
+ ptr->store = NULL; \
+ attrib = &ptr->attr; \
+ } \
+ attrib; })
+
+#define MSM_RQ_STATS_RW_ATTRIB(att) ({ \
+ struct attribute *attrib = NULL; \
+ struct kobj_attribute *ptr = NULL; \
+ ptr = kzalloc(sizeof(struct kobj_attribute), GFP_KERNEL); \
+ if (ptr) { \
+ ptr->attr.name = #att; \
+ ptr->attr.mode = S_IWUSR|S_IRUSR; \
+ ptr->show = show_##att; \
+ ptr->store = store_##att; \
+ attrib = &ptr->attr; \
+ } \
+ attrib; })
-static ssize_t show_cpu_normalized_load(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
+static int init_rq_attribs(void)
{
- return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
-}
+ int i;
+ int err = 0;
+ const int attr_count = 4;
-static struct kobj_attribute cpu_normalized_load_attr =
- __ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
- NULL);
-
-static struct attribute *rq_attrs[] = {
- &cpu_normalized_load_attr.attr,
- &def_timer_ms_attr.attr,
- &run_queue_avg_attr.attr,
- &run_queue_poll_ms_attr.attr,
- NULL,
-};
-
-static struct attribute_group rq_attr_group = {
- .attrs = rq_attrs,
-};
+ struct attribute **attribs =
+ kzalloc(sizeof(struct attribute *) * attr_count, GFP_KERNEL);
-static int init_rq_attribs(void)
-{
- int err;
+ if (!attribs)
+ goto rel;
rq_info.rq_avg = 0;
- rq_info.attr_group = &rq_attr_group;
+
+ attribs[0] = MSM_RQ_STATS_RW_ATTRIB(def_timer_ms);
+ attribs[1] = MSM_RQ_STATS_RO_ATTRIB(run_queue_avg);
+ attribs[2] = MSM_RQ_STATS_RW_ATTRIB(run_queue_poll_ms);
+ attribs[3] = NULL;
+
+ for (i = 0; i < attr_count - 1 ; i++) {
+ if (!attribs[i])
+ goto rel2;
+ }
+
+ rq_info.attr_group = kzalloc(sizeof(struct attribute_group),
+ GFP_KERNEL);
+ if (!rq_info.attr_group)
+ goto rel3;
+ rq_info.attr_group->attrs = attribs;
/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
rq_info.kobj = kobject_create_and_add("rq-stats",
&get_cpu_sysdev(0)->kobj);
if (!rq_info.kobj)
- return -ENOMEM;
+ goto rel3;
err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
if (err)
@@ -452,14 +298,24 @@ static int init_rq_attribs(void)
else
kobject_uevent(rq_info.kobj, KOBJ_ADD);
- return err;
+ if (!err)
+ return err;
+
+rel3:
+ kfree(rq_info.attr_group);
+ kfree(rq_info.kobj);
+rel2:
+ for (i = 0; i < attr_count - 1; i++)
+ kfree(attribs[i]);
+rel:
+ kfree(attribs);
+
+ return -ENOMEM;
}
static int __init msm_rq_stats_init(void)
{
int ret;
- int i;
- struct cpufreq_policy cpu_policy;
rq_wq = create_singlethread_workqueue("rq_stats");
BUG_ON(!rq_wq);
@@ -476,20 +332,6 @@ static int __init msm_rq_stats_init(void
ret = init_rq_attribs();
rq_info.init = 1;
-
- for_each_possible_cpu(i) {
- struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
- mutex_init(&pcpu->cpu_load_mutex);
- cpufreq_get_policy(&cpu_policy, i);
- pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
- cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
- }
- freq_transition.notifier_call = cpufreq_transition_handler;
- cpu_hotplug.notifier_call = cpu_hotplug_handler;
- cpufreq_register_notifier(&freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
- register_hotcpu_notifier(&cpu_hotplug);
-
return ret;
}
late_initcall(msm_rq_stats_init);
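
Two notes on the rewritten init_rq_attribs(): the MSM_RQ_STATS_*_ATTRIB macros rely on the GNU C statement-expression extension, where a ({ ... }) block evaluates to its final expression, so each invocation yields either a pointer to the freshly allocated attribute or NULL on allocation failure. The kfree(attribs[i]) calls in the unwind path are valid because struct attribute is the first member of struct kobj_attribute, so &ptr->attr carries the same address kzalloc() returned.

From userspace nothing visible changes apart from cpu_normalized_load going away (anything that polled it must be updated): the surviving nodes still live under /sys/devices/system/cpu/cpu0/rq-stats/, the path named in the init_rq_attribs() comment, and run_queue_avg still prints the tenths-scaled average as "N.M". A minimal consumer sketch, assuming that path and enough privilege to write run_queue_poll_ms (mode S_IWUSR|S_IRUSR):

/* Minimal userspace sketch for the rq-stats nodes above.  Assumes
 * the sysfs path from the init_rq_attribs() comment; writing
 * run_queue_poll_ms needs the file owner's (root's) privileges. */
#include <stdio.h>

#define RQ_DIR "/sys/devices/system/cpu/cpu0/rq-stats/"

int main(void)
{
	char buf[32];
	FILE *f;

	/* the kernel formats run_queue_avg as "val/10.val%10" */
	f = fopen(RQ_DIR "run_queue_avg", "r");
	if (!f) {
		perror("run_queue_avg");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("run queue average: %s", buf);
	fclose(f);

	/* polling interval is read and written in milliseconds */
	f = fopen(RQ_DIR "run_queue_poll_ms", "w");
	if (f) {
		fprintf(f, "100\n");	/* hypothetical 100 ms sample period */
		fclose(f);
	}
	return 0;
}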