Patch 2.6.32.33 to 2.6.32.35
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0afddc21b69673ac1034ecc94f3914fa86ccf59e..0cccb6c460da74d4f8e03214002baf41b8577ca9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -369,18 +369,11 @@ static int function_stat_show(struct seq
{
struct ftrace_profile *rec = v;
char str[KSYM_SYMBOL_LEN];
- int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ static DEFINE_MUTEX(mutex);
static struct trace_seq s;
unsigned long long avg;
#endif
- mutex_lock(&ftrace_profile_lock);
-
- /* we raced with function_profile_reset() */
- if (unlikely(rec->counter == 0)) {
- ret = -EBUSY;
- goto out;
- }

kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -390,17 +383,17 @@ static int function_stat_show(struct seq
avg = rec->time;
do_div(avg, rec->counter);

+ mutex_lock(&mutex);
trace_seq_init(&s);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
trace_print_graph_duration(avg, &s);
trace_print_seq(m, &s);
+ mutex_unlock(&mutex);
#endif
seq_putc(m, '\n');
-out:
- mutex_unlock(&ftrace_profile_lock);

- return ret;
+ return 0;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
@@ -1480,8 +1473,6 @@ static void *t_start(struct seq_file *m,
if (*pos > 0)
return t_hash_start(m, pos);
iter->flags |= FTRACE_ITER_PRINTALL;
- /* reset in case of seek/pread */
- iter->flags &= ~FTRACE_ITER_HASH;
return iter;
}

@@ -2402,7 +2393,7 @@ static const struct file_operations ftra
.open = ftrace_filter_open,
.read = seq_read,
.write = ftrace_filter_write,
- .llseek = no_llseek,
+ .llseek = ftrace_regex_lseek,
.release = ftrace_filter_release,
};

@@ -3172,7 +3163,7 @@ static int start_graph_tracing(void)
/* The cpu_boot init_task->ret_stack will never be freed */
for_each_online_cpu(cpu) {
if (!idle_task(cpu)->ret_stack)
- ftrace_graph_init_idle_task(idle_task(cpu), cpu);
+ ftrace_graph_init_task(idle_task(cpu));
}

do {
@@ -3262,49 +3253,6 @@ void unregister_ftrace_graph(void)
mutex_unlock(&ftrace_lock);
}

-static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
-
-static void
-graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
-{
- atomic_set(&t->tracing_graph_pause, 0);
- atomic_set(&t->trace_overrun, 0);
- t->ftrace_timestamp = 0;
- /* make curr_ret_stack visable before we add the ret_stack */
- smp_wmb();
- t->ret_stack = ret_stack;
-}
-
-/*
- * Allocate a return stack for the idle task. May be the first
- * time through, or it may be done by CPU hotplug online.
- */
-void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
-{
- t->curr_ret_stack = -1;
- /*
- * The idle task has no parent, it either has its own
- * stack or no stack at all.
- */
- if (t->ret_stack)
- WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
-
- if (ftrace_graph_active) {
- struct ftrace_ret_stack *ret_stack;
-
- ret_stack = per_cpu(idle_ret_stack, cpu);
- if (!ret_stack) {
- ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
- * sizeof(struct ftrace_ret_stack),
- GFP_KERNEL);
- if (!ret_stack)
- return;
- per_cpu(idle_ret_stack, cpu) = ret_stack;
- }
- graph_init_task(t, ret_stack);
- }
-}
-
/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
@@ -3320,7 +3268,12 @@ void ftrace_graph_init_task(struct task_
GFP_KERNEL);
if (!ret_stack)
return;
- graph_init_task(t, ret_stack);
+ atomic_set(&t->tracing_graph_pause, 0);
+ atomic_set(&t->trace_overrun, 0);
+ t->ftrace_timestamp = 0;
+ /* make curr_ret_stack visable before we add the ret_stack */
+ smp_wmb();
+ t->ret_stack = ret_stack;
}
}
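
Note: the first hunk above switches function_stat_show() back to serializing only the shared static trace_seq with a function-local static mutex, instead of holding ftrace_profile_lock across the whole show path. The following is a minimal userspace sketch of that locking pattern, not kernel code; the names (record, record_show, scratch) are hypothetical, and pthreads stands in for the kernel's DEFINE_MUTEX()/mutex_lock().

/*
 * Minimal userspace sketch (not kernel code) of the pattern the first
 * hunk restores: a function-local static mutex protects a shared static
 * formatting buffer inside a show-style callback, so the lock is held
 * only while that buffer is in use.  All names are hypothetical;
 * pthreads stands in for DEFINE_MUTEX()/mutex_lock().
 * Build: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct record {
	const char *name;
	unsigned long counter;
};

static int record_show(FILE *out, const struct record *rec)
{
	/* shared scratch buffer, analogous to the static struct trace_seq s */
	static char scratch[128];
	static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

	/* per-record output that touches no shared state needs no lock */
	fprintf(out, "  %-30.30s  %10lu", rec->name, rec->counter);

	/* only the shared buffer is serialized, not the whole show path */
	pthread_mutex_lock(&mutex);
	snprintf(scratch, sizeof(scratch), "  (%lu calls)", rec->counter);
	fputs(scratch, out);
	pthread_mutex_unlock(&mutex);

	fputc('\n', out);
	return 0;
}

int main(void)
{
	struct record r = { "schedule", 42 };
	return record_show(stdout, &r);
}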