Updated to 2.6.32.27
/kernel/sched.c
blob:12390f287ac72d6bd67d43c6ea1dea4820d4dd89 -> blob:95297f2c284ebeed72c63256397c783efbc289d4
--- kernel/sched.c
+++ kernel/sched.c
@@ -1,3 +1,6 @@
+#ifdef CONFIG_SCHED_BFS
+#include "sched_bfs.c"
+#else
/*
* kernel/sched.c
*
@@ -76,6 +79,7 @@
#include <asm/irq_regs.h>
#include "sched_cpupri.h"
+#include "sched_autogroup.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -352,13 +356,20 @@ static inline struct task_group *task_gr
rcu_read_lock();
tg = __task_cred(p)->user->tg;
rcu_read_unlock();
+
+ return tg;
#elif defined(CONFIG_CGROUP_SCHED)
- tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
- struct task_group, css);
+ struct cgroup_subsys_state *css;
+
+ css = task_subsys_state(p, cpu_cgroup_subsys_id);
+ tg = container_of(css, struct task_group, css);
+
+ return autogroup_task_group(p, tg);
#else
tg = &init_task_group;
-#endif
+
return tg;
+#endif
}
/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
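Aside (not part of the diff): task_group() now funnels its result through autogroup_task_group(), which this update supplies in sched_autogroup.c. A minimal sketch of what that helper presumably does, assuming the backport mirrors the mainline autogroup logic (task_wants_autogroup() and sysctl_sched_autogroup_enabled are assumptions taken from mainline, not read out of this tree):

	/* Sketch only -- the real helper lives in sched_autogroup.c. */
	static inline struct task_group *
	autogroup_task_group(struct task_struct *p, struct task_group *tg)
	{
		int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);

		/* Route only tasks still in the default/root group to their
		 * per-session autogroup; explicit cgroup placement is kept. */
		if (enabled && task_wants_autogroup(p, tg))
			return p->signal->autogroup->tg;

		return tg;
	}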
@@ -1849,6 +1860,7 @@ static inline void __set_task_cpu(struct
#include "sched_idletask.c"
#include "sched_fair.c"
#include "sched_rt.c"
+#include "sched_autogroup.c"
#ifdef CONFIG_SCHED_DEBUG
# include "sched_debug.c"
#endif
@@ -5209,21 +5221,9 @@ void account_idle_time(cputime_t cputime
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
cputime64_t cputime64 = cputime_to_cputime64(cputime);
struct rq *rq = this_rq();
- struct task_struct *task;
- if (atomic_read(&rq->nr_iowait) > 0) {
- for_each_process(task) {
- if (task->in_iowait) {
- task->iowait = cputime64_add(task->iowait, cputime64);
- //printk("%s[%d]\n", task->comm, task->pid);
- }
- }
+ if (atomic_read(&rq->nr_iowait) > 0)
cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
- }
else
cpustat->idle = cputime64_add(cpustat->idle, cputime64);
}
@@ -7050,7 +7050,7 @@ void sched_show_task(struct task_struct
unsigned state;
state = p->state ? __ffs(p->state) + 1 : 0;
- printk(KERN_INFO "%-15.15s %c", p->comm,
+ printk(KERN_INFO "%-13.13s %c", p->comm,
state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
#if BITS_PER_LONG == 32
if (state == TASK_RUNNING)
@@ -9599,6 +9599,8 @@ void __init sched_init(void)
init_task_group.parent = &root_task_group;
list_add(&init_task_group.siblings, &root_task_group.children);
#endif /* CONFIG_USER_SCHED */
+
+ autogroup_init(&init_task);
#endif /* CONFIG_GROUP_SCHED */
#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
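Aside: autogroup_init() is also provided by sched_autogroup.c. A rough sketch of what the call added above presumably sets up, assuming it follows the mainline patch (autogroup_default, its fields, and whether the default group wraps init_task_group or root_task_group here are assumptions):

	void __init autogroup_init(struct task_struct *init_task)
	{
		/* Give the init process the statically allocated default
		 * autogroup, which simply wraps the root task group. */
		autogroup_default.tg = &init_task_group;
		kref_init(&autogroup_default.kref);
		init_rwsem(&autogroup_default.lock);
		init_task->signal->autogroup = &autogroup_default;
	}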
@@ -9756,24 +9758,13 @@ static inline int preempt_count_equals(i
return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
}
-static int __might_sleep_init_called;
-int __init __might_sleep_init(void)
-{
- __might_sleep_init_called = 1;
- return 0;
-}
-early_initcall(__might_sleep_init);
-
void __might_sleep(char *file, int line, int preempt_offset)
{
#ifdef in_atomic
static unsigned long prev_jiffy; /* ratelimiting */
if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
- oops_in_progress)
- return;
- if (system_state != SYSTEM_RUNNING &&
- (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
+ system_state != SYSTEM_RUNNING || oops_in_progress)
return;
if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
return;
@@ -10149,15 +10140,11 @@ void sched_destroy_group(struct task_gro
/* change task's runqueue when it moves between groups.
* The caller of this function should have put the task in its new group
* by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
- * reflect its new group.
+ * reflect its new group. Called with the runqueue lock held.
*/
-void sched_move_task(struct task_struct *tsk)
+void __sched_move_task(struct task_struct *tsk, struct rq *rq)
{
int on_rq, running;
- unsigned long flags;
- struct rq *rq;
-
- rq = task_rq_lock(tsk, &flags);
update_rq_clock(rq);
@@ -10180,6 +10167,15 @@ void sched_move_task(struct task_struct
tsk->sched_class->set_curr_task(rq);
if (on_rq)
enqueue_task(rq, tsk, 0, false);
+}
+
+void sched_move_task(struct task_struct *tsk)
+{
+ struct rq *rq;
+ unsigned long flags;
+
+ rq = task_rq_lock(tsk, &flags);
+ __sched_move_task(tsk, rq);
task_rq_unlock(rq, &flags);
}
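Aside: the split above turns sched_move_task() into a lock-taking wrapper around __sched_move_task(), presumably so that callers which already hold the task's runqueue lock (most likely the autogroup code added elsewhere in this update) can reuse the requeue logic. Purely hypothetical illustration:

	/* Hypothetical caller: rq == task_rq(p) and rq->lock is already held,
	 * so calling sched_move_task() here would deadlock in task_rq_lock(). */
	static void requeue_in_new_group_locked(struct task_struct *p, struct rq *rq)
	{
		__sched_move_task(p, rq);
	}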
@@ -10593,15 +10589,6 @@ cpu_cgroup_destroy(struct cgroup_subsys
static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
- if ((current != tsk) && (!capable(CAP_SYS_NICE))) {
- const struct cred *cred = current_cred(), *tcred;
-
- tcred = __task_cred(tsk);
-
- if (cred->euid != tcred->uid && cred->euid != tcred->suid)
- return -EPERM;
- }
-
#ifdef CONFIG_RT_GROUP_SCHED
if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
return -EINVAL;
@@ -11116,3 +11103,4 @@ void synchronize_sched_expedited(void)
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
#endif /* #else #ifndef CONFIG_SMP */
+#endif /* CONFIG_SCHED_BFS */