Patch 2.6.32.28 to 2.6.32.32
/kernel/sched_fair.c
blob:cd9a40b5d50e08cdb61e417fbec5485497f73c16 -> blob:01e311e6b47fd6b9368e49327f2467d8d148bc60
--- kernel/sched_fair.c
+++ kernel/sched_fair.c
@@ -496,7 +496,7 @@ __update_curr(struct cfs_rq *cfs_rq, str
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
- u64 now = rq_of(cfs_rq)->clock_task;
+ u64 now = rq_of(cfs_rq)->clock;
unsigned long delta_exec;
if (unlikely(!curr))
@@ -579,7 +579,7 @@ update_stats_curr_start(struct cfs_rq *c
/*
* We are starting a new run period:
*/
- se->exec_start = rq_of(cfs_rq)->clock_task;
+ se->exec_start = rq_of(cfs_rq)->clock;
}
/**************************************************
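
Note on the two hunks above: both switch the timestamp used for CFS runtime accounting from rq->clock_task back to rq->clock (clock_task, where present, excludes time charged to interrupt handling; clock is the raw per-runqueue clock). A minimal sketch of how that timestamp is consumed, abbreviated from the surrounding update_curr() code in this file and not itself part of the patch:

	u64 now = rq_of(cfs_rq)->clock;				/* timestamp source changed by this patch */
	unsigned long delta_exec;

	delta_exec = (unsigned long)(now - curr->exec_start);	/* CPU time used since the last update */
	__update_curr(cfs_rq, curr, delta_exec);		/* charge it to the running entity */
	curr->exec_start = now;					/* start of the next accounting period */
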
@@ -1222,6 +1222,7 @@ static int wake_affine(struct sched_doma
unsigned long this_load, load;
int idx, this_cpu, prev_cpu;
unsigned long tl_per_task;
+ unsigned int imbalance;
struct task_group *tg;
unsigned long weight;
int balanced;
@@ -1261,6 +1262,8 @@ static int wake_affine(struct sched_doma
tg = task_group(p);
weight = p->se.load.weight;
+ imbalance = 100 + (sd->imbalance_pct - 100) / 2;
+
/*
* In low-load situations, where prev_cpu is idle and this_cpu is idle
* due to the sync cause above having dropped this_load to 0, we'll
@@ -1270,22 +1273,9 @@ static int wake_affine(struct sched_doma
* Otherwise check if either cpus are near enough in load to allow this
* task to be woken on this_cpu.
*/
- if (this_load) {
- unsigned long this_eff_load, prev_eff_load;
-
- this_eff_load = 100;
- this_eff_load *= power_of(prev_cpu);
- this_eff_load *= this_load +
- effective_load(tg, this_cpu, weight, weight);
-
- prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
- prev_eff_load *= power_of(this_cpu);
- prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
-
- balanced = this_eff_load <= prev_eff_load;
- } else
- balanced = true;
-
+ balanced = !this_load ||
+ 100*(this_load + effective_load(tg, this_cpu, weight, weight)) <=
+ imbalance*(load + effective_load(tg, prev_cpu, 0, weight));
rcu_read_unlock();
/*
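
The wake_affine() hunks above drop the per-CPU-power weighted comparison (this_eff_load vs. prev_eff_load, each scaled by power_of()) in favour of a single threshold derived from the scheduling domain's imbalance_pct. A worked example of the restored condition, assuming a domain with imbalance_pct = 125 (a common default; other domain levels may differ):

	imbalance = 100 + (125 - 100) / 2;	/* = 112 */

	balanced = !this_load ||
		100 * (this_load + effective_load(tg, this_cpu, weight, weight)) <=
		112 * (load + effective_load(tg, prev_cpu, 0, weight));

So the wakeup is still considered balanced, and may stay affine to this_cpu, as long as this_cpu's effective load exceeds prev_cpu's by no more than roughly 12%.
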
@@ -2002,11 +1992,8 @@ static void task_fork_fair(struct task_s
update_rq_clock(rq);
- if (unlikely(task_cpu(p) != this_cpu)) {
- rcu_read_lock();
+ if (unlikely(task_cpu(p) != this_cpu))
__set_task_cpu(p, this_cpu);
- rcu_read_unlock();
- }
update_curr(cfs_rq);
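
For reference, the resulting task_fork_fair() fragment, reconstructed from the hunk above: the braces and the rcu_read_lock()/rcu_read_unlock() pair around the CPU reassignment are removed, leaving a single-statement if:

	if (unlikely(task_cpu(p) != this_cpu))
		__set_task_cpu(p, this_cpu);

	update_curr(cfs_rq);
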
@@ -2078,26 +2065,13 @@ static void set_curr_task_fair(struct rq
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static void task_move_group_fair(struct task_struct *p, int on_rq)
+static void moved_group_fair(struct task_struct *p, int on_rq)
{
- /*
- * If the task was not on the rq at the time of this cgroup movement
- * it must have been asleep, sleeping tasks keep their ->vruntime
- * absolute on their old rq until wakeup (needed for the fair sleeper
- * bonus in place_entity()).
- *
- * If it was on the rq, we've just 'preempted' it, which does convert
- * ->vruntime to a relative base.
- *
- * Make sure both cases convert their relative position when migrating
- * to another cgroup's rq. This does somewhat interfere with the
- * fair sleeper stuff for the first placement, but who cares.
- */
- if (!on_rq)
- p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
- set_task_rq(p, task_cpu(p));
+ struct cfs_rq *cfs_rq = task_cfs_rq(p);
+
+ update_curr(cfs_rq);
if (!on_rq)
- p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
+ place_entity(cfs_rq, &p->se, 1);
}
#endif
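
The group-move hook reverts from task_move_group_fair() to moved_group_fair(). The removed variant re-bases a sleeping task's vruntime across the cgroup move, conceptually (per the removed lines above, applied only when the task is not on the runqueue; old_cfs_rq and new_cfs_rq are illustrative names for cfs_rq_of(&p->se) before and after the set_task_rq() call):

	p->se.vruntime += new_cfs_rq->min_vruntime - old_cfs_rq->min_vruntime;

whereas the restored variant updates the accounting on the task's cfs_rq via update_curr() and, for a task not on the runqueue, re-initializes its position with place_entity(cfs_rq, &p->se, 1).
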
@@ -2151,7 +2125,7 @@ static const struct sched_class fair_sch
.get_rr_interval = get_rr_interval_fair,
#ifdef CONFIG_FAIR_GROUP_SCHED
- .task_move_group = task_move_group_fair,
+ .moved_group = moved_group_fair,
#endif
};
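
Correspondingly, the fair class now registers the hook as .moved_group. A sketch of how the hook is expected to be invoked from sched_move_task() in kernel/sched.c, inferred from the (p, on_rq) signature shown above; that caller is not part of this patch:

#ifdef CONFIG_FAIR_GROUP_SCHED
	if (tsk->sched_class->moved_group)
		tsk->sched_class->moved_group(tsk, on_rq);
#endif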