sched: Improve the ->group_imb logic
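
Summary (inferred from the diff below, not the original changelog): the old
->group_imb test tracked both the largest and the smallest nr_running seen
while scanning the group's CPUs (alongside max_cpu_load/min_cpu_load in the
!local_group branch) and required their difference to exceed 1, in addition
to the load spread being at least one average task's worth.  The change drops
the min/max nr_running bookkeeping and instead samples nr_running only on the
CPU that currently holds max_cpu_load, so a group is now flagged imbalanced
when its busiest CPU both exceeds the least loaded CPU by at least
avg_load_per_task and is running more than one task.

The sketch below restates the new condition outside the scheduler.  The names
(struct cpu_stat, group_is_imbalanced, n_cpus) are hypothetical and the
per-CPU load is taken as a plain number, whereas the real code goes through
source_load()/target_load() and weighted_cpuload(); it illustrates the
heuristic only.

    /* Illustrative-only restatement of the new ->group_imb test. */
    struct cpu_stat {
            unsigned long load;             /* load contribution of this CPU */
            unsigned long nr_running;       /* runnable tasks on this CPU */
    };

    static int group_is_imbalanced(const struct cpu_stat *cpu, int n_cpus)
    {
            unsigned long max_load = 0, min_load = ~0UL, max_nr_running = 0;
            unsigned long sum_load = 0, sum_nr = 0, avg_load_per_task = 0;
            int i;

            for (i = 0; i < n_cpus; i++) {
                    if (cpu[i].load > max_load) {
                            max_load = cpu[i].load;
                            /* remember nr_running of the busiest CPU only */
                            max_nr_running = cpu[i].nr_running;
                    }
                    if (cpu[i].load < min_load)
                            min_load = cpu[i].load;
                    sum_load += cpu[i].load;
                    sum_nr += cpu[i].nr_running;
            }

            if (sum_nr)
                    avg_load_per_task = sum_load / sum_nr;

            /* busiest CPU is ahead by at least one task's worth of load
             * and actually has more than one task to give away */
            return (max_load - min_load) >= avg_load_per_task &&
                   max_nr_running > 1;
    }
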
--- kernel/sched_fair.c
+++ kernel/sched_fair.c
@@ -2717,8 +2717,7 @@ static inline void update_sg_lb_stats(st
int local_group, const struct cpumask *cpus,
int *balance, struct sg_lb_stats *sgs)
{
- unsigned long nr_running, max_nr_running, min_nr_running;
- unsigned long load, max_cpu_load, min_cpu_load;
+ unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
int i;
unsigned int balance_cpu = -1, first_idle_cpu = 0;
unsigned long avg_load_per_task = 0;
@@ -2730,13 +2729,10 @@ static inline void update_sg_lb_stats(st
max_cpu_load = 0;
min_cpu_load = ~0UL;
max_nr_running = 0;
- min_nr_running = ~0UL;
for_each_cpu_and(i, sched_group_cpus(group), cpus) {
struct rq *rq = cpu_rq(i);
- nr_running = rq->nr_running;
-
/* Bias balancing toward cpus of our domain */
if (local_group) {
if (idle_cpu(i) && !first_idle_cpu) {
@@ -2747,19 +2743,16 @@ static inline void update_sg_lb_stats(st
load = target_load(i, load_idx);
} else {
load = source_load(i, load_idx);
- if (load > max_cpu_load)
+ if (load > max_cpu_load) {
max_cpu_load = load;
+ max_nr_running = rq->nr_running;
+ }
if (min_cpu_load > load)
min_cpu_load = load;
-
- if (nr_running > max_nr_running)
- max_nr_running = nr_running;
- if (min_nr_running > nr_running)
- min_nr_running = nr_running;
}
sgs->group_load += load;
- sgs->sum_nr_running += nr_running;
+ sgs->sum_nr_running += rq->nr_running;
sgs->sum_weighted_load += weighted_cpuload(i);
if (idle_cpu(i))
sgs->idle_cpus++;
@@ -2794,8 +2787,7 @@ static inline void update_sg_lb_stats(st
if (sgs->sum_nr_running)
avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
- if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
- (max_nr_running - min_nr_running) > 1)
+ if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
sgs->group_imb = 1;
sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,