sched: Fix OOPS when build_sched_domains() percpu allocation fails

If __sdt_alloc() fails partway through, some of the percpu pointers in
struct sd_data (sdd->sd, sdd->sg, sdd->sgp) are still NULL when the
error path of build_sched_domains() calls __sdt_free().  Dereferencing
*per_cpu_ptr() on a NULL percpu pointer faults, so check each pointer
before touching it, and clear the pointers after free_percpu() so that
a second __sdt_free() call is harmless.

While at it, annotate two hot-path branches as unlikely(): the RT
policy check in rt_policy() and the idle_stamp check in
ttwu_do_wakeup().
diff --git a/kernel/sched.c b/kernel/sched.c
index eac5ff3f61aa3df6027250ba231002f979639b84..b8657eb0f8312bd09c4eb6eea0ab18d210bfb777 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -128,7 +128,7 @@
 
 static inline int rt_policy(int policy)
 {
-	if (policy == SCHED_FIFO || policy == SCHED_RR)
+	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
 		return 1;
 	return 0;
 }
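For readers outside the kernel tree: unlikely() is defined in
include/linux/compiler.h as __builtin_expect(!!(x), 0), a hint that lets
the compiler lay the cold branch out of the straight-line path. A minimal
userspace sketch of the same annotation (the macro definitions and the
SCHED_FIFO/SCHED_RR values mirror the kernel; the main() harness is
illustrative only):

/* Userspace sketch of the kernel's likely()/unlikely() hints. */
#include <stdio.h>

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

#define SCHED_FIFO	1	/* values match the kernel ABI */
#define SCHED_RR	2

static inline int rt_policy(int policy)
{
	/* Most tasks are SCHED_NORMAL, so the RT branch is cold. */
	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
		return 1;
	return 0;
}

int main(void)
{
	printf("SCHED_FIFO is rt: %d\n", rt_policy(SCHED_FIFO));
	printf("policy 0 is rt:   %d\n", rt_policy(0));
	return 0;
}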
@@ -2501,7 +2501,7 @@ ttwu_do_wakeup(struct rq *rq, struct tas
 	if (p->sched_class->task_woken)
 		p->sched_class->task_woken(rq, p);
 
-	if (rq->idle_stamp) {
+	if (unlikely(rq->idle_stamp)) {
 		u64 delta = rq->clock - rq->idle_stamp;
 		u64 max = 2*sysctl_sched_migration_cost;
 
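Context for the hunk above: the guarded block continues (past the last
context line) by clamping delta to max and folding it into rq->avg_idle,
the estimate used to decide whether a newidle balance is worth its cost.
A standalone sketch of that arithmetic, assuming update_avg()'s
eighth-weight moving average and the default 0.5ms
sysctl_sched_migration_cost; the sample deltas are made up:

/* Sketch of the avg_idle bookkeeping behind the hunk above. */
#include <stdio.h>
#include <stdint.h>

static uint64_t sysctl_sched_migration_cost = 500000ULL;	/* 0.5ms in ns */

static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = sample - *avg;
	*avg += diff >> 3;	/* move 1/8th of the way toward the sample */
}

int main(void)
{
	uint64_t avg_idle = 0;
	uint64_t deltas[] = { 100000, 300000, 5000000 };	/* ns, illustrative */
	uint64_t max = 2 * sysctl_sched_migration_cost;

	for (unsigned i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++) {
		if (deltas[i] > max)
			avg_idle = max;	/* outlier: clamp rather than average */
		else
			update_avg(&avg_idle, deltas[i]);
		printf("delta=%7llu -> avg_idle=%llu\n",
		       (unsigned long long)deltas[i],
		       (unsigned long long)avg_idle);
	}
	return 0;
}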
@@ -7415,16 +7415,26 @@ static void __sdt_free(const struct cpum
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
-			kfree(*per_cpu_ptr(sdd->sd, j));
-			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
+			struct sched_domain *sd;
+
+			if (sdd->sd) {
+				sd = *per_cpu_ptr(sdd->sd, j);
+				if (sd && (sd->flags & SD_OVERLAP))
+					free_sched_groups(sd->groups, 0);
+				kfree(*per_cpu_ptr(sdd->sd, j));
+			}
+
+			if (sdd->sg)
+				kfree(*per_cpu_ptr(sdd->sg, j));
+			if (sdd->sgp)
+				kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
+		sdd->sd = NULL;
 		free_percpu(sdd->sg);
+		sdd->sg = NULL;
 		free_percpu(sdd->sgp);
+		sdd->sgp = NULL;
 	}
 }
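Why the guards matter: __sdt_alloc() can return with only some of
sdd->sd, sdd->sg and sdd->sgp allocated, and the error path of
build_sched_domains() then runs __sdt_free() on that half-built state.
per_cpu_ptr() on a NULL percpu pointer computes an address from NULL plus
the per-cpu offset, so the old unconditional *per_cpu_ptr(sdd->sd, j)
dereferences garbage and OOPSes; clearing the pointers after free_percpu()
additionally makes a repeat call a no-op. A userspace analogue of this
guard-then-NULL cleanup pattern (all names invented for illustration;
plain malloc/free stand in for the percpu and kmalloc APIs):

/* Cleanup that tolerates a partially failed multi-table allocation
 * and is safe to run twice; loosely modeled on __sdt_free(). */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS	4

struct sd_data_like {
	int **sd;	/* one entry per "cpu"; table NULL if alloc failed */
	int **sg;
	int **sgp;
};

static void sdt_free_like(struct sd_data_like *sdd)
{
	for (int j = 0; j < NR_CPUS; j++) {
		/* Guard each table: indexing a NULL table here is the
		 * userspace cousin of *per_cpu_ptr(NULL, j). */
		if (sdd->sd)
			free(sdd->sd[j]);
		if (sdd->sg)
			free(sdd->sg[j]);
		if (sdd->sgp)
			free(sdd->sgp[j]);
	}
	free(sdd->sd);
	sdd->sd = NULL;	/* NULLing makes a second call a no-op */
	free(sdd->sg);
	sdd->sg = NULL;
	free(sdd->sgp);
	sdd->sgp = NULL;
}

int main(void)
{
	struct sd_data_like sdd = { 0 };

	/* Simulate a partial failure: sd allocated, sg/sgp never were. */
	sdd.sd = calloc(NR_CPUS, sizeof(int *));
	for (int j = 0; sdd.sd && j < NR_CPUS; j++)
		sdd.sd[j] = malloc(sizeof(int));

	sdt_free_like(&sdd);	/* must not crash on NULL sg/sgp */
	sdt_free_like(&sdd);	/* and must be idempotent */
	puts("cleanup survived partial allocation and a repeat call");
	return 0;
}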
@@ -9449,4 +9459,3 @@ struct cgroup_subsys cpuacct_subsys = {
 	.subsys_id = cpuacct_subsys_id,
 };
 #endif /* CONFIG_CGROUP_CPUACCT */
-