--- f145ec3eedc2f0fc846b06cbe63f726fb46865c0
+++ 7002ac0cae5a4a16fa6b49d34dc2f0e3c48e9902
@@ -36,8 +36,15 @@
 #define SCHED_FIFO		1
 #define SCHED_RR		2
 #define SCHED_BATCH		3
-/* SCHED_ISO: reserved but not implemented yet */
+/* SCHED_ISO: Implemented on BFS only */
 #define SCHED_IDLE		5
+#ifdef CONFIG_SCHED_BFS
+#define SCHED_ISO		4
+#define SCHED_IDLEPRIO		SCHED_IDLE
+#define SCHED_MAX		(SCHED_IDLEPRIO)
+#define SCHED_RANGE(policy)	((policy) <= SCHED_MAX)
+#endif
+
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK     0x40000000
@@ -142,6 +149,7 @@ extern unsigned long nr_uninterruptible(
 extern unsigned long nr_iowait(void);
 extern unsigned long nr_iowait_cpu(void);
 extern unsigned long this_cpu_load(void);
+extern int above_background_load(void);
 
 extern void calc_global_load(void);
@@ -260,9 +268,6 @@ extern asmlinkage void schedule_tail(str
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
-extern int runqueue_is_locked(int cpu);
-extern void task_rq_unlock_wait(struct task_struct *p);
-
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
@@ -552,6 +557,8 @@ struct thread_group_cputimer {
 	spinlock_t lock;
 };
 
+struct autogroup;
+
 /*
  * NOTE! "signal_struct" does not have it's own
  * locking, because a shared signal_struct always
@@ -618,6 +625,10 @@ struct signal_struct {
 
 	struct tty_struct *tty; /* NULL if no tty */
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+	struct autogroup *autogroup;
+#endif
+
 	/*
 	 * Cumulative resource counters for dead threads in the group,
 	 * and for reaped dead child processes forked by this group.
@@ -728,14 +739,6 @@ struct user_struct {
 	uid_t uid;
 	struct user_namespace *user_ns;
 
-#ifdef CONFIG_USER_SCHED
-	struct task_group *tg;
-#ifdef CONFIG_SYSFS
-	struct kobject kobj;
-	struct delayed_work work;
-#endif
-#endif
-
 #ifdef CONFIG_PERF_EVENTS
 	atomic_long_t locked_vm;
 #endif
@@ -902,6 +905,7 @@ struct sched_group {
 	 * single CPU.
 	 */
 	unsigned int cpu_power;
+	unsigned int group_weight;
 
 	/*
	 * The CPUs this group covers.
@@ -1121,7 +1125,7 @@ struct sched_class {
 					 struct task_struct *task);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	void (*moved_group) (struct task_struct *p, int on_rq);
+	void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
 };
@@ -1230,17 +1234,33 @@ struct task_struct {
 	int lock_depth;		/* BKL lock depth */
 
+#ifndef CONFIG_SCHED_BFS
 #ifdef CONFIG_SMP
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	int oncpu;
 #endif
 #endif
+#else /* CONFIG_SCHED_BFS */
+	int oncpu;
+#endif
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
+#ifdef CONFIG_SCHED_BFS
+	int time_slice;
+	u64 deadline;
+	struct list_head run_list;
+	u64 last_ran;
+	u64 sched_time;		/* sched_clock time spent running */
+#ifdef CONFIG_SMP
+	int sticky;		/* Soft affined flag */
+#endif
+	unsigned long rt_timeout;
+#else /* CONFIG_SCHED_BFS */
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -1339,6 +1359,9 @@ struct task_struct {
 	int __user *clear_child_tid;		/* CLONE_CHILD_CLEARTID */
 
 	cputime_t utime, stime, utimescaled, stimescaled;
+#ifdef CONFIG_SCHED_BFS
+	unsigned long utime_pc, stime_pc;
+#endif
 	cputime_t gtime;
 	cputime_t prev_utime, prev_stime;
 	unsigned long nvcsw, nivcsw;	/* context switch counts */
@@ -1550,6 +1573,73 @@ struct task_struct {
 	cputime64_t iowait;
 };
 
+#ifdef CONFIG_SCHED_BFS
+extern int grunqueue_is_locked(void);
+extern void grq_unlock_wait(void);
+extern void cpu_scaling(int cpu);
+extern void cpu_nonscaling(int cpu);
+#define tsk_seruntime(t)		((t)->sched_time)
+#define tsk_rttimeout(t)		((t)->rt_timeout)
+#define task_rq_unlock_wait(tsk)	grq_unlock_wait()
+
+static inline void set_oom_timeslice(struct task_struct *p)
+{
+	p->time_slice = HZ;
+}
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+}
+
+#define runqueue_is_locked(cpu)	grunqueue_is_locked()
+
+static inline void print_scheduler_version(void)
+{
+	printk(KERN_INFO"BFS CPU scheduler v0.401 by Con Kolivas.\n");
+}
+
+static inline int iso_task(struct task_struct *p)
+{
+	return (p->policy == SCHED_ISO);
+}
+#else
+extern int runqueue_is_locked(int cpu);
+extern void task_rq_unlock_wait(struct task_struct *p);
+static inline void cpu_scaling(int cpu)
+{
+}
+
+static inline void cpu_nonscaling(int cpu)
+{
+}
+#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t)	((t)->rt.timeout)
+
+static inline void sched_exit(struct task_struct *p)
+{
+}
+
+static inline void set_oom_timeslice(struct task_struct *p)
+{
+	p->rt.time_slice = HZ;
+}
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+	printk(KERN_INFO"CFS CPU scheduler.\n");
+}
+
+static inline int iso_task(struct task_struct *p)
+{
+	return 0;
+}
+#endif
+
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpumask(tsk)	(&(tsk)->cpus_allowed)
@@ -1568,9 +1658,19 @@ struct task_struct {
 
 #define MAX_USER_RT_PRIO	100
 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
+#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
+#ifdef CONFIG_SCHED_BFS
+#define PRIO_RANGE		(40)
+#define MAX_PRIO		(MAX_RT_PRIO + PRIO_RANGE)
+#define ISO_PRIO		(MAX_RT_PRIO)
+#define NORMAL_PRIO		(MAX_RT_PRIO + 1)
+#define IDLE_PRIO		(MAX_RT_PRIO + 2)
+#define PRIO_LIMIT		((IDLE_PRIO) + 1)
+#else /* CONFIG_SCHED_BFS */
 #define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
+#define NORMAL_PRIO		DEFAULT_PRIO
+#endif /* CONFIG_SCHED_BFS */
 
 static inline int rt_prio(int prio)
 {
@@ -1740,8 +1840,7 @@ extern int task_free_unregister(struct n
 /*
  * Per process flags
  */
-#define PF_ALIGNWARN	0x00000001	/* Print alignment warning msgs */
-				/* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD	0x00000001	/* I am ksoftirqd */
 #define PF_STARTING	0x00000002	/* being created */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
@@ -1878,12 +1977,25 @@ extern void sched_clock_idle_wakeup_even
  */
 extern unsigned long long cpu_clock(int cpu);
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
 extern unsigned long long task_sched_runtime(struct task_struct *task);
 extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS)
 extern void sched_exec(void);
 #else
 #define sched_exec()	{}
@@ -1944,6 +2056,19 @@ int sched_rt_handler(struct ctl_table *t
 
 extern unsigned int sysctl_sched_compat_yield;
 
+#ifdef CONFIG_SCHED_AUTOGROUP
+extern unsigned int sysctl_sched_autogroup_enabled;
+extern void sched_autogroup_create_attach(struct task_struct *p);
+extern void sched_autogroup_detach(struct task_struct *p);
+extern void sched_autogroup_fork(struct signal_struct *sig);
+extern void sched_autogroup_exit(struct signal_struct *sig);
+#else
+static inline void sched_autogroup_create_attach(struct task_struct *p) { }
+static inline void sched_autogroup_detach(struct task_struct *p) { }
+static inline void sched_autogroup_fork(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+#endif
+
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
@@ -2039,6 +2164,9 @@ extern void wake_up_new_task(struct task
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
 extern void sched_fork(struct task_struct *p, int clone_flags);
+#ifdef CONFIG_SCHED_BFS
+extern void sched_exit(struct task_struct *p);
+#endif
 extern void sched_dead(struct task_struct *p);
 
 extern void proc_caches_init(void);
@@ -2413,9 +2541,9 @@ extern int __cond_resched_lock(spinlock_
 
 extern int __cond_resched_softirq(void);
 
-#define cond_resched_softirq() ({				\
-	__might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET);	\
-	__cond_resched_softirq();				\
+#define cond_resched_softirq() ({					\
+	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
+	__cond_resched_softirq();					\
 })
 
 /*
@@ -2504,13 +2632,9 @@ extern long sched_getaffinity(pid_t pid,
 
 extern void normalize_rt_tasks(void);
 
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
 
 extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
 
 extern struct task_group *sched_create_group(struct task_group *parent);
 extern void sched_destroy_group(struct task_group *tg);