--- 35895e3ff675ef712898fe63d788dea18d468e6a
+++ fc2c49246d1067cde6af1b66e31668d1b8e9dc41
@@ -39,6 +39,8 @@
 #define SCHED_BATCH		3
 /* SCHED_ISO: reserved but not implemented yet */
 #define SCHED_IDLE		5
+#define SCHED_IDLEPRIO		SCHED_IDLE
+
 /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
 #define SCHED_RESET_ON_FORK	0x40000000
 
@@ -268,8 +270,6 @@ extern asmlinkage void schedule_tail(str
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
-extern int runqueue_is_locked(int cpu);
-
 extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern void select_nohz_load_balancer(int stop_tick);
@@ -1226,9 +1226,12 @@ struct task_struct {
 
 #ifdef CONFIG_SMP
 	struct task_struct *wake_entry;
-	int on_cpu;
 #endif
-	int on_rq;
+#if defined(CONFIG_SMP)
+	bool on_cpu;
+#endif
+#endif
+	bool on_rq;
 
 	int prio, static_prio, normal_prio;
 	unsigned int rt_priority;
@@ -1572,6 +1575,42 @@ struct task_struct {
 #endif
 };
 
+extern int runqueue_is_locked(int cpu);
+static inline void cpu_scaling(int cpu)
+{
+}
+
+static inline void cpu_nonscaling(int cpu)
+{
+}
+#define tsk_seruntime(t)	((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t)	((t)->rt.timeout)
+
+static inline void tsk_cpus_current(struct task_struct *p)
+{
+	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
+}
+
+static inline void print_scheduler_version(void)
+{
+	printk(KERN_INFO"CFS CPU scheduler.\n");
+}
+
+static inline bool iso_task(struct task_struct *p)
+{
+	return false;
+}
+
+static inline void remove_cpu(int cpu)
+{
+}
+
+/* Anyone feel like implementing this? */
+static inline int above_background_load(void)
+{
+	return 1;
+}
+
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
 
@@ -1589,10 +1628,11 @@ struct task_struct {
  */
 
 #define MAX_USER_RT_PRIO	100
-#define MAX_RT_PRIO		MAX_USER_RT_PRIO
+#define MAX_RT_PRIO		(MAX_USER_RT_PRIO + 1)
+#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
 
 #define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define DEFAULT_PRIO		(MAX_RT_PRIO + 20)
+#define NORMAL_PRIO		DEFAULT_PRIO
 
 static inline int rt_prio(int prio)
 {
@@ -1942,7 +1982,7 @@ extern unsigned long long
 task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP)
 extern void sched_exec(void);
 #else
 #define sched_exec()	{}
@@ -2571,7 +2611,7 @@ extern void signal_wake_up(struct task_s
  */
 #ifdef CONFIG_SMP
 
-static inline unsigned int task_cpu(const struct task_struct *p)
+static inline int task_cpu(const struct task_struct *p)
 {
 	return task_thread_info(p)->cpu;
 }
@@ -2580,12 +2620,12 @@ extern void set_task_cpu(struct task_str
 
 #else
 
-static inline unsigned int task_cpu(const struct task_struct *p)
+static inline int task_cpu(const struct task_struct *p)
 {
 	return 0;
 }
 
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+static inline void set_task_cpu(struct task_struct *p, int cpu)
 {
 }
 
@@ -2699,5 +2739,3 @@ static inline unsigned long rlimit_max(u
 }
 
 #endif /* __KERNEL__ */
-
-#endif
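
Note on the fallback helpers added after struct task_struct: tsk_seruntime(), tsk_rttimeout(), iso_task(), above_background_load(), print_scheduler_version() and the empty cpu_scaling()/cpu_nonscaling()/remove_cpu()/tsk_cpus_current() stubs let code written against an alternative scheduler still build and behave sanely on a plain CFS tree. The sketch below is illustrative only and not part of the patch: it assumes a tree with this diff applied, and report_task() is a hypothetical caller showing how the helpers resolve under CFS.

/* Hypothetical consumer, assuming this diff is applied; not part of the patch. */
#include <linux/kernel.h>
#include <linux/sched.h>

static void report_task(struct task_struct *p)
{
	/* Under CFS, tsk_seruntime(p) expands to p->se.sum_exec_runtime (u64). */
	printk(KERN_INFO "pid %d: %llu ns of CPU time, iso=%d\n",
	       p->pid, (unsigned long long)tsk_seruntime(p), iso_task(p));

	/* The CFS stub of above_background_load() always returns 1. */
	if (above_background_load())
		printk(KERN_INFO "above background load\n");

	/* Prints "CFS CPU scheduler." on this branch of the compatibility layer. */
	print_scheduler_version();
}

On a tree carrying a different scheduler, the same calls would resolve to that scheduler's own definitions, which is the point of routing them through these macros and inlines rather than touching se.sum_exec_runtime and friends directly.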