Added ck2 patchset, updating BFS from v0.357 to v0.401
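
Note on the new IRQ time accounting opt-in declared below: enable_sched_clock_irqtime()
is intended to be called from architecture clock setup code once a fast sched_clock is
known to be available, and disable_sched_clock_irqtime() if it later turns out to be
unreliable. A minimal sketch of a call site follows; the surrounding function and file
are hypothetical and not part of this patch, only the two opt-in calls come from it:

	#include <linux/init.h>
	#include <linux/sched.h>

	/* Hypothetical arch clock setup path (illustration only). */
	static void __init example_sched_clock_setup(void)
	{
		/* ... register a cheap, monotonic sched_clock source ... */

		/*
		 * Opt in to IRQ time accounting only when sched_clock is fast;
		 * the explicit opt-in avoids a perf penalty on slow clocks.
		 */
		enable_sched_clock_irqtime();
	}
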
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ae682f7d123ce0d4cdfffeac82ae34534b847833..7002ac0cae5a4a16fa6b49d34dc2f0e3c48e9902 100644
--- include/linux/sched.h
+++ include/linux/sched.h
@@ -739,14 +739,6 @@ struct user_struct {
uid_t uid;
struct user_namespace *user_ns;
-#ifdef CONFIG_USER_SCHED
- struct task_group *tg;
-#ifdef CONFIG_SYSFS
- struct kobject kobj;
- struct delayed_work work;
-#endif
-#endif
-
#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
#endif
@@ -913,6 +905,7 @@ struct sched_group {
* single CPU.
*/
unsigned int cpu_power;
+ unsigned int group_weight;
/*
* The CPUs this group covers.
@@ -1132,7 +1125,7 @@ struct sched_class {
struct task_struct *task);
#ifdef CONFIG_FAIR_GROUP_SCHED
- void (*moved_group) (struct task_struct *p, int on_rq);
+ void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};
@@ -1259,7 +1252,9 @@ struct task_struct {
struct list_head run_list;
u64 last_ran;
u64 sched_time; /* sched_clock time spent running */
-
+#ifdef CONFIG_SMP
+ int sticky; /* Soft affined flag */
+#endif
unsigned long rt_timeout;
#else /* CONFIG_SCHED_BFS */
const struct sched_class *sched_class;
@@ -1581,6 +1576,8 @@ struct task_struct {
#ifdef CONFIG_SCHED_BFS
extern int grunqueue_is_locked(void);
extern void grq_unlock_wait(void);
+extern void cpu_scaling(int cpu);
+extern void cpu_nonscaling(int cpu);
#define tsk_seruntime(t) ((t)->sched_time)
#define tsk_rttimeout(t) ((t)->rt_timeout)
#define task_rq_unlock_wait(tsk) grq_unlock_wait()
@@ -1598,7 +1595,7 @@ static inline void tsk_cpus_current(stru
static inline void print_scheduler_version(void)
{
- printk(KERN_INFO"BFS CPU scheduler v0.357 by Con Kolivas.\n");
+ printk(KERN_INFO"BFS CPU scheduler v0.401 by Con Kolivas.\n");
}
static inline int iso_task(struct task_struct *p)
@@ -1608,6 +1605,13 @@ static inline int iso_task(struct task_s
#else
extern int runqueue_is_locked(int cpu);
extern void task_rq_unlock_wait(struct task_struct *p);
+static inline void cpu_scaling(int cpu)
+{
+}
+
+static inline void cpu_nonscaling(int cpu)
+{
+}
#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
#define tsk_rttimeout(t) ((t)->rt.timeout)
@@ -1836,8 +1840,7 @@ extern int task_free_unregister(struct n
/*
* Per process flags
*/
-#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
- /* Not implemented yet, only for 486*/
+#define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */
#define PF_STARTING 0x00000002 /* being created */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
@@ -1974,6 +1977,19 @@ extern void sched_clock_idle_wakeup_even
*/
extern unsigned long long cpu_clock(int cpu);
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
+ * The reason for this explicit opt-in is not to have perf penalty with
+ * slow sched_clocks.
+ */
+extern void enable_sched_clock_irqtime(void);
+extern void disable_sched_clock_irqtime(void);
+#else
+static inline void enable_sched_clock_irqtime(void) {}
+static inline void disable_sched_clock_irqtime(void) {}
+#endif
+
extern unsigned long long
task_sched_runtime(struct task_struct *task);
extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
@@ -2525,9 +2541,9 @@ extern int __cond_resched_lock(spinlock_
extern int __cond_resched_softirq(void);
-#define cond_resched_softirq() ({ \
- __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \
- __cond_resched_softirq(); \
+#define cond_resched_softirq() ({ \
+ __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
+ __cond_resched_softirq(); \
})
/*
@@ -2616,13 +2632,9 @@ extern long sched_getaffinity(pid_t pid,
extern void normalize_rt_tasks(void);
-#ifdef CONFIG_GROUP_SCHED
+#ifdef CONFIG_CGROUP_SCHED
extern struct task_group init_task_group;
-#ifdef CONFIG_USER_SCHED
-extern struct task_group root_task_group;
-extern void set_tg_uid(struct user_struct *user);
-#endif
extern struct task_group *sched_create_group(struct task_group *parent);
extern void sched_destroy_group(struct task_group *tg);