1 This patch adds support for bfs v230, modified for diff size reduction
2
3 --- a/Documentation/sysctl/kernel.txt
4 +++ b/Documentation/sysctl/kernel.txt
5 @@ -27,6 +27,7 @@ show up in /proc/sys/kernel:
6  - domainname
7  - hostname
8  - hotplug
9 +- iso_cpu
10  - java-appletviewer           [ binfmt_java, obsolete ]
11  - java-interpreter            [ binfmt_java, obsolete ]
12  - kstack_depth_to_print       [ X86 only ]
13 @@ -49,6 +50,7 @@ show up in /proc/sys/kernel:
14  - randomize_va_space
15  - real-root-dev               ==> Documentation/initrd.txt
16  - reboot-cmd                  [ SPARC only ]
17 +- rr_interval
18  - rtsig-max
19  - rtsig-nr
20  - sem
21 @@ -171,6 +173,16 @@ Default value is "/sbin/hotplug".
22  
23  ==============================================================
24  
25 +iso_cpu: (BFS only)
26 +
27 +This sets the percentage of cpu time that unprivileged SCHED_ISO tasks
28 +can run at, effectively at realtime priority, averaged over a rolling
29 +five second period across the -whole- system, meaning all cpus.
30 +
31 +Set to 70 (percent) by default.
32 +
33 +==============================================================
34 +
35  l2cr: (PPC only)
36  
37  This flag controls the L2 cache of G3 processor boards. If
38 @@ -333,6 +345,19 @@ rebooting. ???
39  
40  ==============================================================
41  
42 +rr_interval: (BFS only)
43 +
44 +This is the smallest duration that any cpu process scheduling unit
45 +will run for. Increasing this value can increase throughput of cpu
46 +bound tasks substantially but at the expense of increased latencies
47 +overall. This value is in milliseconds and the default value chosen
48 +depends on the number of cpus available at scheduler initialisation
49 +with a minimum of 6.
50 +
51 +Valid values are from 1-5000.
52 +
53 +==============================================================
54 +
55  rtsig-max & rtsig-nr:
56  
57  The file rtsig-max can be used to tune the maximum number
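
As a quick illustration of the two tunables documented above (not part of the patch): on a BFS kernel both appear as plain integer files under /proc/sys/kernel, so they can be read like any other sysctl, and writing a new integer back (as root) changes them at runtime. A minimal userspace sketch, assuming the BFS files are present:

/*
 * Sketch only: read the BFS tunables documented above.  On a non-BFS
 * kernel these files do not exist and -1 is printed instead.
 */
#include <stdio.h>

static int read_tunable(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("rr_interval = %d ms\n",
	       read_tunable("/proc/sys/kernel/rr_interval"));
	printf("iso_cpu     = %d %%\n",
	       read_tunable("/proc/sys/kernel/iso_cpu"));
	return 0;
}
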
58 --- a/include/linux/init_task.h
59 +++ b/include/linux/init_task.h
60 @@ -116,9 +116,10 @@ extern struct cred init_cred;
61         .usage          = ATOMIC_INIT(2),                               \
62         .flags          = PF_KTHREAD,                                   \
63         .lock_depth     = -1,                                           \
64 -       .prio           = MAX_PRIO-20,                                  \
65 +       .prio           = NORMAL_PRIO,                                  \
66         .static_prio    = MAX_PRIO-20,                                  \
67 -       .normal_prio    = MAX_PRIO-20,                                  \
68 +       .normal_prio    = NORMAL_PRIO,                                  \
69 +       .deadline       = 0,                                            \
70         .policy         = SCHED_NORMAL,                                 \
71         .cpus_allowed   = CPU_MASK_ALL,                                 \
72         .mm             = NULL,                                         \
73 --- a/include/linux/sched.h
74 +++ b/include/linux/sched.h
75 @@ -36,9 +36,12 @@
76  #define SCHED_FIFO             1
77  #define SCHED_RR               2
78  #define SCHED_BATCH            3
79 -/* SCHED_ISO: reserved but not implemented yet */
80 +#define SCHED_ISO              4
81  #define SCHED_IDLE             5
82  
83 +#define SCHED_MAX              (SCHED_IDLE)
84 +#define SCHED_RANGE(policy)    ((policy) <= SCHED_MAX)
85 +
86  #ifdef __KERNEL__
87  
88  struct sched_param {
89 @@ -1090,10 +1093,13 @@ struct sched_entity {
90         struct load_weight      load;           /* for load-balancing */
91         struct rb_node          run_node;
92         struct list_head        group_node;
93 +#ifdef CONFIG_SCHED_CFS
94         unsigned int            on_rq;
95  
96         u64                     exec_start;
97 +#endif
98         u64                     sum_exec_runtime;
99 +#ifdef CONFIG_SCHED_CFS
100         u64                     vruntime;
101         u64                     prev_sum_exec_runtime;
102  
103 @@ -1145,6 +1151,7 @@ struct sched_entity {
104         /* rq "owned" by this entity/group: */
105         struct cfs_rq           *my_q;
106  #endif
107 +#endif
108  };
109  
110  struct sched_rt_entity {
111 @@ -1172,17 +1179,19 @@ struct task_struct {
112  
113         int lock_depth;         /* BKL lock depth */
114  
115 -#ifdef CONFIG_SMP
116 -#ifdef __ARCH_WANT_UNLOCKED_CTXSW
117         int oncpu;
118 -#endif
119 -#endif
120 -
121         int prio, static_prio, normal_prio;
122         unsigned int rt_priority;
123         const struct sched_class *sched_class;
124         struct sched_entity se;
125         struct sched_rt_entity rt;
126 +       unsigned long deadline;
127 +#ifdef CONFIG_SCHED_BFS
128 +       int load_weight;        /* for niceness load balancing purposes */
129 +       int first_time_slice;
130 +       unsigned long long timestamp, last_ran;
131 +       unsigned long utime_pc, stime_pc;
132 +#endif
133  
134  #ifdef CONFIG_PREEMPT_NOTIFIERS
135         /* list of struct preempt_notifier: */
136 @@ -1205,6 +1214,9 @@ struct task_struct {
137  
138         unsigned int policy;
139         cpumask_t cpus_allowed;
140 +#ifdef CONFIG_HOTPLUG_CPU
141 +       cpumask_t unplugged_mask;
142 +#endif
143  
144  #ifdef CONFIG_PREEMPT_RCU
145         int rcu_read_lock_nesting;
146 @@ -1497,11 +1509,19 @@ struct task_struct {
147   * priority to a value higher than any user task. Note:
148   * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
149   */
150 -
151 +#define PRIO_RANGE             (40)
152  #define MAX_USER_RT_PRIO       100
153  #define MAX_RT_PRIO            MAX_USER_RT_PRIO
154 -
155 +#ifdef CONFIG_SCHED_BFS
156 +#define MAX_PRIO               (MAX_RT_PRIO + PRIO_RANGE)
157 +#define ISO_PRIO               (MAX_RT_PRIO)
158 +#define NORMAL_PRIO            (MAX_RT_PRIO + 1)
159 +#define IDLE_PRIO              (MAX_RT_PRIO + 2)
160 +#define PRIO_LIMIT             ((IDLE_PRIO) + 1)
161 +#else
162  #define MAX_PRIO               (MAX_RT_PRIO + 40)
163 +#define NORMAL_PRIO    (MAX_PRIO - 20)
164 +#endif
165  #define DEFAULT_PRIO           (MAX_RT_PRIO + 20)
166  
167  static inline int rt_prio(int prio)
168 @@ -1785,7 +1805,7 @@ task_sched_runtime(struct task_struct *t
169  extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
170  
171  /* sched_exec is called by processes performing an exec */
172 -#ifdef CONFIG_SMP
173 +#if defined(CONFIG_SMP) && defined(CONFIG_SCHED_CFS)
174  extern void sched_exec(void);
175  #else
176  #define sched_exec()   {}
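
For reference (not part of the patch), the CONFIG_SCHED_BFS branch of the priority macros above works out numerically to ISO_PRIO = 100, NORMAL_PRIO = 101, IDLE_PRIO = 102, PRIO_LIMIT = 103 and MAX_PRIO = 140, i.e. the three BFS-specific bands sit directly above the realtime range. A standalone sketch mirroring those definitions and printing the values:

/* Standalone sketch mirroring the CONFIG_SCHED_BFS priority macros above. */
#include <stdio.h>

#define PRIO_RANGE        (40)
#define MAX_USER_RT_PRIO  100
#define MAX_RT_PRIO       MAX_USER_RT_PRIO
#define MAX_PRIO          (MAX_RT_PRIO + PRIO_RANGE)
#define ISO_PRIO          (MAX_RT_PRIO)
#define NORMAL_PRIO       (MAX_RT_PRIO + 1)
#define IDLE_PRIO         (MAX_RT_PRIO + 2)
#define PRIO_LIMIT        ((IDLE_PRIO) + 1)

int main(void)
{
	/* prints: MAX_PRIO=140 ISO=100 NORMAL=101 IDLE=102 LIMIT=103 */
	printf("MAX_PRIO=%d ISO=%d NORMAL=%d IDLE=%d LIMIT=%d\n",
	       MAX_PRIO, ISO_PRIO, NORMAL_PRIO, IDLE_PRIO, PRIO_LIMIT);
	return 0;
}
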
177 --- a/init/Kconfig
178 +++ b/init/Kconfig
179 @@ -451,9 +451,22 @@ config LOG_BUF_SHIFT
180  config HAVE_UNSTABLE_SCHED_CLOCK
181         bool
182  
183 +choice
184 +       prompt "Scheduler"
185 +       default SCHED_CFS
186 +
187 +       config SCHED_CFS
188 +               bool "CFS"
189 +
190 +       config SCHED_BFS
191 +               bool "BFS"
192 +
193 +endchoice
194 +
195  config GROUP_SCHED
196         bool "Group CPU scheduler"
197         depends on EXPERIMENTAL
198 +       depends on SCHED_CFS
199         default n
200         help
201           This feature lets CPU scheduler recognize task groups and control CPU
202 @@ -504,6 +517,7 @@ endchoice
203  
204  menuconfig CGROUPS
205         boolean "Control Group support"
206 +       depends on SCHED_CFS
207         help
208           This option adds support for grouping sets of processes together, for
209           use with process control subsystems such as Cpusets, CFS, memory
210 --- a/kernel/Makefile
211 +++ b/kernel/Makefile
212 @@ -2,7 +2,7 @@
213  # Makefile for the linux kernel.
214  #
215  
216 -obj-y     = sched.o fork.o exec_domain.o panic.o printk.o \
217 +obj-y     = $(if $(CONFIG_SCHED_CFS),sched.o,sched_bfs.o) fork.o exec_domain.o panic.o printk.o \
218             cpu.o exit.o itimer.o time.o softirq.o resource.o \
219             sysctl.o capability.o ptrace.o timer.o user.o \
220             signal.o sys.o kmod.o workqueue.o pid.o \
221 @@ -108,6 +108,7 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER
222  # I turn this off for IA-64 only.  Andreas Schwab says it's also needed on m68k
223  # to get a correct value for the wait-channel (WCHAN in ps). --davidm
224  CFLAGS_sched.o := $(PROFILING) -fno-omit-frame-pointer
225 +CFLAGS_sched_bfs.o := $(PROFILING) -fno-omit-frame-pointer
226  endif
227  
228  $(obj)/configs.o: $(obj)/config_data.h
229 --- a/kernel/kthread.c
230 +++ b/kernel/kthread.c
231 @@ -16,7 +16,11 @@
232  #include <linux/mutex.h>
233  #include <trace/events/sched.h>
234  
235 +#ifdef CONFIG_SCHED_BFS
236 +#define KTHREAD_NICE_LEVEL (0)
237 +#else
238  #define KTHREAD_NICE_LEVEL (-5)
239 +#endif
240  
241  static DEFINE_SPINLOCK(kthread_create_lock);
242  static LIST_HEAD(kthread_create_list);
243 --- /dev/null
244 +++ b/kernel/sched_bfs.c
245 @@ -0,0 +1,6105 @@
246 +/*
247 + *  kernel/sched_bfs.c, was sched.c
248 + *
249 + *  Kernel scheduler and related syscalls
250 + *
251 + *  Copyright (C) 1991-2002  Linus Torvalds
252 + *
253 + *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
254 + *             make semaphores SMP safe
255 + *  1998-11-19 Implemented schedule_timeout() and related stuff
256 + *             by Andrea Arcangeli
257 + *  2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
258 + *             hybrid priority-list and round-robin design with
259 + *             an array-switch method of distributing timeslices
260 + *             and per-CPU runqueues.  Cleanups and useful suggestions
261 + *             by Davide Libenzi, preemptible kernel bits by Robert Love.
262 + *  2003-09-03 Interactivity tuning by Con Kolivas.
263 + *  2004-04-02 Scheduler domains code by Nick Piggin
264 + *  2007-04-15  Work begun on replacing all interactivity tuning with a
265 + *              fair scheduling design by Con Kolivas.
266 + *  2007-05-05  Load balancing (smp-nice) and other improvements
267 + *              by Peter Williams
268 + *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
269 + *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
270 + *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
271 + *              Thomas Gleixner, Mike Kravetz
272 + *  now                Brainfuck deadline scheduling policy by Con Kolivas deletes
273 + *              a whole lot of those previous things.
274 + */
275 +
276 +#include <linux/mm.h>
277 +#include <linux/module.h>
278 +#include <linux/nmi.h>
279 +#include <linux/init.h>
280 +#include <asm/uaccess.h>
281 +#include <linux/highmem.h>
282 +#include <linux/smp_lock.h>
283 +#include <asm/mmu_context.h>
284 +#include <linux/interrupt.h>
285 +#include <linux/capability.h>
286 +#include <linux/completion.h>
287 +#include <linux/kernel_stat.h>
288 +#include <linux/debug_locks.h>
289 +#include <linux/perf_counter.h>
290 +#include <linux/security.h>
291 +#include <linux/notifier.h>
292 +#include <linux/profile.h>
293 +#include <linux/freezer.h>
294 +#include <linux/vmalloc.h>
295 +#include <linux/blkdev.h>
296 +#include <linux/delay.h>
297 +#include <linux/smp.h>
298 +#include <linux/threads.h>
299 +#include <linux/timer.h>
300 +#include <linux/rcupdate.h>
301 +#include <linux/cpu.h>
302 +#include <linux/cpuset.h>
303 +#include <linux/cpumask.h>
304 +#include <linux/percpu.h>
305 +#include <linux/kthread.h>
306 +#include <linux/proc_fs.h>
307 +#include <linux/seq_file.h>
308 +#include <linux/syscalls.h>
309 +#include <linux/times.h>
310 +#include <linux/tsacct_kern.h>
311 +#include <linux/kprobes.h>
312 +#include <linux/delayacct.h>
313 +#include <linux/reciprocal_div.h>
314 +#include <linux/log2.h>
315 +#include <linux/bootmem.h>
316 +#include <linux/ftrace.h>
317 +
318 +#include <asm/tlb.h>
319 +#include <asm/unistd.h>
320 +
321 +#define CREATE_TRACE_POINTS
322 +#include <trace/events/sched.h>
323 +
324 +#define rt_prio(prio)          unlikely((prio) < MAX_RT_PRIO)
325 +#define rt_task(p)             rt_prio((p)->prio)
326 +#define rt_queue(rq)           rt_prio((rq)->rq_prio)
327 +#define batch_task(p)          (unlikely((p)->policy == SCHED_BATCH))
328 +#define is_rt_policy(policy)   ((policy) == SCHED_FIFO || \
329 +                                       (policy) == SCHED_RR)
330 +#define has_rt_policy(p)       unlikely(is_rt_policy((p)->policy))
331 +#define idleprio_task(p)       unlikely((p)->policy == SCHED_IDLE)
332 +#define iso_task(p)            unlikely((p)->policy == SCHED_ISO)
333 +#define iso_queue(rq)          unlikely((rq)->rq_policy == SCHED_ISO)
334 +#define ISO_PERIOD             ((5 * HZ * num_online_cpus()) + 1)
335 +
336 +/*
337 + * Convert user-nice values [ -20 ... 0 ... 19 ]
338 + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
339 + * and back.
340 + */
341 +#define NICE_TO_PRIO(nice)     (MAX_RT_PRIO + (nice) + 20)
342 +#define PRIO_TO_NICE(prio)     ((prio) - MAX_RT_PRIO - 20)
343 +#define TASK_NICE(p)           PRIO_TO_NICE((p)->static_prio)
344 +
345 +/*
346 + * 'User priority' is the nice value converted to something we
347 + * can work with better when scaling various scheduler parameters,
348 + * it's a [ 0 ... 39 ] range.
349 + */
350 +#define USER_PRIO(p)           ((p)-MAX_RT_PRIO)
351 +#define TASK_USER_PRIO(p)      USER_PRIO((p)->static_prio)
352 +#define MAX_USER_PRIO          (USER_PRIO(MAX_PRIO))
353 +#define SCHED_PRIO(p)          ((p)+MAX_RT_PRIO)
354 +
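
A small standalone check of the nice <-> priority conversions above (assuming MAX_RT_PRIO = 100 and MAX_PRIO = 140, as set up earlier in this patch): nice -20, 0 and 19 map to static priorities 100, 120 and 139, and 'user priority' runs 0..39.

/* Sketch checking the nice <-> priority conversion macros defined above. */
#include <assert.h>

#define MAX_RT_PRIO            100
#define MAX_PRIO               (MAX_RT_PRIO + 40)
#define NICE_TO_PRIO(nice)     (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)     ((prio) - MAX_RT_PRIO - 20)
#define USER_PRIO(p)           ((p) - MAX_RT_PRIO)
#define MAX_USER_PRIO          (USER_PRIO(MAX_PRIO))

int main(void)
{
	assert(NICE_TO_PRIO(-20) == 100);   /* highest non-rt static prio */
	assert(NICE_TO_PRIO(0)   == 120);   /* the default */
	assert(NICE_TO_PRIO(19)  == 139);   /* lowest */
	assert(PRIO_TO_NICE(120) == 0);
	assert(USER_PRIO(120)    == 20);    /* 'user priority' is 0..39 */
	assert(MAX_USER_PRIO     == 40);
	return 0;
}
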
355 +/* Some helpers for converting to/from various scales. */
356 +#define JIFFIES_TO_NS(TIME)    ((TIME) * (1000000000 / HZ))
357 +#define MS_TO_NS(TIME)         ((TIME) * 1000000)
358 +#define MS_TO_US(TIME)         ((TIME) * 1000)
359 +
360 +#ifdef CONFIG_SMP
361 +/*
362 + * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
363 + * Since cpu_power is a 'constant', we can use a reciprocal divide.
364 + */
365 +static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
366 +{
367 +       return reciprocal_divide(load, sg->reciprocal_cpu_power);
368 +}
369 +
370 +/*
371 + * Each time a sched group cpu_power is changed,
372 + * we must compute its reciprocal value
373 + */
374 +static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
375 +{
376 +       sg->__cpu_power += val;
377 +       sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
378 +}
379 +#endif
380 +
381 +/*
382 + * This is the time all tasks within the same priority round robin.
383 + * Value is in ms and set to a minimum of 6ms. Scales with number of cpus.
384 + * Tunable via /proc interface.
385 + */
386 +int rr_interval __read_mostly = 6;
387 +
388 +/*
389 + * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks
390 + * are allowed to run, averaged over five seconds, as real time tasks. This is
391 + * the total over all online cpus.
392 + */
393 +int sched_iso_cpu __read_mostly = 70;
394 +
395 +int prio_ratios[PRIO_RANGE] __read_mostly;
396 +
397 +static inline unsigned long timeslice(void)
398 +{
399 +       return MS_TO_US(rr_interval);
400 +}
401 +
402 +struct global_rq {
403 +       spinlock_t lock;
404 +       unsigned long nr_running;
405 +       unsigned long nr_uninterruptible;
406 +       unsigned long long nr_switches;
407 +       struct list_head queue[PRIO_LIMIT];
408 +       DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1);
409 +       unsigned long iso_ticks;
410 +       unsigned short iso_refractory;
411 +#ifdef CONFIG_SMP
412 +       unsigned long qnr; /* queued not running */
413 +       cpumask_t cpu_idle_map;
414 +#endif
415 +};
416 +
417 +static struct global_rq grq;
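
To make the single-global-runqueue idea concrete, here is a deliberately scaled-down userspace model (my sketch, not code from the patch) of the queue[] array plus prio_bitmap pair declared above: enqueueing sets the bit for that priority, and picking the next task is "find the lowest set bit, take the head of that list". The real scheduler additionally orders SCHED_NORMAL tasks within their band by earliest deadline, which this model omits.

/* Scaled-down model: PRIO_LIMIT is 103 in the patch, 8 here for brevity. */
#include <stdio.h>

#define NPRIO 8

struct task { int id; struct task *next; };

static struct task *queue[NPRIO];	/* FIFO per priority */
static struct task *tail[NPRIO];
static unsigned int prio_bitmap;	/* bit p set => queue[p] non-empty */

static void enqueue(struct task *t, int prio)
{
	t->next = NULL;
	if (queue[prio])
		tail[prio]->next = t;
	else
		queue[prio] = t;
	tail[prio] = t;
	prio_bitmap |= 1u << prio;
}

static struct task *pick_next(void)
{
	for (int p = 0; p < NPRIO; p++) {
		if (!(prio_bitmap & (1u << p)))
			continue;
		struct task *t = queue[p];
		queue[p] = t->next;
		if (!queue[p])
			prio_bitmap &= ~(1u << p);	/* band now empty */
		return t;
	}
	return NULL;				/* nothing queued: idle */
}

int main(void)
{
	struct task a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

	enqueue(&a, 5);				/* "normal" band */
	enqueue(&b, 5);
	enqueue(&c, 2);				/* lower number runs first */
	for (struct task *t; (t = pick_next()); )
		printf("run task %d\n", t->id);	/* prints 3, 1, 2 */
	return 0;
}
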
418 +
419 +/*
420 + * This is the main, per-CPU runqueue data structure.
421 + * All this is protected by the global_rq lock.
422 + */
423 +struct rq {
424 +#ifdef CONFIG_SMP
425 +#ifdef CONFIG_NO_HZ
426 +       unsigned char in_nohz_recently;
427 +#endif
428 +#endif
429 +
430 +       struct task_struct *curr, *idle;
431 +       struct mm_struct *prev_mm;
432 +       struct list_head queue; /* Place to store currently running task */
433 +
434 +       /* Stored data about rq->curr to work outside grq lock */
435 +       unsigned long rq_deadline;
436 +       unsigned int rq_policy;
437 +       int rq_time_slice;
438 +       int rq_prio;
439 +
440 +       /* Accurate timekeeping data */
441 +       u64 timekeep_clock;
442 +       unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc,
443 +                       iowait_pc, idle_pc;
444 +       atomic_t nr_iowait;
445 +
446 +       int cpu;                /* cpu of this runqueue */
447 +       int online;
448 +
449 +#ifdef CONFIG_SMP
450 +       struct root_domain *rd;
451 +       struct sched_domain *sd;
452 +
453 +       struct list_head migration_queue;
454 +#endif
455 +
456 +       u64 clock;
457 +#ifdef CONFIG_SCHEDSTATS
458 +
459 +       /* latency stats */
460 +       struct sched_info rq_sched_info;
461 +       unsigned long long rq_cpu_time;
462 +       /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
463 +
464 +       /* sys_sched_yield() stats */
465 +       unsigned int yld_count;
466 +
467 +       /* schedule() stats */
468 +       unsigned int sched_switch;
469 +       unsigned int sched_count;
470 +       unsigned int sched_goidle;
471 +
472 +       /* try_to_wake_up() stats */
473 +       unsigned int ttwu_count;
474 +       unsigned int ttwu_local;
475 +
476 +       /* BKL stats */
477 +       unsigned int bkl_count;
478 +#endif
479 +};
480 +
481 +static DEFINE_PER_CPU(struct rq, runqueues) ____cacheline_aligned_in_smp;
482 +static DEFINE_MUTEX(sched_hotcpu_mutex);
483 +
484 +#ifdef CONFIG_SMP
485 +
486 +/*
487 + * We add the notion of a root-domain which will be used to define per-domain
488 + * variables. Each exclusive cpuset essentially defines an island domain by
489 + * fully partitioning the member cpus from any other cpuset. Whenever a new
490 + * exclusive cpuset is created, we also create and attach a new root-domain
491 + * object.
492 + *
493 + */
494 +struct root_domain {
495 +       atomic_t refcount;
496 +       cpumask_var_t span;
497 +       cpumask_var_t online;
498 +
499 +       /*
500 +        * The "RT overload" flag: it gets set if a CPU has more than
501 +        * one runnable RT task.
502 +        */
503 +       cpumask_var_t rto_mask;
504 +       atomic_t rto_count;
505 +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
506 +       /*
507 +        * Preferred wake up cpu nominated by sched_mc balance that will be
508 +        * used when most cpus are idle in the system indicating overall very
509 +        * low system utilisation. Triggered at POWERSAVINGS_BALANCE_WAKEUP(2)
510 +        */
511 +       unsigned int sched_mc_preferred_wakeup_cpu;
512 +#endif
513 +};
514 +
515 +/*
516 + * By default the system creates a single root-domain with all cpus as
517 + * members (mimicking the global state we have today).
518 + */
519 +static struct root_domain def_root_domain;
520 +
521 +#endif
522 +
523 +static inline int cpu_of(struct rq *rq)
524 +{
525 +#ifdef CONFIG_SMP
526 +       return rq->cpu;
527 +#else
528 +       return 0;
529 +#endif
530 +}
531 +
532 +/*
533 + * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
534 + * See detach_destroy_domains: synchronize_sched for details.
535 + *
536 + * The domain tree of any CPU may only be accessed from within
537 + * preempt-disabled sections.
538 + */
539 +#define for_each_domain(cpu, __sd) \
540 +       for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
541 +
542 +#define cpu_rq(cpu)            (&per_cpu(runqueues, (cpu)))
543 +#define this_rq()              (&__get_cpu_var(runqueues))
544 +#define task_rq(p)             cpu_rq(task_cpu(p))
545 +#define cpu_curr(cpu)          (cpu_rq(cpu)->curr)
546 +
547 +#include "sched_stats.h"
548 +
549 +#ifndef prepare_arch_switch
550 +# define prepare_arch_switch(next)     do { } while (0)
551 +#endif
552 +#ifndef finish_arch_switch
553 +# define finish_arch_switch(prev)      do { } while (0)
554 +#endif
555 +
556 +inline void update_rq_clock(struct rq *rq)
557 +{
558 +       rq->clock = sched_clock_cpu(cpu_of(rq));
559 +}
560 +
561 +static inline int task_running(struct task_struct *p)
562 +{
563 +       return (!!p->oncpu);
564 +}
565 +
566 +static inline void grq_lock(void)
567 +       __acquires(grq.lock)
568 +{
569 +       smp_mb();
570 +       spin_lock(&grq.lock);
571 +}
572 +
573 +static inline void grq_unlock(void)
574 +       __releases(grq.lock)
575 +{
576 +       spin_unlock(&grq.lock);
577 +}
578 +
579 +static inline void grq_lock_irq(void)
580 +       __acquires(grq.lock)
581 +{
582 +       smp_mb();
583 +       spin_lock_irq(&grq.lock);
584 +}
585 +
586 +static inline void time_lock_grq(struct rq *rq)
587 +       __acquires(grq.lock)
588 +{
589 +       grq_lock();
590 +       update_rq_clock(rq);
591 +}
592 +
593 +static inline void grq_unlock_irq(void)
594 +       __releases(grq.lock)
595 +{
596 +       spin_unlock_irq(&grq.lock);
597 +}
598 +
599 +static inline void grq_lock_irqsave(unsigned long *flags)
600 +       __acquires(grq.lock)
601 +{
602 +       smp_mb();
603 +       spin_lock_irqsave(&grq.lock, *flags);
604 +}
605 +
606 +static inline void grq_unlock_irqrestore(unsigned long *flags)
607 +       __releases(grq.lock)
608 +{
609 +       spin_unlock_irqrestore(&grq.lock, *flags);
610 +}
611 +
612 +static inline struct rq
613 +*task_grq_lock(struct task_struct *p, unsigned long *flags)
614 +       __acquires(grq.lock)
615 +{
616 +       grq_lock_irqsave(flags);
617 +       return task_rq(p);
618 +}
619 +
620 +static inline struct rq
621 +*time_task_grq_lock(struct task_struct *p, unsigned long *flags)
622 +       __acquires(grq.lock)
623 +{
624 +       struct rq *rq = task_grq_lock(p, flags);
625 +       update_rq_clock(rq);
626 +       return rq;
627 +}
628 +
629 +static inline void task_grq_unlock(unsigned long *flags)
630 +       __releases(grq.lock)
631 +{
632 +       grq_unlock_irqrestore(flags);
633 +}
634 +
635 +/**
636 + * runqueue_is_locked
637 + *
638 + * Returns true if the global runqueue is locked.
639 + * This interface allows printk to be called with the runqueue lock
640 + * held and know whether or not it is OK to wake up the klogd.
641 + */
642 +int runqueue_is_locked(void)
643 +{
644 +       return spin_is_locked(&grq.lock);
645 +}
646 +
647 +void task_rq_unlock_wait(struct task_struct *p)
648 +       __releases(grq.lock)
649 +{
650 +       smp_mb(); /* spin-unlock-wait is not a full memory barrier */
651 +       spin_unlock_wait(&grq.lock);
652 +}
653 +
654 +static inline void time_grq_lock(struct rq *rq, unsigned long *flags)
655 +       __acquires(grq.lock)
656 +{
657 +       spin_lock_irqsave(&grq.lock, *flags);
658 +       update_rq_clock(rq);
659 +}
660 +
661 +static inline struct rq *__task_grq_lock(struct task_struct *p)
662 +       __acquires(grq.lock)
663 +{
664 +       grq_lock();
665 +       return task_rq(p);
666 +}
667 +
668 +static inline void __task_grq_unlock(void)
669 +       __releases(grq.lock)
670 +{
671 +       grq_unlock();
672 +}
673 +
674 +#ifndef __ARCH_WANT_UNLOCKED_CTXSW
675 +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
676 +{
677 +}
678 +
679 +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
680 +{
681 +#ifdef CONFIG_DEBUG_SPINLOCK
682 +       /* this is a valid case when another task releases the spinlock */
683 +       grq.lock.owner = current;
684 +#endif
685 +       /*
686 +        * If we are tracking spinlock dependencies then we have to
687 +        * fix up the runqueue lock - which gets 'carried over' from
688 +        * prev into current:
689 +        */
690 +       spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_);
691 +
692 +       grq_unlock_irq();
693 +}
694 +
695 +#else /* __ARCH_WANT_UNLOCKED_CTXSW */
696 +
697 +static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
698 +{
699 +#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
700 +       grq_unlock_irq();
701 +#else
702 +       grq_unlock();
703 +#endif
704 +}
705 +
706 +static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
707 +{
708 +       smp_wmb();
709 +#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
710 +       local_irq_enable();
711 +#endif
712 +}
713 +#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
714 +
715 +/*
716 + * A task that is queued will be on the grq run list.
717 + * A task that is not running or queued will not be on the grq run list.
718 + * A task that is currently running will have ->oncpu set and be queued
719 + * temporarily in its own rq queue.
720 + * A task that is running and no longer queued will be seen only on
721 + * context switch exit.
722 + */
723 +
724 +static inline int task_queued(struct task_struct *p)
725 +{
726 +       return (!list_empty(&p->rt.run_list));
727 +}
728 +
729 +static inline int task_queued_only(struct task_struct *p)
730 +{
731 +       return (!list_empty(&p->rt.run_list) && !task_running(p));
732 +}
733 +
734 +/*
735 + * Removing from the global runqueue. Enter with grq locked.
736 + */
737 +static void dequeue_task(struct task_struct *p)
738 +{
739 +       list_del_init(&p->rt.run_list);
740 +       if (list_empty(grq.queue + p->prio))
741 +               __clear_bit(p->prio, grq.prio_bitmap);
742 +}
743 +
744 +static inline void reset_first_time_slice(struct task_struct *p)
745 +{
746 +       if (unlikely(p->first_time_slice))
747 +               p->first_time_slice = 0;
748 +}
749 +
750 +static int idleprio_suitable(struct task_struct *p)
751 +{
752 +       return (!freezing(p) && !signal_pending(p) &&
753 +               !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING)));
754 +}
755 +
756 +static int isoprio_suitable(void)
757 +{
758 +       return !grq.iso_refractory;
759 +}
760 +
761 +/*
762 + * Adding to the global runqueue. Enter with grq locked.
763 + */
764 +static void enqueue_task(struct task_struct *p)
765 +{
766 +       if (!rt_task(p)) {
767 +               /* Check it hasn't gotten rt from PI */
768 +               if ((idleprio_task(p) && idleprio_suitable(p)) ||
769 +                  (iso_task(p) && isoprio_suitable()))
770 +                       p->prio = p->normal_prio;
771 +               else
772 +                       p->prio = NORMAL_PRIO;
773 +       }
774 +       __set_bit(p->prio, grq.prio_bitmap);
775 +       list_add_tail(&p->rt.run_list, grq.queue + p->prio);
776 +       sched_info_queued(p);
777 +}
778 +
779 +/* Only the idle task does this as a real time task. */
780 +static inline void enqueue_task_head(struct task_struct *p)
781 +{
782 +       __set_bit(p->prio, grq.prio_bitmap);
783 +       list_add(&p->rt.run_list, grq.queue + p->prio);
784 +       sched_info_queued(p);
785 +}
786 +
787 +static inline void requeue_task(struct task_struct *p)
788 +{
789 +       sched_info_queued(p);
790 +}
791 +
792 +static inline int pratio(struct task_struct *p)
793 +{
794 +       return prio_ratios[TASK_USER_PRIO(p)];
795 +}
796 +
797 +/*
798 + * task_timeslice - all tasks of all priorities get the exact same timeslice
799 + * length. CPU distribution is handled by giving different deadlines to
800 + * tasks of different priorities.
801 + */
802 +static inline int task_timeslice(struct task_struct *p)
803 +{
804 +       return (rr_interval * pratio(p) / 100);
805 +}
806 +
807 +#ifdef CONFIG_SMP
808 +static inline void inc_qnr(void)
809 +{
810 +       grq.qnr++;
811 +}
812 +
813 +static inline void dec_qnr(void)
814 +{
815 +       grq.qnr--;
816 +}
817 +
818 +static inline int queued_notrunning(void)
819 +{
820 +       return grq.qnr;
821 +}
822 +#else
823 +static inline void inc_qnr(void)
824 +{
825 +}
826 +
827 +static inline void dec_qnr(void)
828 +{
829 +}
830 +
831 +static inline int queued_notrunning(void)
832 +{
833 +       return grq.nr_running;
834 +}
835 +#endif
836 +
837 +/*
838 + * activate_idle_task - move idle task to the _front_ of runqueue.
839 + */
840 +static inline void activate_idle_task(struct task_struct *p)
841 +{
842 +       enqueue_task_head(p);
843 +       grq.nr_running++;
844 +       inc_qnr();
845 +}
846 +
847 +static inline int normal_prio(struct task_struct *p)
848 +{
849 +       if (has_rt_policy(p))
850 +               return MAX_RT_PRIO - 1 - p->rt_priority;
851 +       if (idleprio_task(p))
852 +               return IDLE_PRIO;
853 +       if (iso_task(p))
854 +               return ISO_PRIO;
855 +       return NORMAL_PRIO;
856 +}
857 +
858 +/*
859 + * Calculate the current priority, i.e. the priority
860 + * taken into account by the scheduler. This value might
861 + * be boosted by RT tasks as it will be RT if the task got
862 + * RT-boosted. If not then it returns p->normal_prio.
863 + */
864 +static int effective_prio(struct task_struct *p)
865 +{
866 +       p->normal_prio = normal_prio(p);
867 +       /*
868 +        * If we are RT tasks or we were boosted to RT priority,
869 +        * keep the priority unchanged. Otherwise, update priority
870 +        * to the normal priority:
871 +        */
872 +       if (!rt_prio(p->prio))
873 +               return p->normal_prio;
874 +       return p->prio;
875 +}
876 +
877 +/*
878 + * activate_task - move a task to the runqueue. Enter with grq locked. The rq
879 + * doesn't really matter but gives us the local clock.
880 + */
881 +static void activate_task(struct task_struct *p, struct rq *rq)
882 +{
883 +       u64 now = rq->clock;
884 +
885 +       /*
886 +        * Sleep time is in units of nanosecs, so shift by 20 to get a
887 +        * milliseconds-range estimation of the amount of time that the task
888 +        * spent sleeping:
889 +        */
890 +       if (unlikely(prof_on == SLEEP_PROFILING)) {
891 +               if (p->state == TASK_UNINTERRUPTIBLE)
892 +                       profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
893 +                                    (now - p->timestamp) >> 20);
894 +       }
895 +
896 +       p->prio = effective_prio(p);
897 +       p->timestamp = now;
898 +       if (task_contributes_to_load(p))
899 +               grq.nr_uninterruptible--;
900 +       enqueue_task(p);
901 +       grq.nr_running++;
902 +       inc_qnr();
903 +}
904 +
905 +/*
906 + * deactivate_task - If it's running, it's not on the grq and we can just
907 + * decrement the nr_running.
908 + */
909 +static inline void deactivate_task(struct task_struct *p)
910 +{
911 +       if (task_contributes_to_load(p))
912 +               grq.nr_uninterruptible++;
913 +       grq.nr_running--;
914 +}
915 +
916 +#ifdef CONFIG_SMP
917 +void set_task_cpu(struct task_struct *p, unsigned int cpu)
918 +{
919 +       trace_sched_migrate_task(p, cpu);
920 +       /*
921 +        * After ->cpu is set up to a new value, task_grq_lock(p, ...) can be
922 + * successfully executed on another CPU. We must ensure that updates of
923 +        * per-task data have been completed by this moment.
924 +        */
925 +       smp_wmb();
926 +       task_thread_info(p)->cpu = cpu;
927 +}
928 +#endif
929 +
930 +/*
931 + * Move a task off the global queue and take it to a cpu where it will
932 + * become the running task.
933 + */
934 +static inline void take_task(struct rq *rq, struct task_struct *p)
935 +{
936 +       set_task_cpu(p, rq->cpu);
937 +       dequeue_task(p);
938 +       list_add(&p->rt.run_list, &rq->queue);
939 +       dec_qnr();
940 +}
941 +
942 +/*
943 + * Returns a descheduling task to the grq runqueue unless it is being
944 + * deactivated.
945 + */
946 +static inline void return_task(struct task_struct *p, int deactivate)
947 +{
948 +       list_del_init(&p->rt.run_list);
949 +       if (deactivate)
950 +               deactivate_task(p);
951 +       else {
952 +               inc_qnr();
953 +               enqueue_task(p);
954 +       }
955 +}
956 +
957 +/*
958 + * resched_task - mark a task 'to be rescheduled now'.
959 + *
960 + * On UP this means the setting of the need_resched flag, on SMP it
961 + * might also involve a cross-CPU call to trigger the scheduler on
962 + * the target CPU.
963 + */
964 +#ifdef CONFIG_SMP
965 +
966 +#ifndef tsk_is_polling
967 +#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
968 +#endif
969 +
970 +static void resched_task(struct task_struct *p)
971 +{
972 +       int cpu;
973 +
974 +       assert_spin_locked(&grq.lock);
975 +
976 +       if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
977 +               return;
978 +
979 +       set_tsk_thread_flag(p, TIF_NEED_RESCHED);
980 +
981 +       cpu = task_cpu(p);
982 +       if (cpu == smp_processor_id())
983 +               return;
984 +
985 +       /* NEED_RESCHED must be visible before we test polling */
986 +       smp_mb();
987 +       if (!tsk_is_polling(p))
988 +               smp_send_reschedule(cpu);
989 +}
990 +
991 +#else
992 +static inline void resched_task(struct task_struct *p)
993 +{
994 +       assert_spin_locked(&grq.lock);
995 +       set_tsk_need_resched(p);
996 +}
997 +#endif
998 +
999 +/**
1000 + * task_curr - is this task currently executing on a CPU?
1001 + * @p: the task in question.
1002 + */
1003 +inline int task_curr(const struct task_struct *p)
1004 +{
1005 +       return cpu_curr(task_cpu(p)) == p;
1006 +}
1007 +
1008 +#ifdef CONFIG_SMP
1009 +struct migration_req {
1010 +       struct list_head list;
1011 +
1012 +       struct task_struct *task;
1013 +       int dest_cpu;
1014 +
1015 +       struct completion done;
1016 +};
1017 +
1018 +/*
1019 + * wait_task_context_switch -  wait for a thread to complete at least one
1020 + *                             context switch.
1021 + *
1022 + * @p must not be current.
1023 + */
1024 +void wait_task_context_switch(struct task_struct *p)
1025 +{
1026 +       unsigned long nvcsw, nivcsw, flags;
1027 +       int running;
1028 +       struct rq *rq;
1029 +
1030 +       nvcsw   = p->nvcsw;
1031 +       nivcsw  = p->nivcsw;
1032 +       for (;;) {
1033 +               /*
1034 +                * The runqueue is assigned before the actual context
1035 +                * switch. We need to take the runqueue lock.
1036 +                *
1037 +                * We could check initially without the lock but it is
1038 +                * very likely that we need to take the lock in every
1039 +                * iteration.
1040 +                */
1041 +               rq = task_grq_lock(p, &flags);
1042 +               running = task_running(p);
1043 +               task_grq_unlock(&flags);
1044 +
1045 +               if (likely(!running))
1046 +                       break;
1047 +               /*
1048 +                * The switch count is incremented before the actual
1049 +                * context switch. We thus wait for two switches to be
1050 +                * sure at least one completed.
1051 +                */
1052 +               if ((p->nvcsw - nvcsw) > 1)
1053 +                       break;
1054 +               if ((p->nivcsw - nivcsw) > 1)
1055 +                       break;
1056 +
1057 +               cpu_relax();
1058 +       }
1059 +}
1060 +
1061 +/*
1062 + * wait_task_inactive - wait for a thread to unschedule.
1063 + *
1064 + * If @match_state is nonzero, it's the @p->state value just checked and
1065 + * not expected to change.  If it changes, i.e. @p might have woken up,
1066 + * then return zero.  When we succeed in waiting for @p to be off its CPU,
1067 + * we return a positive number (its total switch count).  If a second call
1068 + * a short while later returns the same number, the caller can be sure that
1069 + * @p has remained unscheduled the whole time.
1070 + *
1071 + * The caller must ensure that the task *will* unschedule sometime soon,
1072 + * else this function might spin for a *long* time. This function can't
1073 + * be called with interrupts off, or it may introduce deadlock with
1074 + * smp_call_function() if an IPI is sent by the same process we are
1075 + * waiting to become inactive.
1076 + */
1077 +unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1078 +{
1079 +       unsigned long flags;
1080 +       int running, on_rq;
1081 +       unsigned long ncsw;
1082 +       struct rq *rq;
1083 +
1084 +       for (;;) {
1085 +               /*
1086 +                * We do the initial early heuristics without holding
1087 +                * any task-queue locks at all. We'll only try to get
1088 +                * the runqueue lock when things look like they will
1089 +                * work out!
1090 +                */
1091 +               rq = task_rq(p);
1092 +
1093 +               /*
1094 +                * If the task is actively running on another CPU
1095 +                * still, just relax and busy-wait without holding
1096 +                * any locks.
1097 +                *
1098 +                * NOTE! Since we don't hold any locks, it's not
1099 +                * even sure that "rq" stays as the right runqueue!
1100 +                * But we don't care, since this will
1101 +                * return false if the runqueue has changed and p
1102 +                * is actually now running somewhere else!
1103 +                */
1104 +               while (task_running(p) && p == rq->curr) {
1105 +                       if (match_state && unlikely(p->state != match_state))
1106 +                               return 0;
1107 +                       cpu_relax();
1108 +               }
1109 +
1110 +               /*
1111 +                * Ok, time to look more closely! We need the grq
1112 +                * lock now, to be *sure*. If we're wrong, we'll
1113 +                * just go back and repeat.
1114 +                */
1115 +               rq = task_grq_lock(p, &flags);
1116 +               trace_sched_wait_task(rq, p);
1117 +               running = task_running(p);
1118 +               on_rq = task_queued(p);
1119 +               ncsw = 0;
1120 +               if (!match_state || p->state == match_state)
1121 +                       ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1122 +               task_grq_unlock(&flags);
1123 +
1124 +               /*
1125 +                * If it changed from the expected state, bail out now.
1126 +                */
1127 +               if (unlikely(!ncsw))
1128 +                       break;
1129 +
1130 +               /*
1131 +                * Was it really running after all now that we
1132 +                * checked with the proper locks actually held?
1133 +                *
1134 +                * Oops. Go back and try again..
1135 +                */
1136 +               if (unlikely(running)) {
1137 +                       cpu_relax();
1138 +                       continue;
1139 +               }
1140 +
1141 +               /*
1142 +                * It's not enough that it's not actively running,
1143 +                * it must be off the runqueue _entirely_, and not
1144 +                * preempted!
1145 +                *
1146 +                * So if it was still runnable (but just not actively
1147 +                * running right now), it's preempted, and we should
1148 +                * yield - it could be a while.
1149 +                */
1150 +               if (unlikely(on_rq)) {
1151 +                       schedule_timeout_uninterruptible(1);
1152 +                       continue;
1153 +               }
1154 +
1155 +               /*
1156 +                * Ahh, all good. It wasn't running, and it wasn't
1157 +                * runnable, which means that it will never become
1158 +                * running in the future either. We're all done!
1159 +                */
1160 +               break;
1161 +       }
1162 +
1163 +       return ncsw;
1164 +}
1165 +
1166 +/***
1167 + * kick_process - kick a running thread to enter/exit the kernel
1168 + * @p: the to-be-kicked thread
1169 + *
1170 + * Cause a process which is running on another CPU to enter
1171 + * kernel-mode, without any delay. (to get signals handled.)
1172 + *
1173 + * NOTE: this function doesn't have to take the runqueue lock,
1174 + * because all it wants to ensure is that the remote task enters
1175 + * the kernel. If the IPI races and the task has been migrated
1176 + * to another CPU then no harm is done and the purpose has been
1177 + * achieved as well.
1178 + */
1179 +void kick_process(struct task_struct *p)
1180 +{
1181 +       int cpu;
1182 +
1183 +       preempt_disable();
1184 +       cpu = task_cpu(p);
1185 +       if ((cpu != smp_processor_id()) && task_curr(p))
1186 +               smp_send_reschedule(cpu);
1187 +       preempt_enable();
1188 +}
1189 +EXPORT_SYMBOL_GPL(kick_process);
1190 +#endif
1191 +
1192 +#define rq_idle(rq)    ((rq)->rq_prio == PRIO_LIMIT)
1193 +
1194 +/*
1195 + * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the
1196 + * basis of earlier deadlines. SCHED_BATCH and SCHED_IDLE don't preempt,
1197 + * they cooperatively multitask.
1198 + */
1199 +static inline int task_preempts_curr(struct task_struct *p, struct rq *rq)
1200 +{
1201 +       int preempts = 0;
1202 +
1203 +       if (p->prio < rq->rq_prio)
1204 +               preempts = 1;
1205 +       else if (p->policy == SCHED_NORMAL && (p->prio == rq->rq_prio &&
1206 +                time_before(p->deadline, rq->rq_deadline)))
1207 +                       preempts = 1;
1208 +       return preempts;
1209 +}
1210 +
1211 +/*
1212 + * Wake up *any* suitable cpu to schedule this task.
1213 + */
1214 +static void try_preempt(struct task_struct *p)
1215 +{
1216 +       struct rq *highest_prio_rq, *this_rq;
1217 +       unsigned long latest_deadline, cpu;
1218 +       int highest_prio;
1219 +       cpumask_t tmp;
1220 +
1221 +       /* Try the task's previous rq first, and keep it as the fallback */
1222 +       this_rq = task_rq(p);
1223 +
1224 +       if (cpu_isset(this_rq->cpu, p->cpus_allowed)) {
1225 +               highest_prio_rq = this_rq;
1226 +               /* If this_rq is idle, use that. */
1227 +               if (rq_idle(this_rq))
1228 +                       goto found_rq;
1229 +       } else
1230 +               highest_prio_rq = cpu_rq(any_online_cpu(p->cpus_allowed));
1231 +       latest_deadline = this_rq->rq_deadline;
1232 +       highest_prio = this_rq->rq_prio;
1233 +
1234 +       cpus_and(tmp, cpu_online_map, p->cpus_allowed);
1235 +
1236 +       for_each_cpu_mask(cpu, tmp) {
1237 +               struct rq *rq;
1238 +               int rq_prio;
1239 +
1240 +               rq = cpu_rq(cpu);
1241 +
1242 +               if (rq_idle(rq)) {
1243 +                       /* found an idle rq, use that one */
1244 +                       highest_prio_rq = rq;
1245 +                       goto found_rq;
1246 +               }
1247 +
1248 +               rq_prio = rq->rq_prio;
1249 +               if (rq_prio > highest_prio ||
1250 +                       (rq_prio == highest_prio &&
1251 +                       time_after(rq->rq_deadline, latest_deadline))) {
1252 +                               highest_prio = rq_prio;
1253 +                               latest_deadline = rq->rq_deadline;
1254 +                               highest_prio_rq = rq;
1255 +               }
1256 +       }
1257 +
1258 +       if (!task_preempts_curr(p, highest_prio_rq))
1259 +               return;
1260 +found_rq:
1261 +       resched_task(highest_prio_rq->curr);
1262 +       return;
1263 +}
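
A userspace sketch of the CPU-selection rule try_preempt() implements above (an illustration under simplifying assumptions, not the patch code): an idle runqueue is taken immediately; otherwise the runqueue running at the numerically highest rq_prio wins, with ties broken by the latest rq_deadline, and the caller still applies task_preempts_curr() before rescheduling. The cpus_allowed filtering and the fallback to the task's previous rq are omitted here.

/* Sketch of the target-CPU choice; prio/deadline stand in for rq_prio/rq_deadline. */
#include <stdio.h>

struct cpu_state { int prio; unsigned long deadline; };

#define PRIO_LIMIT 103			/* idle marker, as in rq_idle() */

static int pick_target(const struct cpu_state *cpu, int ncpus)
{
	int best = 0;

	for (int i = 0; i < ncpus; i++) {
		if (cpu[i].prio == PRIO_LIMIT)
			return i;	/* idle CPU: use it immediately */
		if (cpu[i].prio > cpu[best].prio ||
		    (cpu[i].prio == cpu[best].prio &&
		     cpu[i].deadline > cpu[best].deadline))
			best = i;	/* lowest-priority / latest-deadline work */
	}
	return best;			/* caller still checks task_preempts_curr() */
}

int main(void)
{
	struct cpu_state cpus[] = {
		{ .prio = 101, .deadline = 500 },	/* SCHED_NORMAL, earlier deadline */
		{ .prio = 101, .deadline = 900 },	/* SCHED_NORMAL, later deadline   */
		{ .prio =  40, .deadline =   0 },	/* realtime task                  */
	};
	printf("preempt CPU %d\n", pick_target(cpus, 3));	/* prints 1 */
	return 0;
}
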
1264 +
1265 +/**
1266 + * task_oncpu_function_call - call a function on the cpu on which a task runs
1267 + * @p:         the task to evaluate
1268 + * @func:      the function to be called
1269 + * @info:      the function call argument
1270 + *
1271 + * Calls the function @func when the task is currently running. This might
1272 + * be on the current CPU, which just calls the function directly
1273 + */
1274 +void task_oncpu_function_call(struct task_struct *p,
1275 +                             void (*func) (void *info), void *info)
1276 +{
1277 +       int cpu;
1278 +
1279 +       preempt_disable();
1280 +       cpu = task_cpu(p);
1281 +       if (task_curr(p))
1282 +               smp_call_function_single(cpu, func, info, 1);
1283 +       preempt_enable();
1284 +}
1285 +
1286 +#ifdef CONFIG_SMP
1287 +static int suitable_idle_cpus(struct task_struct *p)
1288 +{
1289 +       return (cpus_intersects(p->cpus_allowed, grq.cpu_idle_map));
1290 +}
1291 +#else
1292 +static int suitable_idle_cpus(struct task_struct *p)
1293 +{
1294 +       return 0;
1295 +}
1296 +#endif
1297 +
1298 +/***
1299 + * try_to_wake_up - wake up a thread
1300 + * @p: the to-be-woken-up thread
1301 + * @state: the mask of task states that can be woken
1302 + * @sync: do a synchronous wakeup?
1303 + *
1304 + * Put it on the run-queue if it's not already there. The "current"
1305 + * thread is always on the run-queue (except when the actual
1306 + * re-schedule is in progress), and as such you're allowed to do
1307 + * the simpler "current->state = TASK_RUNNING" to mark yourself
1308 + * runnable without the overhead of this.
1309 + *
1310 + * returns failure only if the task is already active.
1311 + */
1312 +static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
1313 +{
1314 +       unsigned long flags;
1315 +       int success = 0;
1316 +       long old_state;
1317 +       struct rq *rq;
1318 +
1319 +       rq = time_task_grq_lock(p, &flags);
1320 +       old_state = p->state;
1321 +       if (!(old_state & state))
1322 +               goto out_unlock;
1323 +
1324 +       /*
1325 +        * Note this catches tasks that are running and queued, but returns
1326 +        * false during the context switch when they're running and no
1327 +        * longer queued.
1328 +        */
1329 +       if (task_queued(p))
1330 +               goto out_running;
1331 +
1332 +       activate_task(p, rq);
1333 +       /*
1334 +        * Sync wakeups (i.e. those types of wakeups where the waker
1335 +        * has indicated that it will leave the CPU in short order)
1336 +        * don't trigger a preemption if there are no idle cpus,
1337 +        * instead waiting for current to deschedule.
1338 +        */
1339 +       if (!sync || (sync && suitable_idle_cpus(p)))
1340 +               try_preempt(p);
1341 +       success = 1;
1342 +
1343 +out_running:
1344 +       trace_sched_wakeup(rq, p, success);
1345 +       p->state = TASK_RUNNING;
1346 +out_unlock:
1347 +       task_grq_unlock(&flags);
1348 +       return success;
1349 +}
1350 +
1351 +/**
1352 + * wake_up_process - Wake up a specific process
1353 + * @p: The process to be woken up.
1354 + *
1355 + * Attempt to wake up the nominated process and move it to the set of runnable
1356 + * processes.  Returns 1 if the process was woken up, 0 if it was already
1357 + * running.
1358 + *
1359 + * It may be assumed that this function implies a write memory barrier before
1360 + * changing the task state if and only if any tasks are woken up.
1361 + */
1362 +int wake_up_process(struct task_struct *p)
1363 +{
1364 +       return try_to_wake_up(p, TASK_ALL, 0);
1365 +}
1366 +EXPORT_SYMBOL(wake_up_process);
1367 +
1368 +int wake_up_state(struct task_struct *p, unsigned int state)
1369 +{
1370 +       return try_to_wake_up(p, state, 0);
1371 +}
1372 +
1373 +/*
1374 + * Perform scheduler related setup for a newly forked process p.
1375 + * p is forked by current.
1376 + */
1377 +void sched_fork(struct task_struct *p, int clone_flags)
1378 +{
1379 +       int cpu = get_cpu();
1380 +       struct rq *rq;
1381 +
1382 +#ifdef CONFIG_PREEMPT_NOTIFIERS
1383 +       INIT_HLIST_HEAD(&p->preempt_notifiers);
1384 +#endif
1385 +       /*
1386 +        * We mark the process as running here, but have not actually
1387 +        * inserted it onto the runqueue yet. This guarantees that
1388 +        * nobody will actually run it, and a signal or other external
1389 +        * event cannot wake it up and insert it on the runqueue either.
1390 +        */
1391 +       p->state = TASK_RUNNING;
1392 +       set_task_cpu(p, cpu);
1393 +
1394 +       /* Should be reset in fork.c but done here for ease of bfs patching */
1395 +       p->se.sum_exec_runtime = p->stime_pc = p->utime_pc = 0;
1396 +
1397 +       /*
1398 +        * Make sure we do not leak PI boosting priority to the child:
1399 +        */
1400 +       p->prio = current->normal_prio;
1401 +
1402 +       INIT_LIST_HEAD(&p->rt.run_list);
1403 +#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1404 +       if (unlikely(sched_info_on()))
1405 +               memset(&p->sched_info, 0, sizeof(p->sched_info));
1406 +#endif
1407 +
1408 +       p->oncpu = 0;
1409 +
1410 +#ifdef CONFIG_PREEMPT
1411 +       /* Want to start with kernel preemption disabled. */
1412 +       task_thread_info(p)->preempt_count = 1;
1413 +#endif
1414 +       if (unlikely(p->policy == SCHED_FIFO))
1415 +               goto out;
1416 +       /*
1417 +        * Share the timeslice between parent and child, thus the
1418 +        * total amount of pending timeslices in the system doesn't change,
1419 +        * resulting in more scheduling fairness. If it's negative, it won't
1420 +        * matter since that's the same as being 0. current's time_slice is
1421 +        * actually in rq_time_slice when it's running.
1422 +        */
1423 +       local_irq_disable();
1424 +       rq = task_rq(current);
1425 +       if (likely(rq->rq_time_slice > 0)) {
1426 +               rq->rq_time_slice /= 2;
1427 +               /*
1428 +                * The remainder of the first timeslice might be recovered by
1429 +                * the parent if the child exits early enough.
1430 +                */
1431 +               p->first_time_slice = 1;
1432 +       }
1433 +       p->rt.time_slice = rq->rq_time_slice;
1434 +       local_irq_enable();
1435 +out:
1436 +       put_cpu();
1437 +}
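
As a concrete example of the slice split above: if the parent still has, say, 6 units of rq_time_slice left at fork time, it is halved to 3 and the child starts with the same 3; because first_time_slice is set on the child, sched_exit() below hands that remainder back to the parent, capped at one full timeslice(), should the child exit before using it.
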
1438 +
1439 +/*
1440 + * wake_up_new_task - wake up a newly created task for the first time.
1441 + *
1442 + * This function will do some initial scheduler statistics housekeeping
1443 + * that must be done for every newly created context, then puts the task
1444 + * on the runqueue and wakes it.
1445 + */
1446 +void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1447 +{
1448 +       struct task_struct *parent;
1449 +       unsigned long flags;
1450 +       struct rq *rq;
1451 +
1450 +       rq = time_task_grq_lock(p, &flags);
1453 +       parent = p->parent;
1454 +       BUG_ON(p->state != TASK_RUNNING);
1455 +       set_task_cpu(p, task_cpu(parent));
1456 +
1457 +       activate_task(p, rq);
1458 +       trace_sched_wakeup_new(rq, p, 1);
1459 +       if (!(clone_flags & CLONE_VM) && rq->curr == parent &&
1460 +               !suitable_idle_cpus(p)) {
1461 +               /*
1462 +                * The VM isn't cloned, so we're in a good position to
1463 +                * do child-runs-first in anticipation of an exec. This
1464 +                * usually avoids a lot of COW overhead.
1465 +                */
1466 +                       resched_task(parent);
1467 +       } else
1468 +               try_preempt(p);
1469 +       task_grq_unlock(&flags);
1470 +}
1471 +
1472 +/*
1473 + * Potentially available exiting-child timeslices are
1474 + * retrieved here - this way the parent does not get
1475 + * penalized for creating too many threads.
1476 + *
1477 + * (this cannot be used to 'generate' timeslices
1478 + * artificially, because any timeslice recovered here
1479 + * was given away by the parent in the first place.)
1480 + */
1481 +void sched_exit(struct task_struct *p)
1482 +{
1483 +       struct task_struct *parent;
1484 +       unsigned long flags;
1485 +       struct rq *rq;
1486 +
1487 +       if (p->first_time_slice) {
1488 +               parent = p->parent;
1489 +               rq = task_grq_lock(parent, &flags);
1490 +               parent->rt.time_slice += p->rt.time_slice;
1491 +               if (unlikely(parent->rt.time_slice > timeslice()))
1492 +                       parent->rt.time_slice = timeslice();
1493 +               task_grq_unlock(&flags);
1494 +       }
1495 +}
1496 +
1497 +#ifdef CONFIG_PREEMPT_NOTIFIERS
1498 +
1499 +/**
1500 + * preempt_notifier_register - tell me when current is being preempted & rescheduled
1501 + * @notifier: notifier struct to register
1502 + */
1503 +void preempt_notifier_register(struct preempt_notifier *notifier)
1504 +{
1505 +       hlist_add_head(&notifier->link, &current->preempt_notifiers);
1506 +}
1507 +EXPORT_SYMBOL_GPL(preempt_notifier_register);
1508 +
1509 +/**
1510 + * preempt_notifier_unregister - no longer interested in preemption notifications
1511 + * @notifier: notifier struct to unregister
1512 + *
1513 + * This is safe to call from within a preemption notifier.
1514 + */
1515 +void preempt_notifier_unregister(struct preempt_notifier *notifier)
1516 +{
1517 +       hlist_del(&notifier->link);
1518 +}
1519 +EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
1520 +
1521 +static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1522 +{
1523 +       struct preempt_notifier *notifier;
1524 +       struct hlist_node *node;
1525 +
1526 +       hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1527 +               notifier->ops->sched_in(notifier, raw_smp_processor_id());
1528 +}
1529 +
1530 +static void
1531 +fire_sched_out_preempt_notifiers(struct task_struct *curr,
1532 +                                struct task_struct *next)
1533 +{
1534 +       struct preempt_notifier *notifier;
1535 +       struct hlist_node *node;
1536 +
1537 +       hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
1538 +               notifier->ops->sched_out(notifier, next);
1539 +}
1540 +
1541 +#else /* !CONFIG_PREEMPT_NOTIFIERS */
1542 +
1543 +static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
1544 +{
1545 +}
1546 +
1547 +static void
1548 +fire_sched_out_preempt_notifiers(struct task_struct *curr,
1549 +                                struct task_struct *next)
1550 +{
1551 +}
1552 +
1553 +#endif /* CONFIG_PREEMPT_NOTIFIERS */
1554 +
1555 +/**
1556 + * prepare_task_switch - prepare to switch tasks
1557 + * @rq: the runqueue preparing to switch
1558 + * @next: the task we are going to switch to.
1559 + *
1560 + * This is called with the rq lock held and interrupts off. It must
1561 + * be paired with a subsequent finish_task_switch after the context
1562 + * switch.
1563 + *
1564 + * prepare_task_switch sets up locking and calls architecture specific
1565 + * hooks.
1566 + */
1567 +static inline void
1568 +prepare_task_switch(struct rq *rq, struct task_struct *prev,
1569 +                   struct task_struct *next)
1570 +{
1571 +       fire_sched_out_preempt_notifiers(prev, next);
1572 +       prepare_lock_switch(rq, next);
1573 +       prepare_arch_switch(next);
1574 +}
1575 +
1576 +/**
1577 + * finish_task_switch - clean up after a task-switch
1578 + * @rq: runqueue associated with task-switch
1579 + * @prev: the thread we just switched away from.
1580 + *
1581 + * finish_task_switch must be called after the context switch, paired
1582 + * with a prepare_task_switch call before the context switch.
1583 + * finish_task_switch will reconcile locking set up by prepare_task_switch,
1584 + * and do any other architecture-specific cleanup actions.
1585 + *
1586 + * Note that we may have delayed dropping an mm in context_switch(). If
1587 + * so, we finish that here outside of the runqueue lock.  (Doing it
1588 + * with the lock held can cause deadlocks; see schedule() for
1589 + * details.)
1590 + */
1591 +static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
1592 +       __releases(grq.lock)
1593 +{
1594 +       struct mm_struct *mm = rq->prev_mm;
1595 +       long prev_state;
1596 +
1597 +       rq->prev_mm = NULL;
1598 +
1599 +       /*
1600 +        * A task struct has one reference for the use as "current".
1601 +        * If a task dies, then it sets TASK_DEAD in tsk->state and calls
1602 +        * schedule one last time. The schedule call will never return, and
1603 +        * the scheduled task must drop that reference.
1604 +        * The test for TASK_DEAD must occur while the runqueue locks are
1605 +        * still held, otherwise prev could be scheduled on another cpu, die
1606 +        * there before we look at prev->state, and then the reference would
1607 +        * be dropped twice.
1608 +        *              Manfred Spraul <manfred@colorfullife.com>
1609 +        */
1610 +       prev_state = prev->state;
1611 +       finish_arch_switch(prev);
1612 +       perf_counter_task_sched_in(current, cpu_of(rq));
1613 +       finish_lock_switch(rq, prev);
1614 +
1615 +       fire_sched_in_preempt_notifiers(current);
1616 +       if (mm)
1617 +               mmdrop(mm);
1618 +       if (unlikely(prev_state == TASK_DEAD)) {
1619 +               /*
1620 +                * Remove function-return probe instances associated with this
1621 +                * task and put them back on the free list.
1622 +                */
1623 +               kprobe_flush_task(prev);
1624 +               put_task_struct(prev);
1625 +       }
1626 +}
1627 +
1628 +/**
1629 + * schedule_tail - first thing a freshly forked thread must call.
1630 + * @prev: the thread we just switched away from.
1631 + */
1632 +asmlinkage void schedule_tail(struct task_struct *prev)
1633 +       __releases(grq.lock)
1634 +{
1635 +       struct rq *rq = this_rq();
1636 +
1637 +       finish_task_switch(rq, prev);
1638 +#ifdef __ARCH_WANT_UNLOCKED_CTXSW
1639 +       /* In this case, finish_task_switch does not reenable preemption */
1640 +       preempt_enable();
1641 +#endif
1642 +       if (current->set_child_tid)
1643 +               put_user(current->pid, current->set_child_tid);
1644 +}
1645 +
1646 +/*
1647 + * context_switch - switch to the new MM and the new
1648 + * thread's register state.
1649 + */
1650 +static inline void
1651 +context_switch(struct rq *rq, struct task_struct *prev,
1652 +              struct task_struct *next)
1653 +{
1654 +       struct mm_struct *mm, *oldmm;
1655 +
1656 +       prepare_task_switch(rq, prev, next);
1657 +       trace_sched_switch(rq, prev, next);
1658 +       mm = next->mm;
1659 +       oldmm = prev->active_mm;
1660 +       /*
1661 +        * For paravirt, this is coupled with an exit in switch_to to
1662 +        * combine the page table reload and the switch backend into
1663 +        * one hypercall.
1664 +        */
1665 +       arch_start_context_switch(prev);
1666 +
1667 +       if (unlikely(!mm)) {
1668 +               next->active_mm = oldmm;
1669 +               atomic_inc(&oldmm->mm_count);
1670 +               enter_lazy_tlb(oldmm, next);
1671 +       } else
1672 +               switch_mm(oldmm, mm, next);
1673 +
1674 +       if (unlikely(!prev->mm)) {
1675 +               prev->active_mm = NULL;
1676 +               rq->prev_mm = oldmm;
1677 +       }
1678 +       /*
1679 +        * The runqueue lock will be released by the next
1680 +        * task (which is an invalid locking op but in the case
1681 +        * of the scheduler it's an obvious special-case), so we
1682 +        * do an early lockdep release here:
1683 +        */
1684 +#ifndef __ARCH_WANT_UNLOCKED_CTXSW
1685 +       spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
1686 +#endif
1687 +
1688 +       /* Here we just switch the register state and the stack. */
1689 +       switch_to(prev, next, prev);
1690 +
1691 +       barrier();
1692 +       /*
1693 +        * this_rq must be evaluated again because prev may have moved
1694 +        * CPUs since it called schedule(), thus the 'rq' on its stack
1695 +        * frame will be invalid.
1696 +        */
1697 +       finish_task_switch(this_rq(), prev);
1698 +}
1699 +
1700 +/*
1701 + * nr_running, nr_uninterruptible and nr_context_switches:
1702 + *
1703 + * externally visible scheduler statistics: current number of runnable
1704 + * threads, current number of uninterruptible-sleeping threads, total
1705 + * number of context switches performed since bootup. All are measured
1706 + * without grabbing the grq lock but the occasional inaccurate result
1707 + * doesn't matter so long as it's positive.
1708 + */
1709 +unsigned long nr_running(void)
1710 +{
1711 +       long nr = grq.nr_running;
1712 +
1713 +       if (unlikely(nr < 0))
1714 +               nr = 0;
1715 +       return (unsigned long)nr;
1716 +}
1717 +
1718 +unsigned long nr_uninterruptible(void)
1719 +{
1720 +       unsigned long nu = grq.nr_uninterruptible;
1721 +
1722 +       if (unlikely(nu < 0))
1723 +               nu = 0;
1724 +       return nu;
1725 +}
1726 +
1727 +unsigned long long nr_context_switches(void)
1728 +{
1729 +       long long ns = grq.nr_switches;
1730 +
1731 +       /* This is of course impossible */
1732 +       if (unlikely(ns < 0))
1733 +               ns = 1;
1734 +       return (long long)ns;
1735 +}
1736 +
1737 +unsigned long nr_iowait(void)
1738 +{
1739 +       unsigned long i, sum = 0;
1740 +
1741 +       for_each_possible_cpu(i)
1742 +               sum += atomic_read(&cpu_rq(i)->nr_iowait);
1743 +
1744 +       return sum;
1745 +}
1746 +
1747 +unsigned long nr_active(void)
1748 +{
1749 +       return nr_running() + nr_uninterruptible();
1750 +}
1751 +
1752 +/* Variables and functions for calc_load */
1753 +static unsigned long calc_load_update;
1754 +unsigned long avenrun[3];
1755 +EXPORT_SYMBOL(avenrun);
1756 +
1757 +/**
1758 + * get_avenrun - get the load average array
1759 + * @loads:     pointer to dest load array
1760 + * @offset:    offset to add
1761 + * @shift:     shift count to shift the result left
1762 + *
1763 + * These values are estimates at best, so no need for locking.
1764 + */
1765 +void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
1766 +{
1767 +       loads[0] = (avenrun[0] + offset) << shift;
1768 +       loads[1] = (avenrun[1] + offset) << shift;
1769 +       loads[2] = (avenrun[2] + offset) << shift;
1770 +}
1771 +
1772 +static unsigned long
1773 +calc_load(unsigned long load, unsigned long exp, unsigned long active)
1774 +{
1775 +       load *= exp;
1776 +       load += active * (FIXED_1 - exp);
1777 +       return load >> FSHIFT;
1778 +}
1779 +
1780 +/*
1781 + * calc_load - update the avenrun load estimates every LOAD_FREQ seconds.
1782 + */
1783 +void calc_global_load(void)
1784 +{
1785 +       long active;
1786 +
1787 +       if (time_before(jiffies, calc_load_update))
1788 +               return;
1789 +       active = nr_active() * FIXED_1;
1790 +
1791 +       avenrun[0] = calc_load(avenrun[0], EXP_1, active);
1792 +       avenrun[1] = calc_load(avenrun[1], EXP_5, active);
1793 +       avenrun[2] = calc_load(avenrun[2], EXP_15, active);
1794 +
1795 +       calc_load_update = jiffies + LOAD_FREQ;
1796 +}
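+
+/*
+ * Worked example of the fixed point maths above, with an assumed starting
+ * state: FIXED_1 is the fixed point "1.0" and EXP_n the decay factor for
+ * the n-minute average. Starting from avenrun[0] == 0 with one runnable
+ * task (active == FIXED_1), one update gives
+ *
+ *   new = (0 * EXP_1 + FIXED_1 * (FIXED_1 - EXP_1)) >> FSHIFT
+ *       = FIXED_1 - EXP_1
+ *
+ * i.e. each LOAD_FREQ interval the estimate moves a fraction
+ * (FIXED_1 - EXP_1) / FIXED_1 of the way towards the instantaneous load.
+ */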
1797 +
1798 +DEFINE_PER_CPU(struct kernel_stat, kstat);
1799 +
1800 +EXPORT_PER_CPU_SYMBOL(kstat);
1801 +
1802 +/*
1803 + * On each tick, see what percentage of that tick was attributed to each
1804 + * component and add the percentage to the _pc values. Once a _pc value has
1805 + * accumulated one tick's worth, account for that. This means the total
1806 + * percentage of load components will always be 100 per tick.
1807 + */
1808 +static void pc_idle_time(struct rq *rq, unsigned long pc)
1809 +{
1810 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1811 +       cputime64_t tmp = cputime_to_cputime64(jiffies_to_cputime(1));
1812 +
1813 +       if (atomic_read(&rq->nr_iowait) > 0) {
1814 +               rq->iowait_pc += pc;
1815 +               if (rq->iowait_pc >= 100) {
1816 +                       rq->iowait_pc %= 100;
1817 +                       cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
1818 +               }
1819 +       } else {
1820 +               rq->idle_pc += pc;
1821 +               if (rq->idle_pc >= 100) {
1822 +                       rq->idle_pc %= 100;
1823 +                       cpustat->idle = cputime64_add(cpustat->idle, tmp);
1824 +               }
1825 +       }
1826 +}
1827 +
1828 +static void
1829 +pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset,
1830 +              unsigned long pc, unsigned long ns)
1831 +{
1832 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1833 +       cputime_t one_jiffy = jiffies_to_cputime(1);
1834 +       cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
1835 +       cputime64_t tmp = cputime_to_cputime64(one_jiffy);
1836 +
1837 +       p->stime_pc += pc;
1838 +       if (p->stime_pc >= 100) {
1839 +               p->stime_pc -= 100;
1840 +               p->stime = cputime_add(p->stime, one_jiffy);
1841 +               p->stimescaled = cputime_add(p->stimescaled, one_jiffy_scaled);
1842 +               account_group_system_time(p, one_jiffy);
1843 +               acct_update_integrals(p);
1844 +       }
1845 +       p->se.sum_exec_runtime += ns;
1846 +
1847 +       if (hardirq_count() - hardirq_offset)
1848 +               rq->irq_pc += pc;
1849 +       else if (softirq_count()) {
1850 +               rq->softirq_pc += pc;
1851 +               if (rq->softirq_pc >= 100) {
1852 +                       rq->softirq_pc %= 100;
1853 +                       cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
1854 +               }
1855 +       } else {
1856 +               rq->system_pc += pc;
1857 +               if (rq->system_pc >= 100) {
1858 +                       rq->system_pc %= 100;
1859 +                       cpustat->system = cputime64_add(cpustat->system, tmp);
1860 +               }
1861 +       }
1862 +}
1863 +
1864 +static void pc_user_time(struct rq *rq, struct task_struct *p,
1865 +                        unsigned long pc, unsigned long ns)
1866 +{
1867 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
1868 +       cputime_t one_jiffy = jiffies_to_cputime(1);
1869 +       cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
1870 +       cputime64_t tmp = cputime_to_cputime64(one_jiffy);
1871 +
1872 +       p->utime_pc += pc;
1873 +       if (p->utime_pc >= 100) {
1874 +               p->utime_pc -= 100;
1875 +               p->utime = cputime_add(p->utime, one_jiffy);
1876 +               p->utimescaled = cputime_add(p->utimescaled, one_jiffy_scaled);
1877 +               account_group_user_time(p, one_jiffy);
1878 +               acct_update_integrals(p);
1879 +       }
1880 +       p->se.sum_exec_runtime += ns;
1881 +
1882 +       if (TASK_NICE(p) > 0 || idleprio_task(p)) {
1883 +               rq->nice_pc += pc;
1884 +               if (rq->nice_pc >= 100) {
1885 +                       rq->nice_pc %= 100;
1886 +                       cpustat->nice = cputime64_add(cpustat->nice, tmp);
1887 +               }
1888 +       } else {
1889 +               rq->user_pc += pc;
1890 +               if (rq->user_pc >= 100) {
1891 +                       rq->user_pc %= 100;
1892 +                       cpustat->user = cputime64_add(cpustat->user, tmp);
1893 +               }
1894 +       }
1895 +}
1896 +
1897 +/* Convert nanoseconds to percentage of one tick. */
1898 +#define NS_TO_PC(NS)   (NS * 100 / JIFFIES_TO_NS(1))
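+
+/*
+ * Worked example with assumed values: at HZ == 100 one jiffy is
+ * 10,000,000ns, so a 2,500,000ns stretch gives NS_TO_PC() == 25, i.e. a
+ * quarter of a tick. pc_user_time(), pc_system_time() and pc_idle_time()
+ * above accumulate these percentages and only charge a whole jiffy to the
+ * task and cpustat once a _pc counter reaches 100, carrying any remainder
+ * over to the next update.
+ */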
1899 +
1900 +/*
1901 + * This is called on clock ticks and on context switches.
1902 + * Bank in p->se.sum_exec_runtime the ns elapsed since the last tick or switch.
1903 + * CPU scheduler quota accounting is also performed here in microseconds.
1904 + * sched_clock() occasionally returns bogus values so some sanity
1905 + * checking is required. Time is supposed to be banked all the
1906 + * time so default to half a tick to make up for when sched_clock reverts
1907 + * to just returning jiffies, and for hardware that can't do tsc.
1908 + */
1909 +static void
1910 +update_cpu_clock(struct rq *rq, struct task_struct *p, int tick)
1911 +{
1912 +       long time_diff = rq->clock - p->last_ran;
1913 +       long account_ns = rq->clock - rq->timekeep_clock;
1914 +       struct task_struct *idle = rq->idle;
1915 +       unsigned long account_pc;
1916 +
1917 +       /*
1918 +        * There should be at most one jiffy's worth, and it should not be
1919 +        * negative or overflow. time_diff is only used for internal scheduler
1920 +        * time_slice accounting.
1921 +        */
1922 +       if (time_diff <= 0)
1923 +               time_diff = JIFFIES_TO_NS(1) / 2;
1924 +       else if (time_diff > JIFFIES_TO_NS(1))
1925 +               time_diff = JIFFIES_TO_NS(1);
1926 +
1927 +       if (unlikely(account_ns < 0))
1928 +               account_ns = 0;
1929 +
1930 +       account_pc = NS_TO_PC(account_ns);
1931 +
1932 +       if (tick) {
1933 +               int user_tick = user_mode(get_irq_regs());
1934 +
1935 +               /* Accurate tick timekeeping */
1936 +               if (user_tick)
1937 +                       pc_user_time(rq, p, account_pc, account_ns);
1938 +               else if (p != idle || (irq_count() != HARDIRQ_OFFSET))
1939 +                       pc_system_time(rq, p, HARDIRQ_OFFSET,
1940 +                                      account_pc, account_ns);
1941 +               else
1942 +                       pc_idle_time(rq, account_pc);
1943 +       } else {
1944 +               /* Accurate subtick timekeeping */
1945 +               if (p == idle)
1946 +                       pc_idle_time(rq, account_pc);
1947 +               else
1948 +                       pc_user_time(rq, p, account_pc, account_ns);
1949 +       }
1950 +
1951 +       /* time_slice accounting is done in usecs to avoid overflow on 32bit */
1952 +       if (rq->rq_policy != SCHED_FIFO && p != idle)
1953 +               rq->rq_time_slice -= time_diff / 1000;
1954 +       p->last_ran = rq->timekeep_clock = rq->clock;
1955 +}
1956 +
1957 +/*
1958 + * Return any ns on the sched_clock that have not yet been accounted in
1959 + * @p in case that task is currently running.
1960 + *
1961 + * Called with task_grq_lock() held on @rq.
1962 + */
1963 +static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
1964 +{
1965 +       u64 ns = 0;
1966 +
1967 +       if (p == rq->curr) {
1968 +               update_rq_clock(rq);
1969 +               ns = rq->clock - p->last_ran;
1970 +               if ((s64)ns < 0)
1971 +                       ns = 0;
1972 +       }
1973 +
1974 +       return ns;
1975 +}
1976 +
1977 +unsigned long long task_delta_exec(struct task_struct *p)
1978 +{
1979 +       unsigned long flags;
1980 +       struct rq *rq;
1981 +       u64 ns = 0;
1982 +
1983 +       rq = task_grq_lock(p, &flags);
1984 +       ns = do_task_delta_exec(p, rq);
1985 +       task_grq_unlock(&flags);
1986 +
1987 +       return ns;
1988 +}
1989 +
1990 +/*
1991 + * Return accounted runtime for the task.
1992 + * In case the task is currently running, return the runtime plus current's
1993 + * pending runtime that have not been accounted yet.
1994 + */
1995 +unsigned long long task_sched_runtime(struct task_struct *p)
1996 +{
1997 +       unsigned long flags;
1998 +       struct rq *rq;
1999 +       u64 ns = 0;
2000 +
2001 +       rq = task_grq_lock(p, &flags);
2002 +       ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
2003 +       task_grq_unlock(&flags);
2004 +
2005 +       return ns;
2006 +}
2007 +
2008 +/*
2009 + * Return sum_exec_runtime for the thread group.
2010 + * In case the task is currently running, return the sum plus current's
2011 + * pending runtime that have not been accounted yet.
2012 + *
2013 + * Note that the thread group might have other running tasks as well,
2014 + * so the return value does not include other pending runtime that other
2015 + * running tasks might have.
2016 + */
2017 +unsigned long long thread_group_sched_runtime(struct task_struct *p)
2018 +{
2019 +       struct task_cputime totals;
2020 +       unsigned long flags;
2021 +       struct rq *rq;
2022 +       u64 ns;
2023 +
2024 +       rq = task_grq_lock(p, &flags);
2025 +       thread_group_cputime(p, &totals);
2026 +       ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
2027 +       task_grq_unlock(&flags);
2028 +
2029 +       return ns;
2030 +}
2031 +
2032 +/* Compatibility crap for removal */
2033 +void account_user_time(struct task_struct *p, cputime_t cputime,
2034 +                      cputime_t cputime_scaled)
2035 +{
2036 +}
2037 +
2038 +void account_idle_time(cputime_t cputime)
2039 +{
2040 +}
2041 +
2042 +/*
2043 + * Account guest cpu time to a process.
2044 + * @p: the process that the cpu time gets accounted to
2045 + * @cputime: the cpu time spent in virtual machine since the last update
2046 + * @cputime_scaled: cputime scaled by cpu frequency
2047 + */
2048 +static void account_guest_time(struct task_struct *p, cputime_t cputime,
2049 +                              cputime_t cputime_scaled)
2050 +{
2051 +       cputime64_t tmp;
2052 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2053 +
2054 +       tmp = cputime_to_cputime64(cputime);
2055 +
2056 +       /* Add guest time to process. */
2057 +       p->utime = cputime_add(p->utime, cputime);
2058 +       p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
2059 +       account_group_user_time(p, cputime);
2060 +       p->gtime = cputime_add(p->gtime, cputime);
2061 +
2062 +       /* Add guest time to cpustat. */
2063 +       cpustat->user = cputime64_add(cpustat->user, tmp);
2064 +       cpustat->guest = cputime64_add(cpustat->guest, tmp);
2065 +}
2066 +
2067 +/*
2068 + * Account system cpu time to a process.
2069 + * @p: the process that the cpu time gets accounted to
2070 + * @hardirq_offset: the offset to subtract from hardirq_count()
2071 + * @cputime: the cpu time spent in kernel space since the last update
2072 + * @cputime_scaled: cputime scaled by cpu frequency
2073 + * This is for guest only now.
2074 + */
2075 +void account_system_time(struct task_struct *p, int hardirq_offset,
2076 +                        cputime_t cputime, cputime_t cputime_scaled)
2077 +{
2078 +
2079 +       if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0))
2080 +               account_guest_time(p, cputime, cputime_scaled);
2081 +}
2082 +
2083 +/*
2084 + * Account for involuntary wait time.
2085 + * @cputime: the cpu time spent in involuntary wait
2086 + */
2087 +void account_steal_time(cputime_t cputime)
2088 +{
2089 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2090 +       cputime64_t cputime64 = cputime_to_cputime64(cputime);
2091 +
2092 +       cpustat->steal = cputime64_add(cpustat->steal, cputime64);
2093 +}
2094 +
2095 +/*
2096 + * Account for idle time.
2097 + * @cputime: the cpu time spent in idle wait
2098 + */
2099 +static void account_idle_times(cputime_t cputime)
2100 +{
2101 +       struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
2102 +       cputime64_t cputime64 = cputime_to_cputime64(cputime);
2103 +       struct rq *rq = this_rq();
2104 +
2105 +       if (atomic_read(&rq->nr_iowait) > 0)
2106 +               cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
2107 +       else
2108 +               cpustat->idle = cputime64_add(cpustat->idle, cputime64);
2109 +}
2110 +
2111 +#ifndef CONFIG_VIRT_CPU_ACCOUNTING
2112 +
2113 +void account_process_tick(struct task_struct *p, int user_tick)
2114 +{
2115 +}
2116 +
2117 +/*
2118 + * Account multiple ticks of steal time.
2119 + * @p: the process from which the cpu time has been stolen
2120 + * @ticks: number of stolen ticks
2121 + */
2122 +void account_steal_ticks(unsigned long ticks)
2123 +{
2124 +       account_steal_time(jiffies_to_cputime(ticks));
2125 +}
2126 +
2127 +/*
2128 + * Account multiple ticks of idle time.
2129 + * @ticks: number of idle ticks
2130 + */
2131 +void account_idle_ticks(unsigned long ticks)
2132 +{
2133 +       account_idle_times(jiffies_to_cputime(ticks));
2134 +}
2135 +#endif
2136 +
2137 +/*
2138 + * Functions to test for when SCHED_ISO tasks have used their allocated
2139 + * quota as real time scheduling and convert them back to SCHED_NORMAL.
2140 + * Where possible, the data is tested lockless, to avoid grabbing grq_lock
2141 + * because the occasional inaccurate result won't matter. However the
2142 + * data is only ever modified under lock.
2143 + */
2144 +static void set_iso_refractory(void)
2145 +{
2146 +       grq_lock();
2147 +       grq.iso_refractory = 1;
2148 +       grq_unlock();
2149 +}
2150 +
2151 +static void clear_iso_refractory(void)
2152 +{
2153 +       grq_lock();
2154 +       grq.iso_refractory = 0;
2155 +       grq_unlock();
2156 +}
2157 +
2158 +/*
2159 + * Test if SCHED_ISO tasks have run longer than their allotted period as RT
2160 + * tasks and set the refractory flag if necessary. There is 10% hysteresis
2161 + * for unsetting the flag.
2162 + */
2163 +static unsigned int test_ret_isorefractory(struct rq *rq)
2164 +{
2165 +       if (likely(!grq.iso_refractory)) {
2166 +               if (grq.iso_ticks / ISO_PERIOD > sched_iso_cpu)
2167 +                       set_iso_refractory();
2168 +       } else {
2169 +               if (grq.iso_ticks / ISO_PERIOD < (sched_iso_cpu * 90 / 100))
2170 +                       clear_iso_refractory();
2171 +       }
2172 +       return grq.iso_refractory;
2173 +}
2174 +
2175 +static void iso_tick(void)
2176 +{
2177 +       grq_lock();
2178 +       grq.iso_ticks += 100;
2179 +       grq_unlock();
2180 +}
2181 +
2182 +/* No SCHED_ISO task was running so decrease grq.iso_ticks */
2183 +static inline void no_iso_tick(void)
2184 +{
2185 +       if (grq.iso_ticks) {
2186 +               grq_lock();
2187 +               grq.iso_ticks = grq.iso_ticks * (ISO_PERIOD - 1) / ISO_PERIOD;
2188 +               grq_unlock();
2189 +       }
2190 +}
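+
+/*
+ * Rough sketch of the bookkeeping above, with an assumed sched_iso_cpu of
+ * 70: every tick an ISO (or RT) task runs adds 100 to grq.iso_ticks and
+ * every tick without one decays it by (ISO_PERIOD - 1) / ISO_PERIOD, so
+ * grq.iso_ticks / ISO_PERIOD approximates the recent percentage of cpu
+ * used at ISO priority. Once that exceeds 70 the refractory flag is set
+ * and SCHED_ISO tasks are demoted to normal scheduling; it is only
+ * cleared again once usage falls below 63 (90% of the limit).
+ */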
2191 +
2192 +static int rq_running_iso(struct rq *rq)
2193 +{
2194 +       return rq->rq_prio == ISO_PRIO;
2195 +}
2196 +
2197 +/* This manages tasks that have run out of timeslice during a scheduler_tick */
2198 +static void task_running_tick(struct rq *rq)
2199 +{
2200 +       struct task_struct *p;
2201 +
2202 +       /*
2203 +        * If a SCHED_ISO task is running we increment the iso_ticks. In
2204 +        * order to prevent SCHED_ISO tasks from causing starvation in the
2205 +        * presence of true RT tasks we account those as iso_ticks as well.
2206 +        */
2207 +       if ((rt_queue(rq) || (iso_queue(rq) && !grq.iso_refractory))) {
2208 +               if (grq.iso_ticks <= (ISO_PERIOD * 100) - 100)
2209 +                       iso_tick();
2210 +       } else
2211 +               no_iso_tick();
2212 +
2213 +       if (iso_queue(rq)) {
2214 +               if (unlikely(test_ret_isorefractory(rq))) {
2215 +                       if (rq_running_iso(rq)) {
2216 +                               /*
2217 +                                * SCHED_ISO task is running as RT and limit
2218 +                                * has been hit. Force it to reschedule as
2219 +                                * SCHED_NORMAL by zeroing its time_slice
2220 +                                */
2221 +                               rq->rq_time_slice = 0;
2222 +                       }
2223 +               }
2224 +       }
2225 +
2226 +       /* SCHED_FIFO tasks never run out of timeslice. */
2227 +       if (rq_idle(rq) || rq->rq_time_slice > 0 || rq->rq_policy == SCHED_FIFO)
2228 +               return;
2229 +
2230 +       /* p->rt.time_slice <= 0. We only modify task_struct under grq lock */
2231 +       grq_lock();
2232 +       p = rq->curr;
2233 +       if (likely(task_running(p))) {
2234 +               requeue_task(p);
2235 +               set_tsk_need_resched(p);
2236 +       }
2237 +       grq_unlock();
2238 +}
2239 +
2240 +void wake_up_idle_cpu(int cpu);
2241 +
2242 +/*
2243 + * This function gets called by the timer code, with HZ frequency.
2244 + * We call it with interrupts disabled. The data modified is all
2245 + * local to struct rq so we don't need to grab grq lock.
2246 + */
2247 +void scheduler_tick(void)
2248 +{
2249 +       int cpu = smp_processor_id();
2250 +       struct rq *rq = cpu_rq(cpu);
2251 +
2252 +       sched_clock_tick();
2253 +       update_rq_clock(rq);
2254 +       update_cpu_clock(rq, rq->curr, 1);
2255 +       if (!rq_idle(rq))
2256 +               task_running_tick(rq);
2257 +       else {
2258 +               no_iso_tick();
2259 +               if (unlikely(queued_notrunning()))
2260 +                       set_tsk_need_resched(rq->idle);
2261 +       }
2262 +}
2263 +
2264 +notrace unsigned long get_parent_ip(unsigned long addr)
2265 +{
2266 +       if (in_lock_functions(addr)) {
2267 +               addr = CALLER_ADDR2;
2268 +               if (in_lock_functions(addr))
2269 +                       addr = CALLER_ADDR3;
2270 +       }
2271 +       return addr;
2272 +}
2273 +
2274 +#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
2275 +                               defined(CONFIG_PREEMPT_TRACER))
2276 +void __kprobes add_preempt_count(int val)
2277 +{
2278 +#ifdef CONFIG_DEBUG_PREEMPT
2279 +       /*
2280 +        * Underflow?
2281 +        */
2282 +       if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
2283 +               return;
2284 +#endif
2285 +       preempt_count() += val;
2286 +#ifdef CONFIG_DEBUG_PREEMPT
2287 +       /*
2288 +        * Spinlock count overflowing soon?
2289 +        */
2290 +       DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
2291 +                               PREEMPT_MASK - 10);
2292 +#endif
2293 +       if (preempt_count() == val)
2294 +               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2295 +}
2296 +EXPORT_SYMBOL(add_preempt_count);
2297 +
2298 +void __kprobes sub_preempt_count(int val)
2299 +{
2300 +#ifdef CONFIG_DEBUG_PREEMPT
2301 +       /*
2302 +        * Underflow?
2303 +        */
2304 +       if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
2305 +               return;
2306 +       /*
2307 +        * Is the spinlock portion underflowing?
2308 +        */
2309 +       if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
2310 +                       !(preempt_count() & PREEMPT_MASK)))
2311 +               return;
2312 +#endif
2313 +
2314 +       if (preempt_count() == val)
2315 +               trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
2316 +       preempt_count() -= val;
2317 +}
2318 +EXPORT_SYMBOL(sub_preempt_count);
2319 +#endif
2320 +
2321 +/*
2322 + * Deadline is "now" in jiffies + (offset by priority). Setting the deadline
2323 + * is the key to everything. It distributes cpu fairly amongst tasks of the
2324 + * same nice value, it proportions cpu according to nice level, it means the
2325 + * task that last woke up the longest ago has the earliest deadline, thus
2326 + * ensuring that interactive tasks get low latency on wake up.
2327 + */
2328 +static inline int prio_deadline_diff(struct task_struct *p)
2329 +{
2330 +       return (pratio(p) * rr_interval * HZ / 1000 / 100) ? : 1;
2331 +}
2332 +
2333 +static inline int longest_deadline(void)
2334 +{
2335 +       return (prio_ratios[39] * rr_interval * HZ / 1000 / 100);
2336 +}
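+
+/*
+ * Worked example with assumed values (rr_interval == 6, HZ == 1000 and
+ * pratio(p) == 100 for a nice 0 task):
+ *
+ *   prio_deadline_diff(p) = 100 * 6 * 1000 / 1000 / 100 = 6 jiffies
+ *
+ * so a nice 0 task is given a deadline roughly 6 ticks in the future when
+ * its time slice is refilled, while tasks with a larger pratio() (higher
+ * nice) receive proportionally later deadlines and thus less cpu.
+ */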
2337 +
2338 +/*
2339 + * SCHED_IDLE tasks still have a deadline set, but offset by the nice +19 amount.
2340 + * This allows nice levels to work between IDLEPRIO tasks and gives a
2341 + * deadline longer than nice +19 for when they're scheduled as SCHED_NORMAL
2342 + * tasks.
2343 + */
2344 +static inline void time_slice_expired(struct task_struct *p)
2345 +{
2346 +       reset_first_time_slice(p);
2347 +       p->rt.time_slice = timeslice();
2348 +       p->deadline = jiffies + prio_deadline_diff(p);
2349 +       if (idleprio_task(p))
2350 +               p->deadline += longest_deadline();
2351 +}
2352 +
2353 +static inline void check_deadline(struct task_struct *p)
2354 +{
2355 +       if (p->rt.time_slice <= 0)
2356 +               time_slice_expired(p);
2357 +}
2358 +
2359 +/*
2360 + * O(n) lookup of all tasks in the global runqueue. The real brainfuck
2361 + * of lock contention and O(n). In practice it is not a full O(n) walk:
2362 + * only queued, but not running, tasks are scanned, and O(n) queued is
2363 + * only the worst case because the right task is usually found before
2364 + * all of them have been scanned.
2365 + * Tasks are selected in this order:
2366 + * Real time tasks are selected purely by their static priority and in the
2367 + * order they were queued, so the lowest value idx, and the first queued task
2368 + * of that priority value is chosen.
2369 + * If no real time tasks are found, the SCHED_ISO priority is checked, and
2370 + * all SCHED_ISO tasks have the same priority value, so they're selected by
2371 + * the earliest deadline value.
2372 + * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the
2373 + * earliest deadline.
2374 + * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are
2375 + * selected by the earliest deadline.
2376 + */
2377 +static inline struct
2378 +task_struct *earliest_deadline_task(struct rq *rq, struct task_struct *idle)
2379 +{
2380 +       unsigned long dl, earliest_deadline = 0; /* Initialise to silence compiler */
2381 +       struct task_struct *p, *edt;
2382 +       unsigned int cpu = rq->cpu;
2383 +       struct list_head *queue;
2384 +       int idx = 0;
2385 +
2386 +       edt = idle;
2387 +retry:
2388 +       idx = find_next_bit(grq.prio_bitmap, PRIO_LIMIT, idx);
2389 +       if (idx >= PRIO_LIMIT)
2390 +               goto out;
2391 +       queue = &grq.queue[idx];
2392 +       list_for_each_entry(p, queue, rt.run_list) {
2393 +               /* Make sure cpu affinity is ok */
2394 +               if (!cpu_isset(cpu, p->cpus_allowed))
2395 +                       continue;
2396 +               if (idx < MAX_RT_PRIO) {
2397 +                       /* We found an rt task */
2398 +                       edt = p;
2399 +                       goto out_take;
2400 +               }
2401 +
2402 +               /*
2403 +                * No rt task, select the earliest deadline task now.
2404 +                * On the 1st run the 2nd condition is never used, so
2405 +                * there is no need to initialise earliest_deadline
2406 +                * before. Normalise all old deadlines to now.
2407 +                */
2408 +               if (time_before(p->deadline, jiffies))
2409 +                       dl = jiffies;
2410 +               else
2411 +                       dl = p->deadline;
2412 +
2413 +               if (edt == idle ||
2414 +                   time_before(dl, earliest_deadline)) {
2415 +                       earliest_deadline = dl;
2416 +                       edt = p;
2417 +               }
2418 +       }
2419 +       if (edt == idle) {
2420 +               if (++idx < PRIO_LIMIT)
2421 +                       goto retry;
2422 +               goto out;
2423 +       }
2424 +out_take:
2425 +       take_task(rq, edt);
2426 +out:
2427 +       return edt;
2428 +}
2429 +
2430 +#ifdef CONFIG_SMP
2431 +static inline void set_cpuidle_map(unsigned long cpu)
2432 +{
2433 +       cpu_set(cpu, grq.cpu_idle_map);
2434 +}
2435 +
2436 +static inline void clear_cpuidle_map(unsigned long cpu)
2437 +{
2438 +       cpu_clear(cpu, grq.cpu_idle_map);
2439 +}
2440 +
2441 +#else /* CONFIG_SMP */
2442 +static inline void set_cpuidle_map(unsigned long cpu)
2443 +{
2444 +}
2445 +
2446 +static inline void clear_cpuidle_map(unsigned long cpu)
2447 +{
2448 +}
2449 +#endif /* !CONFIG_SMP */
2450 +
2451 +/*
2452 + * Print scheduling while atomic bug:
2453 + */
2454 +static noinline void __schedule_bug(struct task_struct *prev)
2455 +{
2456 +       struct pt_regs *regs = get_irq_regs();
2457 +
2458 +       printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
2459 +               prev->comm, prev->pid, preempt_count());
2460 +
2461 +       debug_show_held_locks(prev);
2462 +       print_modules();
2463 +       if (irqs_disabled())
2464 +               print_irqtrace_events(prev);
2465 +
2466 +       if (regs)
2467 +               show_regs(regs);
2468 +       else
2469 +               dump_stack();
2470 +}
2471 +
2472 +/*
2473 + * Various schedule()-time debugging checks and statistics:
2474 + */
2475 +static inline void schedule_debug(struct task_struct *prev)
2476 +{
2477 +       /*
2478 +        * Test if we are atomic. Since do_exit() needs to call into
2479 +        * schedule() atomically, we ignore that path for now.
2480 +        * Otherwise, whine if we are scheduling when we should not be.
2481 +        */
2482 +       if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
2483 +               __schedule_bug(prev);
2484 +
2485 +       profile_hit(SCHED_PROFILING, __builtin_return_address(0));
2486 +
2487 +       schedstat_inc(this_rq(), sched_count);
2488 +#ifdef CONFIG_SCHEDSTATS
2489 +       if (unlikely(prev->lock_depth >= 0)) {
2490 +               schedstat_inc(this_rq(), bkl_count);
2491 +               schedstat_inc(prev, sched_info.bkl_count);
2492 +       }
2493 +#endif
2494 +}
2495 +
2496 +/*
2497 + * schedule() is the main scheduler function.
2498 + */
2499 +asmlinkage void __sched __schedule(void)
2500 +{
2501 +       struct task_struct *prev, *next, *idle;
2502 +       int deactivate = 0, cpu;
2503 +       long *switch_count;
2504 +       struct rq *rq;
2505 +       u64 now;
2506 +
2507 +       cpu = smp_processor_id();
2508 +       rq = this_rq();
2509 +       rcu_qsctr_inc(cpu);
2510 +       prev = rq->curr;
2511 +       switch_count = &prev->nivcsw;
2512 +
2513 +       release_kernel_lock(prev);
2514 +need_resched_nonpreemptible:
2515 +
2516 +       schedule_debug(prev);
2517 +       idle = rq->idle;
2518 +       /*
2519 +        * The idle thread is not allowed to schedule!
2520 +        * Remove this check after it has been exercised a bit.
2521 +        */
2522 +       if (unlikely(prev == idle) && prev->state != TASK_RUNNING) {
2523 +               printk(KERN_ERR "bad: scheduling from the idle thread!\n");
2524 +               dump_stack();
2525 +       }
2526 +
2527 +       grq_lock_irq();
2528 +       update_rq_clock(rq);
2529 +       now = rq->clock;
2530 +       update_cpu_clock(rq, prev, 0);
2531 +
2532 +       clear_tsk_need_resched(prev);
2533 +
2534 +       if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
2535 +               if (unlikely(signal_pending_state(prev->state, prev)))
2536 +                       prev->state = TASK_RUNNING;
2537 +               else
2538 +                       deactivate = 1;
2539 +               switch_count = &prev->nvcsw;
2540 +       }
2541 +
2542 +       if (prev != idle) {
2543 +               /* Update all the information stored on struct rq */
2544 +               prev->rt.time_slice = rq->rq_time_slice;
2545 +               prev->deadline = rq->rq_deadline;
2546 +               check_deadline(prev);
2547 +               return_task(prev, deactivate);
2548 +       }
2549 +
2550 +       if (likely(queued_notrunning())) {
2551 +               next = earliest_deadline_task(rq, idle);
2552 +       } else {
2553 +               next = idle;
2554 +               schedstat_inc(rq, sched_goidle);
2555 +       }
2556 +
2557 +       if (next == rq->idle)
2558 +               set_cpuidle_map(cpu);
2559 +       else
2560 +               clear_cpuidle_map(cpu);
2561 +
2562 +       prefetch(next);
2563 +       prefetch_stack(next);
2564 +
2565 +       prev->timestamp = prev->last_ran = now;
2566 +
2567 +       if (likely(prev != next)) {
2568 +               rq->rq_time_slice = next->rt.time_slice;
2569 +               rq->rq_deadline = next->deadline;
2570 +               rq->rq_prio = next->prio;
2571 +
2572 +               sched_info_switch(prev, next);
2573 +               grq.nr_switches++;
2574 +               next->oncpu = 1;
2575 +               prev->oncpu = 0;
2576 +               rq->curr = next;
2577 +               ++*switch_count;
2578 +
2579 +               context_switch(rq, prev, next); /* unlocks the rq */
2580 +               /*
2581 +                * the context switch might have flipped the stack from under
2582 +                * us, hence refresh the local variables.
2583 +                */
2584 +               cpu = smp_processor_id();
2585 +               rq = cpu_rq(cpu);
2586 +       } else
2587 +               grq_unlock_irq();
2588 +
2589 +       if (unlikely(reacquire_kernel_lock(current) < 0))
2590 +               goto need_resched_nonpreemptible;
2591 +}
2592 +
2593 +asmlinkage void __sched schedule(void)
2594 +{
2595 +need_resched:
2596 +       preempt_disable();
2597 +       __schedule();
2598 +       preempt_enable_no_resched();
2599 +       if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
2600 +               goto need_resched;
2601 +}
2602 +EXPORT_SYMBOL(schedule);
2603 +
2604 +#ifdef CONFIG_SMP
2605 +int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
2606 +{
2607 +       return 0;
2608 +}
2609 +#endif
2610 +
2611 +#ifdef CONFIG_PREEMPT
2612 +/*
2613 + * This is the entry point to schedule() from in-kernel preemption
2614 + * off of preempt_enable. Kernel preemptions off a return from interrupt
2615 + * occur elsewhere (see preempt_schedule_irq below) and call schedule directly.
2616 + */
2617 +asmlinkage void __sched preempt_schedule(void)
2618 +{
2619 +       struct thread_info *ti = current_thread_info();
2620 +
2621 +       /*
2622 +        * If there is a non-zero preempt_count or interrupts are disabled,
2623 +        * we do not want to preempt the current task. Just return..
2624 +        */
2625 +       if (likely(ti->preempt_count || irqs_disabled()))
2626 +               return;
2627 +
2628 +       do {
2629 +               add_preempt_count(PREEMPT_ACTIVE);
2630 +               schedule();
2631 +               sub_preempt_count(PREEMPT_ACTIVE);
2632 +
2633 +               /*
2634 +                * Check again in case we missed a preemption opportunity
2635 +                * between schedule and now.
2636 +                */
2637 +               barrier();
2638 +       } while (need_resched());
2639 +}
2640 +EXPORT_SYMBOL(preempt_schedule);
2641 +
2642 +/*
2643 + * this is the entry point to schedule() from kernel preemption
2644 + * off of irq context.
2645 + * Note, that this is called and return with irqs disabled. This will
2646 + * protect us against recursive calling from irq.
2647 + */
2648 +asmlinkage void __sched preempt_schedule_irq(void)
2649 +{
2650 +       struct thread_info *ti = current_thread_info();
2651 +
2652 +       /* Catch callers which need to be fixed */
2653 +       BUG_ON(ti->preempt_count || !irqs_disabled());
2654 +
2655 +       do {
2656 +               add_preempt_count(PREEMPT_ACTIVE);
2657 +               local_irq_enable();
2658 +               schedule();
2659 +               local_irq_disable();
2660 +               sub_preempt_count(PREEMPT_ACTIVE);
2661 +
2662 +               /*
2663 +                * Check again in case we missed a preemption opportunity
2664 +                * between schedule and now.
2665 +                */
2666 +               barrier();
2667 +       } while (need_resched());
2668 +}
2669 +
2670 +#endif /* CONFIG_PREEMPT */
2671 +
2672 +int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
2673 +                         void *key)
2674 +{
2675 +       return try_to_wake_up(curr->private, mode, sync);
2676 +}
2677 +EXPORT_SYMBOL(default_wake_function);
2678 +
2679 +/*
2680 + * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
2681 + * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
2682 + * number) then we wake all the non-exclusive tasks and one exclusive task.
2683 + *
2684 + * There are circumstances in which we can try to wake a task which has already
2685 + * started to run but is not in state TASK_RUNNING.  try_to_wake_up() returns
2686 + * zero in this (rare) case, and we handle it by continuing to scan the queue.
2687 + */
2688 +void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
2689 +                     int nr_exclusive, int sync, void *key)
2690 +{
2691 +       struct list_head *tmp, *next;
2692 +
2693 +       list_for_each_safe(tmp, next, &q->task_list) {
2694 +               wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
2695 +               unsigned flags = curr->flags;
2696 +
2697 +               if (curr->func(curr, mode, sync, key) &&
2698 +                               (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
2699 +                       break;
2700 +       }
2701 +}
2702 +
2703 +/**
2704 + * __wake_up - wake up threads blocked on a waitqueue.
2705 + * @q: the waitqueue
2706 + * @mode: which threads
2707 + * @nr_exclusive: how many wake-one or wake-many threads to wake up
2708 + * @key: is directly passed to the wakeup function
2709 + *
2710 + * It may be assumed that this function implies a write memory barrier before
2711 + * changing the task state if and only if any tasks are woken up.
2712 + */
2713 +void __wake_up(wait_queue_head_t *q, unsigned int mode,
2714 +                       int nr_exclusive, void *key)
2715 +{
2716 +       unsigned long flags;
2717 +
2718 +       spin_lock_irqsave(&q->lock, flags);
2719 +       __wake_up_common(q, mode, nr_exclusive, 0, key);
2720 +       spin_unlock_irqrestore(&q->lock, flags);
2721 +}
2722 +EXPORT_SYMBOL(__wake_up);
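+
+/*
+ * Typical caller side of the wakeup primitives above; the names my_wq and
+ * data_ready are made up for illustration:
+ *
+ *   static DECLARE_WAIT_QUEUE_HEAD(my_wq);
+ *   static int data_ready;
+ *
+ *   waiter:  wait_event_interruptible(my_wq, data_ready);
+ *   waker:   data_ready = 1;
+ *            wake_up(&my_wq);   (== __wake_up(&my_wq, TASK_NORMAL, 1, NULL))
+ *
+ * Waiters queued with prepare_to_wait_exclusive() carry WQ_FLAG_EXCLUSIVE,
+ * so a wakeup with nr_exclusive == 1 stops after waking one of them while
+ * still waking every non-exclusive waiter it passes first.
+ */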
2723 +
2724 +/*
2725 + * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
2726 + */
2727 +void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
2728 +{
2729 +       __wake_up_common(q, mode, 1, 0, NULL);
2730 +}
2731 +
2732 +void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
2733 +{
2734 +       __wake_up_common(q, mode, 1, 0, key);
2735 +}
2736 +
2737 +/**
2738 + * __wake_up_sync_key - wake up threads blocked on a waitqueue.
2739 + * @q: the waitqueue
2740 + * @mode: which threads
2741 + * @nr_exclusive: how many wake-one or wake-many threads to wake up
2742 + * @key: opaque value to be passed to wakeup targets
2743 + *
2744 + * The sync wakeup differs in that the waker knows that it will schedule
2745 + * away soon, so while the target thread will be woken up, it will not
2746 + * be migrated to another CPU - ie. the two threads are 'synchronized'
2747 + * with each other. This can prevent needless bouncing between CPUs.
2748 + *
2749 + * On UP it can prevent extra preemption.
2750 + *
2751 + * It may be assumed that this function implies a write memory barrier before
2752 + * changing the task state if and only if any tasks are woken up.
2753 + */
2754 +void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
2755 +                       int nr_exclusive, void *key)
2756 +{
2757 +       unsigned long flags;
2758 +       int sync = 1;
2759 +
2760 +       if (unlikely(!q))
2761 +               return;
2762 +
2763 +       if (unlikely(!nr_exclusive))
2764 +               sync = 0;
2765 +
2766 +       spin_lock_irqsave(&q->lock, flags);
2767 +       __wake_up_common(q, mode, nr_exclusive, sync, key);
2768 +       spin_unlock_irqrestore(&q->lock, flags);
2769 +}
2770 +EXPORT_SYMBOL_GPL(__wake_up_sync_key);
2771 +
2772 +/**
2773 + * __wake_up_sync - wake up threads blocked on a waitqueue.
2774 + * @q: the waitqueue
2775 + * @mode: which threads
2776 + * @nr_exclusive: how many wake-one or wake-many threads to wake up
2777 + *
2778 + * The sync wakeup differs in that the waker knows that it will schedule
2779 + * away soon, so while the target thread will be woken up, it will not
2780 + * be migrated to another CPU - ie. the two threads are 'synchronized'
2781 + * with each other. This can prevent needless bouncing between CPUs.
2782 + *
2783 + * On UP it can prevent extra preemption.
2784 + */
2785 +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
2786 +{
2787 +       unsigned long flags;
2788 +       int sync = 1;
2789 +
2790 +       if (unlikely(!q))
2791 +               return;
2792 +
2793 +       if (unlikely(!nr_exclusive))
2794 +               sync = 0;
2795 +
2796 +       spin_lock_irqsave(&q->lock, flags);
2797 +       __wake_up_common(q, mode, nr_exclusive, sync, NULL);
2798 +       spin_unlock_irqrestore(&q->lock, flags);
2799 +}
2800 +EXPORT_SYMBOL_GPL(__wake_up_sync);     /* For internal use only */
2801 +
2802 +/**
2803 + * complete: - signals a single thread waiting on this completion
2804 + * @x:  holds the state of this particular completion
2805 + *
2806 + * This will wake up a single thread waiting on this completion. Threads will be
2807 + * awakened in the same order in which they were queued.
2808 + *
2809 + * See also complete_all(), wait_for_completion() and related routines.
2810 + *
2811 + * It may be assumed that this function implies a write memory barrier before
2812 + * changing the task state if and only if any tasks are woken up.
2813 + */
2814 +void complete(struct completion *x)
2815 +{
2816 +       unsigned long flags;
2817 +
2818 +       spin_lock_irqsave(&x->wait.lock, flags);
2819 +       x->done++;
2820 +       __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
2821 +       spin_unlock_irqrestore(&x->wait.lock, flags);
2822 +}
2823 +EXPORT_SYMBOL(complete);
2824 +
2825 +/**
2826 + * complete_all: - signals all threads waiting on this completion
2827 + * @x:  holds the state of this particular completion
2828 + *
2829 + * This will wake up all threads waiting on this particular completion event.
2830 + *
2831 + * It may be assumed that this function implies a write memory barrier before
2832 + * changing the task state if and only if any tasks are woken up.
2833 + */
2834 +void complete_all(struct completion *x)
2835 +{
2836 +       unsigned long flags;
2837 +
2838 +       spin_lock_irqsave(&x->wait.lock, flags);
2839 +       x->done += UINT_MAX/2;
2840 +       __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
2841 +       spin_unlock_irqrestore(&x->wait.lock, flags);
2842 +}
2843 +EXPORT_SYMBOL(complete_all);
2844 +
2845 +static inline long __sched
2846 +do_wait_for_common(struct completion *x, long timeout, int state)
2847 +{
2848 +       if (!x->done) {
2849 +               DECLARE_WAITQUEUE(wait, current);
2850 +
2851 +               wait.flags |= WQ_FLAG_EXCLUSIVE;
2852 +               __add_wait_queue_tail(&x->wait, &wait);
2853 +               do {
2854 +                       if (signal_pending_state(state, current)) {
2855 +                               timeout = -ERESTARTSYS;
2856 +                               break;
2857 +                       }
2858 +                       __set_current_state(state);
2859 +                       spin_unlock_irq(&x->wait.lock);
2860 +                       timeout = schedule_timeout(timeout);
2861 +                       spin_lock_irq(&x->wait.lock);
2862 +               } while (!x->done && timeout);
2863 +               __remove_wait_queue(&x->wait, &wait);
2864 +               if (!x->done)
2865 +                       return timeout;
2866 +       }
2867 +       x->done--;
2868 +       return timeout ?: 1;
2869 +}
2870 +
2871 +static long __sched
2872 +wait_for_common(struct completion *x, long timeout, int state)
2873 +{
2874 +       might_sleep();
2875 +
2876 +       spin_lock_irq(&x->wait.lock);
2877 +       timeout = do_wait_for_common(x, timeout, state);
2878 +       spin_unlock_irq(&x->wait.lock);
2879 +       return timeout;
2880 +}
2881 +
2882 +/**
2883 + * wait_for_completion: - waits for completion of a task
2884 + * @x:  holds the state of this particular completion
2885 + *
2886 + * This waits to be signaled for completion of a specific task. It is NOT
2887 + * interruptible and there is no timeout.
2888 + *
2889 + * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
2890 + * and interrupt capability. Also see complete().
2891 + */
2892 +void __sched wait_for_completion(struct completion *x)
2893 +{
2894 +       wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
2895 +}
2896 +EXPORT_SYMBOL(wait_for_completion);
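+
+/*
+ * Minimal usage sketch for the completion API defined here (struct and
+ * field names are illustrative only):
+ *
+ *   struct my_request {
+ *           struct completion done;
+ *   };
+ *
+ *   init_completion(&req->done);
+ *   hand req to another thread or irq handler;
+ *   wait_for_completion(&req->done);   sleeps TASK_UNINTERRUPTIBLE
+ *
+ * and on the producer side, once the work is finished:
+ *
+ *   complete(&req->done);              wakes a single waiter
+ *
+ * The _timeout, _interruptible and _killable variants below behave the
+ * same way but bound the sleep or let signals end it early.
+ */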
2897 +
2898 +/**
2899 + * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
2900 + * @x:  holds the state of this particular completion
2901 + * @timeout:  timeout value in jiffies
2902 + *
2903 + * This waits for either a completion of a specific task to be signaled or for a
2904 + * specified timeout to expire. The timeout is in jiffies. It is not
2905 + * interruptible.
2906 + */
2907 +unsigned long __sched
2908 +wait_for_completion_timeout(struct completion *x, unsigned long timeout)
2909 +{
2910 +       return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
2911 +}
2912 +EXPORT_SYMBOL(wait_for_completion_timeout);
2913 +
2914 +/**
2915 + * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
2916 + * @x:  holds the state of this particular completion
2917 + *
2918 + * This waits for completion of a specific task to be signaled. It is
2919 + * interruptible.
2920 + */
2921 +int __sched wait_for_completion_interruptible(struct completion *x)
2922 +{
2923 +       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
2924 +       if (t == -ERESTARTSYS)
2925 +               return t;
2926 +       return 0;
2927 +}
2928 +EXPORT_SYMBOL(wait_for_completion_interruptible);
2929 +
2930 +/**
2931 + * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
2932 + * @x:  holds the state of this particular completion
2933 + * @timeout:  timeout value in jiffies
2934 + *
2935 + * This waits for either a completion of a specific task to be signaled or for a
2936 + * specified timeout to expire. It is interruptible. The timeout is in jiffies.
2937 + */
2938 +unsigned long __sched
2939 +wait_for_completion_interruptible_timeout(struct completion *x,
2940 +                                         unsigned long timeout)
2941 +{
2942 +       return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
2943 +}
2944 +EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
2945 +
2946 +/**
2947 + * wait_for_completion_killable: - waits for completion of a task (killable)
2948 + * @x:  holds the state of this particular completion
2949 + *
2950 + * This waits to be signaled for completion of a specific task. It can be
2951 + * interrupted by a kill signal.
2952 + */
2953 +int __sched wait_for_completion_killable(struct completion *x)
2954 +{
2955 +       long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
2956 +       if (t == -ERESTARTSYS)
2957 +               return t;
2958 +       return 0;
2959 +}
2960 +EXPORT_SYMBOL(wait_for_completion_killable);
2961 +
2962 +/**
2963 + *     try_wait_for_completion - try to decrement a completion without blocking
2964 + *     @x:     completion structure
2965 + *
2966 + *     Returns: 0 if a decrement cannot be done without blocking
2967 + *              1 if a decrement succeeded.
2968 + *
2969 + *     If a completion is being used as a counting completion,
2970 + *     attempt to decrement the counter without blocking. This
2971 + *     enables us to avoid waiting if the resource the completion
2972 + *     is protecting is not available.
2973 + */
2974 +bool try_wait_for_completion(struct completion *x)
2975 +{
2976 +       int ret = 1;
2977 +
2978 +       spin_lock_irq(&x->wait.lock);
2979 +       if (!x->done)
2980 +               ret = 0;
2981 +       else
2982 +               x->done--;
2983 +       spin_unlock_irq(&x->wait.lock);
2984 +       return ret;
2985 +}
2986 +EXPORT_SYMBOL(try_wait_for_completion);
2987 +
2988 +/**
2989 + *     completion_done - Test to see if a completion has any waiters
2990 + *     @x:     completion structure
2991 + *
2992 + *     Returns: 0 if there are waiters (wait_for_completion() in progress)
2993 + *              1 if there are no waiters.
2994 + *
2995 + */
2996 +bool completion_done(struct completion *x)
2997 +{
2998 +       int ret = 1;
2999 +
3000 +       spin_lock_irq(&x->wait.lock);
3001 +       if (!x->done)
3002 +               ret = 0;
3003 +       spin_unlock_irq(&x->wait.lock);
3004 +       return ret;
3005 +}
3006 +EXPORT_SYMBOL(completion_done);
3007 +
3008 +static long __sched
3009 +sleep_on_common(wait_queue_head_t *q, int state, long timeout)
3010 +{
3011 +       unsigned long flags;
3012 +       wait_queue_t wait;
3013 +
3014 +       init_waitqueue_entry(&wait, current);
3015 +
3016 +       __set_current_state(state);
3017 +
3018 +       spin_lock_irqsave(&q->lock, flags);
3019 +       __add_wait_queue(q, &wait);
3020 +       spin_unlock(&q->lock);
3021 +       timeout = schedule_timeout(timeout);
3022 +       spin_lock_irq(&q->lock);
3023 +       __remove_wait_queue(q, &wait);
3024 +       spin_unlock_irqrestore(&q->lock, flags);
3025 +
3026 +       return timeout;
3027 +}
3028 +
3029 +void __sched interruptible_sleep_on(wait_queue_head_t *q)
3030 +{
3031 +       sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3032 +}
3033 +EXPORT_SYMBOL(interruptible_sleep_on);
3034 +
3035 +long __sched
3036 +interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
3037 +{
3038 +       return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
3039 +}
3040 +EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3041 +
3042 +void __sched sleep_on(wait_queue_head_t *q)
3043 +{
3044 +       sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
3045 +}
3046 +EXPORT_SYMBOL(sleep_on);
3047 +
3048 +long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
3049 +{
3050 +       return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
3051 +}
3052 +EXPORT_SYMBOL(sleep_on_timeout);
3053 +
3054 +#ifdef CONFIG_RT_MUTEXES
3055 +
3056 +/*
3057 + * rt_mutex_setprio - set the current priority of a task
3058 + * @p: task
3059 + * @prio: prio value (kernel-internal form)
3060 + *
3061 + * This function changes the 'effective' priority of a task. It does
3062 + * not touch ->normal_prio like __setscheduler().
3063 + *
3064 + * Used by the rt_mutex code to implement priority inheritance logic.
3065 + */
3066 +void rt_mutex_setprio(struct task_struct *p, int prio)
3067 +{
3068 +       unsigned long flags;
3069 +       int queued, oldprio;
3070 +       struct rq *rq;
3071 +
3072 +       BUG_ON(prio < 0 || prio > MAX_PRIO);
3073 +
3074 +       rq = time_task_grq_lock(p, &flags);
3075 +
3076 +       oldprio = p->prio;
3077 +       queued = task_queued_only(p);
3078 +       if (queued)
3079 +               dequeue_task(p);
3080 +       p->prio = prio;
3081 +       if (task_running(p) && prio > oldprio)
3082 +               resched_task(p);
3083 +       if (queued) {
3084 +               enqueue_task(p);
3085 +               try_preempt(p);
3086 +       }
3087 +
3088 +       task_grq_unlock(&flags);
3089 +}
3090 +
3091 +#endif
3092 +
3093 +/*
3094 + * Adjust the deadline for when the priority is to change, before it's
3095 + * changed.
3096 + */
3097 +static void adjust_deadline(struct task_struct *p, int new_prio)
3098 +{
3099 +       p->deadline += (prio_ratios[USER_PRIO(new_prio)] - pratio(p)) *
3100 +                       rr_interval * HZ / 1000 / 100;
3101 +}
3102 +
3103 +void set_user_nice(struct task_struct *p, long nice)
3104 +{
3105 +       int queued, new_static;
3106 +       unsigned long flags;
3107 +       struct rq *rq;
3108 +
3109 +       if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
3110 +               return;
3111 +       new_static = NICE_TO_PRIO(nice);
3112 +       /*
3113 +        * We have to be careful, if called from sys_setpriority(),
3114 +        * the task might be in the middle of scheduling on another CPU.
3115 +        */
3116 +       rq = time_task_grq_lock(p, &flags);
3117 +       /*
3118 +        * The RT priorities are set via sched_setscheduler(), but we still
3119 +        * allow the 'normal' nice value to be set - but as expected
3120 +        * it won't have any effect on scheduling until the task
3121 +        * becomes SCHED_NORMAL/SCHED_BATCH again:
3122 +        */
3123 +       if (has_rt_policy(p)) {
3124 +               p->static_prio = new_static;
3125 +               goto out_unlock;
3126 +       }
3127 +       queued = task_queued_only(p);
3128 +       /*
3129 +        * If p is actually running, we don't need to do anything when
3130 +        * changing the priority because the grq is unaffected.
3131 +        */
3132 +       if (queued)
3133 +               dequeue_task(p);
3134 +
3135 +       adjust_deadline(p, new_static);
3136 +       p->static_prio = new_static;
3137 +       p->prio = effective_prio(p);
3138 +
3139 +       if (queued) {
3140 +               enqueue_task(p);
3141 +               try_preempt(p);
3142 +       }
3143 +
3144 +       /* Just resched the task, schedule() will know what to do. */
3145 +       if (task_running(p))
3146 +               resched_task(p);
3147 +out_unlock:
3148 +       task_grq_unlock(&flags);
3149 +}
3150 +EXPORT_SYMBOL(set_user_nice);
3151 +
3152 +/*
3153 + * can_nice - check if a task can reduce its nice value
3154 + * @p: task
3155 + * @nice: nice value
3156 + */
3157 +int can_nice(const struct task_struct *p, const int nice)
3158 +{
3159 +       /* convert nice value [19,-20] to rlimit style value [1,40] */
3160 +       int nice_rlim = 20 - nice;
3161 +
3162 +       return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
3163 +               capable(CAP_SYS_NICE));
3164 +}
3165 +
3166 +#ifdef __ARCH_WANT_SYS_NICE
3167 +
3168 +/*
3169 + * sys_nice - change the priority of the current process.
3170 + * @increment: priority increment
3171 + *
3172 + * sys_setpriority is a more generic, but much slower function that
3173 + * does similar things.
3174 + */
3175 +SYSCALL_DEFINE1(nice, int, increment)
3176 +{
3177 +       long nice, retval;
3178 +
3179 +       /*
3180 +        * Setpriority might change our priority at the same moment.
3181 +        * We don't have to worry. Conceptually one call occurs first
3182 +        * and we have a single winner.
3183 +        */
3184 +       if (increment < -40)
3185 +               increment = -40;
3186 +       if (increment > 40)
3187 +               increment = 40;
3188 +
3189 +       nice = TASK_NICE(current) + increment;
3190 +       if (nice < -20)
3191 +               nice = -20;
3192 +       if (nice > 19)
3193 +               nice = 19;
3194 +
3195 +       if (increment < 0 && !can_nice(current, nice))
3196 +               return -EPERM;
3197 +
3198 +       retval = security_task_setnice(current, nice);
3199 +       if (retval)
3200 +               return retval;
3201 +
3202 +       set_user_nice(current, nice);
3203 +       return 0;
3204 +}
3205 +
3206 +#endif
3207 +
3208 +/**
3209 + * task_prio - return the priority value of a given task.
3210 + * @p: the task in question.
3211 + *
3212 + * This is the priority value as seen by users in /proc.
3213 + * RT tasks are offset by -100. Normal tasks are centered
3214 + * around 1, value goes from 0 (SCHED_ISO) up to 82 (nice +19
3215 + * SCHED_IDLE).
3216 + */
3217 +int task_prio(const struct task_struct *p)
3218 +{
3219 +       int delta, prio = p->prio - MAX_RT_PRIO;
3220 +
3221 +       /* rt tasks and iso tasks */
3222 +       if (prio <= 0)
3223 +               goto out;
3224 +
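     +       /*
     +        * Scale the time remaining until the deadline against
     +        * longest_deadline() and add it to the displayed priority; only
     +        * offsets in the 1..80 range are applied.
     +        */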
3225 +       delta = (p->deadline - jiffies) * 40 / longest_deadline();
3226 +       if (delta > 0 && delta <= 80)
3227 +               prio += delta;
3228 +out:
3229 +       return prio;
3230 +}
3231 +
3232 +/**
3233 + * task_nice - return the nice value of a given task.
3234 + * @p: the task in question.
3235 + */
3236 +int task_nice(const struct task_struct *p)
3237 +{
3238 +       return TASK_NICE(p);
3239 +}
3240 +EXPORT_SYMBOL_GPL(task_nice);
3241 +
3242 +/**
3243 + * idle_cpu - is a given cpu idle currently?
3244 + * @cpu: the processor in question.
3245 + */
3246 +int idle_cpu(int cpu)
3247 +{
3248 +       return cpu_curr(cpu) == cpu_rq(cpu)->idle;
3249 +}
3250 +
3251 +/**
3252 + * idle_task - return the idle task for a given cpu.
3253 + * @cpu: the processor in question.
3254 + */
3255 +struct task_struct *idle_task(int cpu)
3256 +{
3257 +       return cpu_rq(cpu)->idle;
3258 +}
3259 +
3260 +/**
3261 + * find_process_by_pid - find a process with a matching PID value.
3262 + * @pid: the pid in question.
3263 + */
3264 +static inline struct task_struct *find_process_by_pid(pid_t pid)
3265 +{
3266 +       return pid ? find_task_by_vpid(pid) : current;
3267 +}
3268 +
3269 +/* Actually do priority change: must hold grq lock. */
3270 +static void __setscheduler(struct task_struct *p, int policy, int prio)
3271 +{
3272 +       BUG_ON(task_queued_only(p));
3273 +
3274 +       p->policy = policy;
3275 +       p->rt_priority = prio;
3276 +       p->normal_prio = normal_prio(p);
3277 +       /* we are holding p->pi_lock already */
3278 +       p->prio = rt_mutex_getprio(p);
3279 +       /*
3280 +        * Reschedule if running. schedule() will know if it can continue
3281 +        * running or not.
3282 +        */
3283 +       if (task_running(p))
3284 +               resched_task(p);
3285 +}
3286 +
3287 +/*
3288 + * Check whether the target process has a UID that matches the current process's.
3289 + */
3290 +static bool check_same_owner(struct task_struct *p)
3291 +{
3292 +       const struct cred *cred = current_cred(), *pcred;
3293 +       bool match;
3294 +
3295 +       rcu_read_lock();
3296 +       pcred = __task_cred(p);
3297 +       match = (cred->euid == pcred->euid ||
3298 +                cred->euid == pcred->uid);
3299 +       rcu_read_unlock();
3300 +       return match;
3301 +}
3302 +
3303 +static int __sched_setscheduler(struct task_struct *p, int policy,
3304 +                      struct sched_param *param, bool user)
3305 +{
3306 +       struct sched_param zero_param = { .sched_priority = 0 };
3307 +       int queued, retval, oldprio, oldpolicy = -1;
3308 +       unsigned long flags, rlim_rtprio = 0;
3309 +       struct rq *rq;
3310 +
3311 +       /* may grab non-irq protected spin_locks */
3312 +       BUG_ON(in_interrupt());
3313 +
3314 +       if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) {
3315 +               unsigned long lflags;
3316 +
3317 +               if (!lock_task_sighand(p, &lflags))
3318 +                       return -ESRCH;
3319 +               rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
3320 +               unlock_task_sighand(p, &lflags);
3321 +               if (rlim_rtprio)
3322 +                       goto recheck;
3323 +               /*
3324 +                * If the caller requested an RT policy without having the
3325 +                * necessary rights, we downgrade the policy to SCHED_ISO.
3326 +                * We also set the parameter to zero to pass the checks.
3327 +                */
3328 +               policy = SCHED_ISO;
3329 +               param = &zero_param;
3330 +       }
3331 +recheck:
3332 +       /* double check policy once rq lock held */
3333 +       if (policy < 0)
3334 +               policy = oldpolicy = p->policy;
3335 +       else if (!SCHED_RANGE(policy))
3336 +               return -EINVAL;
3337 +       /*
3338 +        * Valid priorities for SCHED_FIFO and SCHED_RR are
3339 +        * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
3340 +        * SCHED_BATCH, SCHED_ISO and SCHED_IDLE is 0.
3341 +        */
3342 +       if (param->sched_priority < 0 ||
3343 +           (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
3344 +           (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
3345 +               return -EINVAL;
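     +       /*
     +        * A realtime policy must be accompanied by a non-zero
     +        * sched_priority, and a non-realtime policy by a zero one.
     +        */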
3346 +       if (is_rt_policy(policy) != (param->sched_priority != 0))
3347 +               return -EINVAL;
3348 +
3349 +       /*
3350 +        * Allow unprivileged RT tasks to decrease priority:
3351 +        */
3352 +       if (user && !capable(CAP_SYS_NICE)) {
3353 +               if (is_rt_policy(policy)) {
3354 +                       /* can't set/change the rt policy */
3355 +                       if (policy != p->policy && !rlim_rtprio)
3356 +                               return -EPERM;
3357 +
3358 +                       /* can't increase priority */
3359 +                       if (param->sched_priority > p->rt_priority &&
3360 +                           param->sched_priority > rlim_rtprio)
3361 +                               return -EPERM;
3362 +               } else {
3363 +                       switch (p->policy) {
3364 +                               /*
3365 +                                * Can only downgrade policies but not back to
3366 +                                * SCHED_NORMAL
3367 +                                */
3368 +                               case SCHED_ISO:
3369 +                                       if (policy == SCHED_ISO)
3370 +                                               goto out;
3371 +                                       if (policy == SCHED_NORMAL)
3372 +                                               return -EPERM;
3373 +                                       break;
3374 +                               case SCHED_BATCH:
3375 +                                       if (policy == SCHED_BATCH)
3376 +                                               goto out;
3377 +                                       if (policy != SCHED_IDLE)
3378 +                                               return -EPERM;
3379 +                                       break;
3380 +                               case SCHED_IDLE:
3381 +                                       if (policy == SCHED_IDLE)
3382 +                                               goto out;
3383 +                                       return -EPERM;
3384 +                               default:
3385 +                                       break;
3386 +                       }
3387 +               }
3388 +
3389 +               /* can't change other user's priorities */
3390 +               if (!check_same_owner(p))
3391 +                       return -EPERM;
3392 +       }
3393 +
3394 +       retval = security_task_setscheduler(p, policy, param);
3395 +       if (retval)
3396 +               return retval;
3397 +       /*
3398 +        * make sure no PI-waiters arrive (or leave) while we are
3399 +        * changing the priority of the task:
3400 +        */
3401 +       spin_lock_irqsave(&p->pi_lock, flags);
3402 +       /*
3403 +        * To be able to change p->policy safely, the appropriate
3404 +        * runqueue lock must be held.
3405 +        */
3406 +       rq = __task_grq_lock(p);
3407 +       /* recheck policy now with rq lock held */
3408 +       if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
3409 +               __task_grq_unlock();
3410 +               spin_unlock_irqrestore(&p->pi_lock, flags);
3411 +               policy = oldpolicy = -1;
3412 +               goto recheck;
3413 +       }
3414 +       update_rq_clock(rq);
3415 +       queued = task_queued_only(p);
3416 +       if (queued)
3417 +               dequeue_task(p);
3418 +       oldprio = p->prio;
3419 +       __setscheduler(p, policy, param->sched_priority);
3420 +       if (queued) {
3421 +               enqueue_task(p);
3422 +               try_preempt(p);
3423 +       }
3424 +       __task_grq_unlock();
3425 +       spin_unlock_irqrestore(&p->pi_lock, flags);
3426 +
3427 +       rt_mutex_adjust_pi(p);
3428 +out:
3429 +       return 0;
3430 +}
3431 +
3432 +/**
3433 + * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
3434 + * @p: the task in question.
3435 + * @policy: new policy.
3436 + * @param: structure containing the new RT priority.
3437 + *
3438 + * NOTE that the task may be already dead.
3439 + */
3440 +int sched_setscheduler(struct task_struct *p, int policy,
3441 +                      struct sched_param *param)
3442 +{
3443 +       return __sched_setscheduler(p, policy, param, true);
3444 +}
3445 +
3446 +EXPORT_SYMBOL_GPL(sched_setscheduler);
3447 +
3448 +/**
3449 + * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
3450 + * @p: the task in question.
3451 + * @policy: new policy.
3452 + * @param: structure containing the new RT priority.
3453 + *
3454 + * Just like sched_setscheduler, only don't bother checking if the
3455 + * current context has permission.  For example, this is needed in
3456 + * stop_machine(): we create temporary high priority worker threads,
3457 + * but our caller might not have that capability.
3458 + */
3459 +int sched_setscheduler_nocheck(struct task_struct *p, int policy,
3460 +                              struct sched_param *param)
3461 +{
3462 +       return __sched_setscheduler(p, policy, param, false);
3463 +}
3464 +
3465 +static int
3466 +do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
3467 +{
3468 +       struct sched_param lparam;
3469 +       struct task_struct *p;
3470 +       int retval;
3471 +
3472 +       if (!param || pid < 0)
3473 +               return -EINVAL;
3474 +       if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
3475 +               return -EFAULT;
3476 +
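     +       /*
     +        * Hold the RCU read lock across the pid lookup so the task cannot
     +        * be freed while sched_setscheduler() operates on it.
     +        */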
3477 +       rcu_read_lock();
3478 +       retval = -ESRCH;
3479 +       p = find_process_by_pid(pid);
3480 +       if (p != NULL)
3481 +               retval = sched_setscheduler(p, policy, &lparam);
3482 +       rcu_read_unlock();
3483 +
3484 +       return retval;
3485 +}
3486 +
3487 +/**
3488 + * sys_sched_setscheduler - set/change the scheduler policy and RT priority
3489 + * @pid: the pid in question.
3490 + * @policy: new policy.
3491 + * @param: structure containing the new RT priority.
3492 + */
3493 +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
3494 +                                      struct sched_param __user *param)
3495 +{
3496 +       /* negative values for policy are not valid */
3497 +       if (policy < 0)
3498 +               return -EINVAL;
3499 +
3500 +       return do_sched_setscheduler(pid, policy, param);
3501 +}
3502 +
3503 +/**
3504 + * sys_sched_setparam - set/change the RT priority of a thread
3505 + * @pid: the pid in question.
3506 + * @param: structure containing the new RT priority.
3507 + */
3508 +SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
3509 +{
3510 +       return do_sched_setscheduler(pid, -1, param);
3511 +}
3512 +
3513 +/**
3514 + * sys_sched_getscheduler - get the policy (scheduling class) of a thread
3515 + * @pid: the pid in question.
3516 + */
3517 +SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
3518 +{
3519 +       struct task_struct *p;
3520 +       int retval = -EINVAL;
3521 +
3522 +       if (pid < 0)
3523 +               goto out_nounlock;
3524 +
3525 +       retval = -ESRCH;
3526 +       read_lock(&tasklist_lock);
3527 +       p = find_process_by_pid(pid);
3528 +       if (p) {
3529 +               retval = security_task_getscheduler(p);
3530 +               if (!retval)
3531 +                       retval = p->policy;
3532 +       }
3533 +       read_unlock(&tasklist_lock);
3534 +
3535 +out_nounlock:
3536 +       return retval;
3537 +}
3538 +
3539 +/**
3540 + * sys_sched_getparam - get the RT priority of a thread
3541 + * @pid: the pid in question.
3542 + * @param: structure containing the RT priority.
3543 + */
3544 +SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
3545 +{
3546 +       struct sched_param lp;
3547 +       struct task_struct *p;
3548 +       int retval = -EINVAL;
3549 +
3550 +       if (!param || pid < 0)
3551 +               goto out_nounlock;
3552 +
3553 +       read_lock(&tasklist_lock);
3554 +       p = find_process_by_pid(pid);
3555 +       retval = -ESRCH;
3556 +       if (!p)
3557 +               goto out_unlock;
3558 +
3559 +       retval = security_task_getscheduler(p);
3560 +       if (retval)
3561 +               goto out_unlock;
3562 +
3563 +       lp.sched_priority = p->rt_priority;
3564 +       read_unlock(&tasklist_lock);
3565 +
3566 +       /*
3567 +        * This one might sleep, we cannot do it with a spinlock held ...
3568 +        */
3569 +       retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
3570 +
3571 +out_nounlock:
3572 +       return retval;
3573 +
3574 +out_unlock:
3575 +       read_unlock(&tasklist_lock);
3576 +       return retval;
3577 +}
3578 +
3579 +long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
3580 +{
3581 +       cpumask_var_t cpus_allowed, new_mask;
3582 +       struct task_struct *p;
3583 +       int retval;
3584 +
3585 +       get_online_cpus();
3586 +       read_lock(&tasklist_lock);
3587 +
3588 +       p = find_process_by_pid(pid);
3589 +       if (!p) {
3590 +               read_unlock(&tasklist_lock);
3591 +               put_online_cpus();
3592 +               return -ESRCH;
3593 +       }
3594 +
3595 +       /*
3596 +        * It is not safe to call set_cpus_allowed with the
3597 +        * tasklist_lock held. We will bump the task_struct's
3598 +        * usage count and then drop tasklist_lock.
3599 +        */
3600 +       get_task_struct(p);
3601 +       read_unlock(&tasklist_lock);
3602 +
3603 +       if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
3604 +               retval = -ENOMEM;
3605 +               goto out_put_task;
3606 +       }
3607 +       if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
3608 +               retval = -ENOMEM;
3609 +               goto out_free_cpus_allowed;
3610 +       }
3611 +       retval = -EPERM;
3612 +       if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
3613 +               goto out_unlock;
3614 +
3615 +       retval = security_task_setscheduler(p, 0, NULL);
3616 +       if (retval)
3617 +               goto out_unlock;
3618 +
3619 +       cpuset_cpus_allowed(p, cpus_allowed);
3620 +       cpumask_and(new_mask, in_mask, cpus_allowed);
3621 +again:
3622 +       retval = set_cpus_allowed_ptr(p, new_mask);
3623 +
3624 +       if (!retval) {
3625 +               cpuset_cpus_allowed(p, cpus_allowed);
3626 +               if (!cpumask_subset(new_mask, cpus_allowed)) {
3627 +                       /*
3628 +                        * We must have raced with a concurrent cpuset
3629 +                        * update. Just reset the cpus_allowed to the
3630 +                        * cpuset's cpus_allowed
3631 +                        */
3632 +                       cpumask_copy(new_mask, cpus_allowed);
3633 +                       goto again;
3634 +               }
3635 +       }
3636 +out_unlock:
3637 +       free_cpumask_var(new_mask);
3638 +out_free_cpus_allowed:
3639 +       free_cpumask_var(cpus_allowed);
3640 +out_put_task:
3641 +       put_task_struct(p);
3642 +       put_online_cpus();
3643 +       return retval;
3644 +}
3645 +
3646 +static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
3647 +                            cpumask_t *new_mask)
3648 +{
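     +       /*
     +        * Zero-fill the kernel mask when userspace passes a short buffer,
     +        * and silently truncate an oversized one before copying.
     +        */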
3649 +       if (len < sizeof(cpumask_t)) {
3650 +               memset(new_mask, 0, sizeof(cpumask_t));
3651 +       } else if (len > sizeof(cpumask_t)) {
3652 +               len = sizeof(cpumask_t);
3653 +       }
3654 +       return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
3655 +}
3656 +
3657 +
3658 +/**
3659 + * sys_sched_setaffinity - set the cpu affinity of a process
3660 + * @pid: pid of the process
3661 + * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3662 + * @user_mask_ptr: user-space pointer to the new cpu mask
3663 + */
3664 +SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
3665 +               unsigned long __user *, user_mask_ptr)
3666 +{
3667 +       cpumask_var_t new_mask;
3668 +       int retval;
3669 +
3670 +       if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
3671 +               return -ENOMEM;
3672 +
3673 +       retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
3674 +       if (retval == 0)
3675 +               retval = sched_setaffinity(pid, new_mask);
3676 +       free_cpumask_var(new_mask);
3677 +       return retval;
3678 +}
3679 +
3680 +long sched_getaffinity(pid_t pid, cpumask_t *mask)
3681 +{
3682 +       struct task_struct *p;
3683 +       int retval;
3684 +
3685 +       mutex_lock(&sched_hotcpu_mutex);
3686 +       read_lock(&tasklist_lock);
3687 +
3688 +       retval = -ESRCH;
3689 +       p = find_process_by_pid(pid);
3690 +       if (!p)
3691 +               goto out_unlock;
3692 +
3693 +       retval = security_task_getscheduler(p);
3694 +       if (retval)
3695 +               goto out_unlock;
3696 +
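     +       /* Only report cpus that are both allowed and currently online. */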
3697 +       cpus_and(*mask, p->cpus_allowed, cpu_online_map);
3698 +
3699 +out_unlock:
3700 +       read_unlock(&tasklist_lock);
3701 +       mutex_unlock(&sched_hotcpu_mutex);
3702 +       if (retval)
3703 +               return retval;
3704 +
3705 +       return 0;
3706 +}
3707 +
3708 +/**
3709 + * sys_sched_getaffinity - get the cpu affinity of a process
3710 + * @pid: pid of the process
3711 + * @len: length in bytes of the bitmask pointed to by user_mask_ptr
3712 + * @user_mask_ptr: user-space pointer to hold the current cpu mask
3713 + */
3714 +SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
3715 +               unsigned long __user *, user_mask_ptr)
3716 +{
3717 +       int ret;
3718 +       cpumask_var_t mask;
3719 +
3720 +       if (len < cpumask_size())
3721 +               return -EINVAL;
3722 +
3723 +       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
3724 +               return -ENOMEM;
3725 +
3726 +       ret = sched_getaffinity(pid, mask);
3727 +       if (ret == 0) {
3728 +               if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
3729 +                       ret = -EFAULT;
3730 +               else
3731 +                       ret = cpumask_size();
3732 +       }
3733 +       free_cpumask_var(mask);
3734 +
3735 +       return ret;
3736 +}
3737 +
3738 +/**
3739 + * sys_sched_yield - yield the current processor to other threads.
3740 + *
3741 + * This function yields the current CPU to other tasks. It does this by
3742 + * refilling the timeslice, resetting the deadline and scheduling away.
3743 + */
3744 +SYSCALL_DEFINE0(sched_yield)
3745 +{
3746 +       struct task_struct *p;
3747 +
3748 +       grq_lock_irq();
3749 +       p = current;
3750 +       schedstat_inc(this_rq(), yld_count);
3751 +       update_rq_clock(task_rq(p));
3752 +       time_slice_expired(p);
3753 +       requeue_task(p);
3754 +
3755 +       /*
3756 +        * Since we are going to call schedule() anyway, there's
3757 +        * no need to preempt or enable interrupts:
3758 +        */
3759 +       __release(grq.lock);
3760 +       spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
3761 +       _raw_spin_unlock(&grq.lock);
3762 +       preempt_enable_no_resched();
3763 +
3764 +       schedule();
3765 +
3766 +       return 0;
3767 +}
3768 +
3769 +static inline int should_resched(void)
3770 +{
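     +       /* Resched only when asked to and not already inside PREEMPT_ACTIVE. */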
3771 +       return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
3772 +}
3773 +
3774 +static void __cond_resched(void)
3775 +{
3776 +#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
3777 +       __might_sleep(__FILE__, __LINE__);
3778 +#endif
3779 +       /*
3780 +        * The BKS might be reacquired before we have dropped
3781 +        * PREEMPT_ACTIVE, which could trigger a second
3782 +        * cond_resched() call.
3783 +        */
3784 +       do {
3785 +               add_preempt_count(PREEMPT_ACTIVE);
3786 +               schedule();
3787 +               sub_preempt_count(PREEMPT_ACTIVE);
3788 +       } while (need_resched());
3789 +}
3790 +
3791 +int __sched _cond_resched(void)
3792 +{
3793 +       if (should_resched()) {
3794 +               __cond_resched();
3795 +               return 1;
3796 +       }
3797 +       return 0;
3798 +}
3799 +EXPORT_SYMBOL(_cond_resched);
3800 +
3801 +/*
3802 + * cond_resched_lock() - if a reschedule is pending, drop the given lock,
3803 + * call schedule, and on return reacquire the lock.
3804 + *
3805 + * This works OK both with and without CONFIG_PREEMPT.  We do strange low-level
3806 + * operations here to prevent schedule() from being called twice (once via
3807 + * spin_unlock(), once by hand).
3808 + */
3809 +int cond_resched_lock(spinlock_t *lock)
3810 +{
3811 +       int resched = should_resched();
3812 +       int ret = 0;
3813 +
3814 +       if (spin_needbreak(lock) || resched) {
3815 +               spin_unlock(lock);
3816 +               if (resched)
3817 +                       __cond_resched();
3818 +               else
3819 +                       cpu_relax();
3820 +               ret = 1;
3821 +               spin_lock(lock);
3822 +       }
3823 +       return ret;
3824 +}
3825 +EXPORT_SYMBOL(cond_resched_lock);
3826 +
3827 +int __sched cond_resched_softirq(void)
3828 +{
3829 +       BUG_ON(!in_softirq());
3830 +
3831 +       if (should_resched()) {
3832 +               local_bh_enable();
3833 +               __cond_resched();
3834 +               local_bh_disable();
3835 +               return 1;
3836 +       }
3837 +       return 0;
3838 +}
3839 +EXPORT_SYMBOL(cond_resched_softirq);
3840 +
3841 +/**
3842 + * yield - yield the current processor to other threads.
3843 + *
3844 + * This is a shortcut for kernel-space yielding - it marks the
3845 + * thread runnable and calls sys_sched_yield().
3846 + */
3847 +void __sched yield(void)
3848 +{
3849 +       set_current_state(TASK_RUNNING);
3850 +       sys_sched_yield();
3851 +}
3852 +EXPORT_SYMBOL(yield);
3853 +
3854 +/*
3855 + * This task is about to go to sleep on IO.  Increment rq->nr_iowait so
3856 + * that process accounting knows that this is a task in IO wait state.
3857 + *
3858 + * But don't do that if it is a deliberate, throttling IO wait (this task
3859 + * has set its backing_dev_info: the queue against which it should throttle)
3860 + */
3861 +void __sched io_schedule(void)
3862 +{
3863 +       struct rq *rq = &__raw_get_cpu_var(runqueues);
3864 +
3865 +       delayacct_blkio_start();
3866 +       atomic_inc(&rq->nr_iowait);
3867 +       schedule();
3868 +       atomic_dec(&rq->nr_iowait);
3869 +       delayacct_blkio_end();
3870 +}
3871 +EXPORT_SYMBOL(io_schedule);
3872 +
3873 +long __sched io_schedule_timeout(long timeout)
3874 +{
3875 +       struct rq *rq = &__raw_get_cpu_var(runqueues);
3876 +       long ret;
3877 +
3878 +       delayacct_blkio_start();
3879 +       atomic_inc(&rq->nr_iowait);
3880 +       ret = schedule_timeout(timeout);
3881 +       atomic_dec(&rq->nr_iowait);
3882 +       delayacct_blkio_end();
3883 +       return ret;
3884 +}
3885 +
3886 +/**
3887 + * sys_sched_get_priority_max - return maximum RT priority.
3888 + * @policy: scheduling class.
3889 + *
3890 + * this syscall returns the maximum rt_priority that can be used
3891 + * by a given scheduling class.
3892 + */
3893 +SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
3894 +{
3895 +       int ret = -EINVAL;
3896 +
3897 +       switch (policy) {
3898 +       case SCHED_FIFO:
3899 +       case SCHED_RR:
3900 +               ret = MAX_USER_RT_PRIO-1;
3901 +               break;
3902 +       case SCHED_NORMAL:
3903 +       case SCHED_BATCH:
3904 +       case SCHED_ISO:
3905 +       case SCHED_IDLE:
3906 +               ret = 0;
3907 +               break;
3908 +       }
3909 +       return ret;
3910 +}
3911 +
3912 +/**
3913 + * sys_sched_get_priority_min - return minimum RT priority.
3914 + * @policy: scheduling class.
3915 + *
3916 + * this syscall returns the minimum rt_priority that can be used
3917 + * by a given scheduling class.
3918 + */
3919 +SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
3920 +{
3921 +       int ret = -EINVAL;
3922 +
3923 +       switch (policy) {
3924 +       case SCHED_FIFO:
3925 +       case SCHED_RR:
3926 +               ret = 1;
3927 +               break;
3928 +       case SCHED_NORMAL:
3929 +       case SCHED_BATCH:
3930 +       case SCHED_ISO:
3931 +       case SCHED_IDLE:
3932 +               ret = 0;
3933 +               break;
3934 +       }
3935 +       return ret;
3936 +}
3937 +
3938 +/**
3939 + * sys_sched_rr_get_interval - return the default timeslice of a process.
3940 + * @pid: pid of the process.
3941 + * @interval: userspace pointer to the timeslice value.
3942 + *
3943 + * this syscall writes the default timeslice value of a given process
3944 + * into the user-space timespec buffer. A value of '0' means infinity.
3945 + */
3946 +SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
3947 +               struct timespec __user *, interval)
3948 +{
3949 +       struct task_struct *p;
3950 +       int retval = -EINVAL;
3951 +       struct timespec t;
3952 +
3953 +       if (pid < 0)
3954 +               goto out_nounlock;
3955 +
3956 +       retval = -ESRCH;
3957 +       read_lock(&tasklist_lock);
3958 +       p = find_process_by_pid(pid);
3959 +       if (!p)
3960 +               goto out_unlock;
3961 +
3962 +       retval = security_task_getscheduler(p);
3963 +       if (retval)
3964 +               goto out_unlock;
3965 +
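     +       /* SCHED_FIFO has no timeslice; report 0, which means infinity. */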
3966 +       t = ns_to_timespec(p->policy == SCHED_FIFO ? 0 :
3967 +                          MS_TO_NS(task_timeslice(p)));
3968 +       read_unlock(&tasklist_lock);
3969 +       retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
3970 +out_nounlock:
3971 +       return retval;
3972 +out_unlock:
3973 +       read_unlock(&tasklist_lock);
3974 +       return retval;
3975 +}
3976 +
3977 +static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
3978 +
3979 +void sched_show_task(struct task_struct *p)
3980 +{
3981 +       unsigned long free = 0;
3982 +       unsigned state;
3983 +
3984 +       state = p->state ? __ffs(p->state) + 1 : 0;
3985 +       printk(KERN_INFO "%-13.13s %c", p->comm,
3986 +               state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
3987 +#if BITS_PER_LONG == 32
3988 +       if (state == TASK_RUNNING)
3989 +               printk(KERN_CONT " running  ");
3990 +       else
3991 +               printk(KERN_CONT " %08lx ", thread_saved_pc(p));
3992 +#else
3993 +       if (state == TASK_RUNNING)
3994 +               printk(KERN_CONT "  running task    ");
3995 +       else
3996 +               printk(KERN_CONT " %016lx ", thread_saved_pc(p));
3997 +#endif
3998 +#ifdef CONFIG_DEBUG_STACK_USAGE
3999 +       free = stack_not_used(p);
4000 +#endif
4001 +       printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
4002 +               task_pid_nr(p), task_pid_nr(p->real_parent),
4003 +               (unsigned long)task_thread_info(p)->flags);
4004 +
4005 +       show_stack(p, NULL);
4006 +}
4007 +
4008 +void show_state_filter(unsigned long state_filter)
4009 +{
4010 +       struct task_struct *g, *p;
4011 +
4012 +#if BITS_PER_LONG == 32
4013 +       printk(KERN_INFO
4014 +               "  task                PC stack   pid father\n");
4015 +#else
4016 +       printk(KERN_INFO
4017 +               "  task                        PC stack   pid father\n");
4018 +#endif
4019 +       read_lock(&tasklist_lock);
4020 +       do_each_thread(g, p) {
4021 +               /*
4022 +                * reset the NMI-timeout, listing all tasks on a slow
4023 +                * console might take a lot of time:
4024 +                */
4025 +               touch_nmi_watchdog();
4026 +               if (!state_filter || (p->state & state_filter))
4027 +                       sched_show_task(p);
4028 +       } while_each_thread(g, p);
4029 +
4030 +       touch_all_softlockup_watchdogs();
4031 +
4032 +       read_unlock(&tasklist_lock);
4033 +       /*
4034 +        * Only show locks if all tasks are dumped:
4035 +        */
4036 +       if (state_filter == -1)
4037 +               debug_show_all_locks();
4038 +}
4039 +
4040 +/**
4041 + * init_idle - set up an idle thread for a given CPU
4042 + * @idle: task in question
4043 + * @cpu: cpu the idle task belongs to
4044 + *
4045 + * NOTE: this function does not set the idle thread's NEED_RESCHED
4046 + * flag, to make booting more robust.
4047 + */
4048 +void __cpuinit init_idle(struct task_struct *idle, int cpu)
4049 +{
4050 +       struct rq *rq = cpu_rq(cpu);
4051 +       unsigned long flags;
4052 +
4053 +       time_grq_lock(rq, &flags);
4054 +       idle->timestamp = idle->last_ran = rq->clock;
4055 +       idle->state = TASK_RUNNING;
4056 +       /* Setting prio to illegal value shouldn't matter when never queued */
4057 +       idle->prio = rq->rq_prio = PRIO_LIMIT;
4058 +       rq->rq_deadline = idle->deadline;
4059 +       rq->rq_policy = idle->policy;
4060 +       rq->rq_time_slice = idle->rt.time_slice;
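     +       /* Bind the idle task to its own cpu. */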
4061 +       idle->cpus_allowed = cpumask_of_cpu(cpu);
4062 +       set_task_cpu(idle, cpu);
4063 +       rq->curr = rq->idle = idle;
4064 +       idle->oncpu = 1;
4065 +       set_cpuidle_map(cpu);
4066 +#ifdef CONFIG_HOTPLUG_CPU
4067 +       idle->unplugged_mask = CPU_MASK_NONE;
4068 +#endif
4069 +       grq_unlock_irqrestore(&flags);
4070 +
4071 +       /* Set the preempt count _outside_ the spinlocks! */
4072 +#if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
4073 +       task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
4074 +#else
4075 +       task_thread_info(idle)->preempt_count = 0;
4076 +#endif
4077 +       ftrace_graph_init_task(idle);
4078 +}
4079 +
4080 +/*
4081 + * In a system that switches off the HZ timer nohz_cpu_mask
4082 + * indicates which cpus entered this state. This is used
4083 + * in the rcu update to wait only for active cpus. For systems
4084 + * which do not switch off the HZ timer nohz_cpu_mask should
4085 + * always be CPU_BITS_NONE.
4086 + */
4087 +cpumask_var_t nohz_cpu_mask;
4088 +
4089 +#ifdef CONFIG_SMP
4090 +#ifdef CONFIG_NO_HZ
4091 +static struct {
4092 +       atomic_t load_balancer;
4093 +       cpumask_var_t cpu_mask;
4094 +       cpumask_var_t ilb_grp_nohz_mask;
4095 +} nohz ____cacheline_aligned = {
4096 +       .load_balancer = ATOMIC_INIT(-1),
4097 +};
4098 +
4099 +int get_nohz_load_balancer(void)
4100 +{
4101 +       return atomic_read(&nohz.load_balancer);
4102 +}
4103 +
4104 +/*
4105 + * This routine will try to nominate the ilb (idle load balancing)
4106 + * owner among the cpus whose ticks are stopped. ilb owner will do the idle
4107 + * load balancing on behalf of all those cpus. If all the cpus in the system
4108 + * go into this tickless mode, then there will be no ilb owner (as there is
4109 + * no need for one) and all the cpus will sleep till the next wakeup event
4110 + * arrives...
4111 + *
4112 + * For the ilb owner, tick is not stopped. And this tick will be used
4113 + * for idle load balancing. ilb owner will still be part of
4114 + * nohz.cpu_mask..
4115 + *
4116 + * While stopping the tick, this cpu will become the ilb owner if there
4117 + * is no other owner. And will be the owner till that cpu becomes busy
4118 + * or if all cpus in the system stop their ticks at which point
4119 + * there is no need for ilb owner.
4120 + *
4121 + * When the ilb owner becomes busy, it nominates another owner, during the
4122 + * next busy scheduler_tick()
4123 + */
4124 +int select_nohz_load_balancer(int stop_tick)
4125 +{
4126 +       int cpu = smp_processor_id();
4127 +
4128 +       if (stop_tick) {
4129 +               cpu_rq(cpu)->in_nohz_recently = 1;
4130 +
4131 +               if (!cpu_active(cpu)) {
4132 +                       if (atomic_read(&nohz.load_balancer) != cpu)
4133 +                               return 0;
4134 +
4135 +                       /*
4136 +                        * If we are going offline and still the leader,
4137 +                        * give up!
4138 +                        */
4139 +                       if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4140 +                               BUG();
4141 +
4142 +                       return 0;
4143 +               }
4144 +
4145 +               cpumask_set_cpu(cpu, nohz.cpu_mask);
4146 +
4147 +               /* time for ilb owner also to sleep */
4148 +               if (cpumask_weight(nohz.cpu_mask) == num_online_cpus()) {
4149 +                       if (atomic_read(&nohz.load_balancer) == cpu)
4150 +                               atomic_set(&nohz.load_balancer, -1);
4151 +                       return 0;
4152 +               }
4153 +
4154 +               if (atomic_read(&nohz.load_balancer) == -1) {
4155 +                       /* make me the ilb owner */
4156 +                       if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
4157 +                               return 1;
4158 +               } else if (atomic_read(&nohz.load_balancer) == cpu)
4159 +                       return 1;
4160 +       } else {
4161 +               if (!cpumask_test_cpu(cpu, nohz.cpu_mask))
4162 +                       return 0;
4163 +
4164 +               cpumask_clear_cpu(cpu, nohz.cpu_mask);
4165 +
4166 +               if (atomic_read(&nohz.load_balancer) == cpu)
4167 +                       if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
4168 +                               BUG();
4169 +       }
4170 +       return 0;
4171 +}
4172 +
4173 +/*
4174 + * When add_timer_on() enqueues a timer into the timer wheel of an
4175 + * idle CPU then this timer might expire before the next timer event
4176 + * which is scheduled to wake up that CPU. In case of a completely
4177 + * idle system the next event might even be infinite time into the
4178 + * future. wake_up_idle_cpu() ensures that the CPU is woken up and
4179 + * leaves the inner idle loop so the newly added timer is taken into
4180 + * account when the CPU goes back to idle and evaluates the timer
4181 + * wheel for the next timer event.
4182 + */
4183 +void wake_up_idle_cpu(int cpu)
4184 +{
4185 +       struct task_struct *idle;
4186 +       struct rq *rq;
4187 +
4188 +       if (cpu == smp_processor_id())
4189 +               return;
4190 +
4191 +       rq = cpu_rq(cpu);
4192 +       idle = rq->idle;
4193 +
4194 +       /*
4195 +        * This is safe, as this function is called with the timer
4196 +        * wheel base lock of (cpu) held. When the CPU is on the way
4197 +        * to idle and has not yet set rq->curr to idle then it will
4198 +        * be serialized on the timer wheel base lock and take the new
4199 +        * timer into account automatically.
4200 +        */
4201 +       if (unlikely(rq->curr != idle))
4202 +               return;
4203 +
4204 +       /*
4205 +        * We can set TIF_RESCHED on the idle task of the other CPU
4206 +        * lockless. The worst case is that the other CPU runs the
4207 +        * idle task through an additional NOOP schedule()
4208 +        */
4209 +       set_tsk_need_resched(idle);
4210 +
4211 +       /* NEED_RESCHED must be visible before we test polling */
4212 +       smp_mb();
4213 +       if (!tsk_is_polling(idle))
4214 +               smp_send_reschedule(cpu);
4215 +}
4216 +
4217 +#endif /* CONFIG_NO_HZ */
4218 +
4219 +/*
4220 + * Change a given task's CPU affinity. Migrate the thread to a
4221 + * proper CPU and schedule it away if the CPU it's executing on
4222 + * is removed from the allowed bitmask.
4223 + *
4224 + * NOTE: the caller must have a valid reference to the task, the
4225 + * task must not exit() & deallocate itself prematurely. The
4226 + * call is not atomic; no spinlocks may be held.
4227 + */
4228 +int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
4229 +{
4230 +       unsigned long flags;
4231 +       int running = 0;
4232 +       int queued = 0;
4233 +       struct rq *rq;
4234 +       int ret = 0;
4235 +
4236 +       rq = task_grq_lock(p, &flags);
4237 +       if (!cpumask_intersects(new_mask, cpu_online_mask)) {
4238 +               ret = -EINVAL;
4239 +               goto out;
4240 +       }
4241 +
4242 +       if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
4243 +                    !cpumask_equal(&p->cpus_allowed, new_mask))) {
4244 +               ret = -EINVAL;
4245 +               goto out;
4246 +       }
4247 +
4248 +       queued = task_queued_only(p);
4249 +
4250 +       cpumask_copy(&p->cpus_allowed, new_mask);
4251 +       p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
4252 +
4253 +       /* Can the task run on the task's current CPU? If so, we're done */
4254 +       if (cpumask_test_cpu(task_cpu(p), new_mask))
4255 +               goto out;
4256 +
4257 +       /* Reschedule the task, schedule() will know if it can keep running */
4258 +       if (task_running(p))
4259 +               running = 1;
4260 +       else
4261 +               set_task_cpu(p, cpumask_any_and(cpu_online_mask, new_mask));
4262 +
4263 +out:
4264 +       if (queued)
4265 +               try_preempt(p);
4266 +       task_grq_unlock(&flags);
4267 +
4268 +       /* This might be a flaky way of changing cpus! */
4269 +       if (running)
4270 +               schedule();
4271 +       return ret;
4272 +}
4273 +EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
4274 +
4275 +#ifdef CONFIG_HOTPLUG_CPU
4276 +/* Schedules idle task to be the next runnable task on current CPU.
4277 + * It does so by boosting its priority to highest possible.
4278 + * Used by CPU offline code.
4279 + */
4280 +void sched_idle_next(void)
4281 +{
4282 +       int this_cpu = smp_processor_id();
4283 +       struct rq *rq = cpu_rq(this_cpu);
4284 +       struct task_struct *idle = rq->idle;
4285 +       unsigned long flags;
4286 +
4287 +       /* cpu has to be offline */
4288 +       BUG_ON(cpu_online(this_cpu));
4289 +
4290 +       /*
4291 +        * Strictly not necessary since rest of the CPUs are stopped by now
4292 +        * and interrupts disabled on the current cpu.
4293 +        */
4294 +       time_grq_lock(rq, &flags);
4295 +
4296 +       __setscheduler(idle, SCHED_FIFO, MAX_RT_PRIO - 1);
4297 +
4298 +       activate_idle_task(idle);
4299 +       set_tsk_need_resched(rq->curr);
4300 +
4301 +       grq_unlock_irqrestore(&flags);
4302 +}
4303 +
4304 +/*
4305 + * Ensures that the idle task is using init_mm right before its cpu goes
4306 + * offline.
4307 + */
4308 +void idle_task_exit(void)
4309 +{
4310 +       struct mm_struct *mm = current->active_mm;
4311 +
4312 +       BUG_ON(cpu_online(smp_processor_id()));
4313 +
4314 +       if (mm != &init_mm)
4315 +               switch_mm(mm, &init_mm, current);
4316 +       mmdrop(mm);
4317 +}
4318 +
4319 +#endif /* CONFIG_HOTPLUG_CPU */
4320 +
4321 +#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
4322 +
4323 +static struct ctl_table sd_ctl_dir[] = {
4324 +       {
4325 +               .procname       = "sched_domain",
4326 +               .mode           = 0555,
4327 +       },
4328 +       {0, },
4329 +};
4330 +
4331 +static struct ctl_table sd_ctl_root[] = {
4332 +       {
4333 +               .ctl_name       = CTL_KERN,
4334 +               .procname       = "kernel",
4335 +               .mode           = 0555,
4336 +               .child          = sd_ctl_dir,
4337 +       },
4338 +       {0, },
4339 +};
4340 +
4341 +static struct ctl_table *sd_alloc_ctl_entry(int n)
4342 +{
4343 +       struct ctl_table *entry =
4344 +               kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
4345 +
4346 +       return entry;
4347 +}
4348 +
4349 +static void sd_free_ctl_entry(struct ctl_table **tablep)
4350 +{
4351 +       struct ctl_table *entry;
4352 +
4353 +       /*
4354 +        * In the intermediate directories, both the child directory and
4355 +        * procname are dynamically allocated and could fail but the mode
4356 +        * will always be set. In the lowest directory the names are
4357 +        * static strings and all have proc handlers.
4358 +        */
4359 +       for (entry = *tablep; entry->mode; entry++) {
4360 +               if (entry->child)
4361 +                       sd_free_ctl_entry(&entry->child);
4362 +               if (entry->proc_handler == NULL)
4363 +                       kfree(entry->procname);
4364 +       }
4365 +
4366 +       kfree(*tablep);
4367 +       *tablep = NULL;
4368 +}
4369 +
4370 +static void
4371 +set_table_entry(struct ctl_table *entry,
4372 +               const char *procname, void *data, int maxlen,
4373 +               mode_t mode, proc_handler *proc_handler)
4374 +{
4375 +       entry->procname = procname;
4376 +       entry->data = data;
4377 +       entry->maxlen = maxlen;
4378 +       entry->mode = mode;
4379 +       entry->proc_handler = proc_handler;
4380 +}
4381 +
4382 +static struct ctl_table *
4383 +sd_alloc_ctl_domain_table(struct sched_domain *sd)
4384 +{
4385 +       struct ctl_table *table = sd_alloc_ctl_entry(13);
4386 +
4387 +       if (table == NULL)
4388 +               return NULL;
4389 +
4390 +       set_table_entry(&table[0], "min_interval", &sd->min_interval,
4391 +               sizeof(long), 0644, proc_doulongvec_minmax);
4392 +       set_table_entry(&table[1], "max_interval", &sd->max_interval,
4393 +               sizeof(long), 0644, proc_doulongvec_minmax);
4394 +       set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
4395 +               sizeof(int), 0644, proc_dointvec_minmax);
4396 +       set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
4397 +               sizeof(int), 0644, proc_dointvec_minmax);
4398 +       set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
4399 +               sizeof(int), 0644, proc_dointvec_minmax);
4400 +       set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
4401 +               sizeof(int), 0644, proc_dointvec_minmax);
4402 +       set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
4403 +               sizeof(int), 0644, proc_dointvec_minmax);
4404 +       set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
4405 +               sizeof(int), 0644, proc_dointvec_minmax);
4406 +       set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
4407 +               sizeof(int), 0644, proc_dointvec_minmax);
4408 +       set_table_entry(&table[9], "cache_nice_tries",
4409 +               &sd->cache_nice_tries,
4410 +               sizeof(int), 0644, proc_dointvec_minmax);
4411 +       set_table_entry(&table[10], "flags", &sd->flags,
4412 +               sizeof(int), 0644, proc_dointvec_minmax);
4413 +       set_table_entry(&table[11], "name", sd->name,
4414 +               CORENAME_MAX_SIZE, 0444, proc_dostring);
4415 +       /* &table[12] is terminator */
4416 +
4417 +       return table;
4418 +}
4419 +
4420 +static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
4421 +{
4422 +       struct ctl_table *entry, *table;
4423 +       struct sched_domain *sd;
4424 +       int domain_num = 0, i;
4425 +       char buf[32];
4426 +
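     +       /* One pass counts the domains, a second fills in an entry for each. */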
4427 +       for_each_domain(cpu, sd)
4428 +               domain_num++;
4429 +       entry = table = sd_alloc_ctl_entry(domain_num + 1);
4430 +       if (table == NULL)
4431 +               return NULL;
4432 +
4433 +       i = 0;
4434 +       for_each_domain(cpu, sd) {
4435 +               snprintf(buf, 32, "domain%d", i);
4436 +               entry->procname = kstrdup(buf, GFP_KERNEL);
4437 +               entry->mode = 0555;
4438 +               entry->child = sd_alloc_ctl_domain_table(sd);
4439 +               entry++;
4440 +               i++;
4441 +       }
4442 +       return table;
4443 +}
4444 +
4445 +static struct ctl_table_header *sd_sysctl_header;
4446 +static void register_sched_domain_sysctl(void)
4447 +{
4448 +       int i, cpu_num = num_online_cpus();
4449 +       struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
4450 +       char buf[32];
4451 +
4452 +       WARN_ON(sd_ctl_dir[0].child);
4453 +       sd_ctl_dir[0].child = entry;
4454 +
4455 +       if (entry == NULL)
4456 +               return;
4457 +
4458 +       for_each_online_cpu(i) {
4459 +               snprintf(buf, 32, "cpu%d", i);
4460 +               entry->procname = kstrdup(buf, GFP_KERNEL);
4461 +               entry->mode = 0555;
4462 +               entry->child = sd_alloc_ctl_cpu_table(i);
4463 +               entry++;
4464 +       }
4465 +
4466 +       WARN_ON(sd_sysctl_header);
4467 +       sd_sysctl_header = register_sysctl_table(sd_ctl_root);
4468 +}
4469 +
4470 +/* may be called multiple times per register */
4471 +static void unregister_sched_domain_sysctl(void)
4472 +{
4473 +       if (sd_sysctl_header)
4474 +               unregister_sysctl_table(sd_sysctl_header);
4475 +       sd_sysctl_header = NULL;
4476 +       if (sd_ctl_dir[0].child)
4477 +               sd_free_ctl_entry(&sd_ctl_dir[0].child);
4478 +}
4479 +#else
4480 +static void register_sched_domain_sysctl(void)
4481 +{
4482 +}
4483 +static void unregister_sched_domain_sysctl(void)
4484 +{
4485 +}
4486 +#endif
4487 +
4488 +static void set_rq_online(struct rq *rq)
4489 +{
4490 +       if (!rq->online) {
4491 +               cpumask_set_cpu(rq->cpu, rq->rd->online);
4492 +               rq->online = 1;
4493 +       }
4494 +}
4495 +
4496 +static void set_rq_offline(struct rq *rq)
4497 +{
4498 +       if (rq->online) {
4499 +               cpumask_clear_cpu(rq->cpu, rq->rd->online);
4500 +               rq->online = 0;
4501 +       }
4502 +}
4503 +
4504 +#ifdef CONFIG_HOTPLUG_CPU
4505 +/*
4506 + * This cpu is going down, so walk over the tasklist and find tasks that can
4507 + * only run on this cpu and remove their affinity. Store the old affinity in
4508 + * unplugged_mask so it can be restored once their correct cpu is online. No
4509 + * need to do anything special since they'll just move on next reschedule if
4510 + * they're running.
4511 + */
4512 +static void remove_cpu(unsigned long cpu)
4513 +{
4514 +       struct task_struct *p, *t;
4515 +
4516 +       read_lock(&tasklist_lock);
4517 +
4518 +       do_each_thread(t, p) {
4519 +               cpumask_t cpus_remaining;
4520 +
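     +               /*
     +                * Work out which online cpus remain for this task once the
     +                * departing cpu is excluded; if none are left, save its
     +                * affinity and open it up to all possible cpus.
     +                */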
4521 +               cpus_and(cpus_remaining, p->cpus_allowed, cpu_online_map);
4522 +               cpu_clear(cpu, cpus_remaining);
4523 +               if (cpus_empty(cpus_remaining)) {
4524 +                       p->unplugged_mask = p->cpus_allowed;
4525 +                       p->cpus_allowed = cpu_possible_map;
4526 +               }
4527 +       } while_each_thread(t, p);
4528 +
4529 +       read_unlock(&tasklist_lock);
4530 +}
4531 +
4532 +/*
4533 + * This cpu is coming up so add it to the cpus_allowed.
4534 + */
4535 +static void add_cpu(unsigned long cpu)
4536 +{
4537 +       struct task_struct *p, *t;
4538 +
4539 +       read_lock(&tasklist_lock);
4540 +
4541 +       do_each_thread(t, p) {
4542 +               /* Have we taken all the cpus from the unplugged_mask back? */
4543 +               if (cpus_empty(p->unplugged_mask))
4544 +                       continue;
4545 +
4546 +               /* Was this cpu in the unplugged_mask? */
4547 +               if (cpu_isset(cpu, p->unplugged_mask)) {
4548 +                       cpu_set(cpu, p->cpus_allowed);
4549 +                       if (cpus_subset(p->unplugged_mask, p->cpus_allowed)) {
4550 +                               /*
4551 +                                * Have we set more than the unplugged_mask?
4552 +                                * If so, that means we have remnants set from
4553 +                                * the unplug/plug cycle and need to remove
4554 +                                * them. Then clear the unplugged_mask as we've
4555 +                                * set all the cpus back.
4556 +                                */
4557 +                               p->cpus_allowed = p->unplugged_mask;
4558 +                               cpus_clear(p->unplugged_mask);
4559 +                       }
4560 +               }
4561 +       } while_each_thread(t, p);
4562 +
4563 +       read_unlock(&tasklist_lock);
4564 +}
4565 +#else
4566 +static void add_cpu(unsigned long cpu)
4567 +{
4568 +}
4569 +#endif
4570 +
4571 +/*
4572 + * migration_call - callback that gets triggered when a CPU is added or removed.
4573 + */
4574 +static int __cpuinit
4575 +migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
4576 +{
4577 +       int cpu = (long)hcpu;
4578 +       unsigned long flags;
4579 +       struct rq *rq;
4580 +
4581 +       switch (action) {
4582 +
4583 +       case CPU_UP_PREPARE:
4584 +       case CPU_UP_PREPARE_FROZEN:
4585 +               break;
4586 +
4587 +       case CPU_ONLINE:
4588 +       case CPU_ONLINE_FROZEN:
4589 +               /* Update our root-domain */
4590 +               rq = cpu_rq(cpu);
4591 +               grq_lock_irqsave(&flags);
4592 +               if (rq->rd) {
4593 +                       BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
4594 +
4595 +                       set_rq_online(rq);
4596 +               }
4597 +               add_cpu(cpu);
4598 +               grq_unlock_irqrestore(&flags);
4599 +               break;
4600 +
4601 +#ifdef CONFIG_HOTPLUG_CPU
4602 +       case CPU_UP_CANCELED:
4603 +       case CPU_UP_CANCELED_FROZEN:
4604 +               break;
4605 +
4606 +       case CPU_DEAD:
4607 +       case CPU_DEAD_FROZEN:
4608 +               cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
4609 +               rq = cpu_rq(cpu);
4610 +               /* Idle task back to normal (off runqueue, low prio) */
4611 +               grq_lock_irq();
4612 +               remove_cpu(cpu);
4613 +               deactivate_task(rq->idle);
4614 +               rq->idle->static_prio = MAX_PRIO;
4615 +               __setscheduler(rq->idle, SCHED_NORMAL, 0);
4616 +               rq->idle->prio = PRIO_LIMIT;
4617 +               update_rq_clock(rq);
4618 +               grq_unlock_irq();
4619 +               cpuset_unlock();
4620 +               break;
4621 +
4622 +       case CPU_DYING:
4623 +       case CPU_DYING_FROZEN:
4624 +               rq = cpu_rq(cpu);
4625 +               grq_lock_irqsave(&flags);
4626 +               if (rq->rd) {
4627 +                       BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
4628 +                       set_rq_offline(rq);
4629 +               }
4630 +               grq_unlock_irqrestore(&flags);
4631 +               break;
4632 +#endif
4633 +       }
4634 +       return NOTIFY_OK;
4635 +}
4636 +
4637 +/*
4638 + * Register at high priority so that task migration (migrate_all_tasks)
4639 + * happens before everything else.  This has to be lower priority than
4640 + * the notifier in the perf_counter subsystem, though.
4641 + */
4642 +static struct notifier_block __cpuinitdata migration_notifier = {
4643 +       .notifier_call = migration_call,
4644 +       .priority = 10
4645 +};
4646 +
4647 +int __init migration_init(void)
4648 +{
4649 +       void *cpu = (void *)(long)smp_processor_id();
4650 +       int err;
4651 +
4652 +       /* Start one for the boot CPU: */
4653 +       err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
4654 +       BUG_ON(err == NOTIFY_BAD);
4655 +       migration_call(&migration_notifier, CPU_ONLINE, cpu);
4656 +       register_cpu_notifier(&migration_notifier);
4657 +
4658 +       return 0;
4659 +}
4660 +early_initcall(migration_init);
4661 +#endif
4662 +
4663 +/*
4664 + * sched_domains_mutex serializes calls to arch_init_sched_domains,
4665 + * detach_destroy_domains and partition_sched_domains.
4666 + */
4667 +static DEFINE_MUTEX(sched_domains_mutex);
4668 +
4669 +#ifdef CONFIG_SMP
4670 +
4671 +#ifdef CONFIG_SCHED_DEBUG
4672 +
4673 +static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
4674 +                                 struct cpumask *groupmask)
4675 +{
4676 +       struct sched_group *group = sd->groups;
4677 +       char str[256];
4678 +
4679 +       cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
4680 +       cpumask_clear(groupmask);
4681 +
4682 +       printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
4683 +
4684 +       if (!(sd->flags & SD_LOAD_BALANCE)) {
4685 +               printk("does not load-balance\n");
4686 +               if (sd->parent)
4687 +                       printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
4688 +                                       " has parent");
4689 +               return -1;
4690 +       }
4691 +
4692 +       printk(KERN_CONT "span %s level %s\n", str, sd->name);
4693 +
4694 +       if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
4695 +               printk(KERN_ERR "ERROR: domain->span does not contain "
4696 +                               "CPU%d\n", cpu);
4697 +       }
4698 +       if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
4699 +               printk(KERN_ERR "ERROR: domain->groups does not contain"
4700 +                               " CPU%d\n", cpu);
4701 +       }
4702 +
4703 +       printk(KERN_DEBUG "%*s groups:", level + 1, "");
4704 +       do {
4705 +               if (!group) {
4706 +                       printk("\n");
4707 +                       printk(KERN_ERR "ERROR: group is NULL\n");
4708 +                       break;
4709 +               }
4710 +
4711 +               if (!group->__cpu_power) {
4712 +                       printk(KERN_CONT "\n");
4713 +                       printk(KERN_ERR "ERROR: domain->cpu_power not "
4714 +                                       "set\n");
4715 +                       break;
4716 +               }
4717 +
4718 +               if (!cpumask_weight(sched_group_cpus(group))) {
4719 +                       printk(KERN_CONT "\n");
4720 +                       printk(KERN_ERR "ERROR: empty group\n");
4721 +                       break;
4722 +               }
4723 +
4724 +               if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
4725 +                       printk(KERN_CONT "\n");
4726 +                       printk(KERN_ERR "ERROR: repeated CPUs\n");
4727 +                       break;
4728 +               }
4729 +
4730 +               cpumask_or(groupmask, groupmask, sched_group_cpus(group));
4731 +
4732 +               cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
4733 +
4734 +               printk(KERN_CONT " %s", str);
4735 +               if (group->__cpu_power != SCHED_LOAD_SCALE) {
4736 +                       printk(KERN_CONT " (__cpu_power = %d)",
4737 +                               group->__cpu_power);
4738 +               }
4739 +
4740 +               group = group->next;
4741 +       } while (group != sd->groups);
4742 +       printk(KERN_CONT "\n");
4743 +
4744 +       if (!cpumask_equal(sched_domain_span(sd), groupmask))
4745 +               printk(KERN_ERR "ERROR: groups don't span domain->span\n");
4746 +
4747 +       if (sd->parent &&
4748 +           !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
4749 +               printk(KERN_ERR "ERROR: parent span is not a superset "
4750 +                       "of domain->span\n");
4751 +       return 0;
4752 +}
4753 +
4754 +static void sched_domain_debug(struct sched_domain *sd, int cpu)
4755 +{
4756 +       cpumask_var_t groupmask;
4757 +       int level = 0;
4758 +
4759 +       if (!sd) {
4760 +               printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
4761 +               return;
4762 +       }
4763 +
4764 +       printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
4765 +
4766 +       if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
4767 +               printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
4768 +               return;
4769 +       }
4770 +
4771 +       for (;;) {
4772 +               if (sched_domain_debug_one(sd, cpu, level, groupmask))
4773 +                       break;
4774 +               level++;
4775 +               sd = sd->parent;
4776 +               if (!sd)
4777 +                       break;
4778 +       }
4779 +       free_cpumask_var(groupmask);
4780 +}
4781 +#else /* !CONFIG_SCHED_DEBUG */
4782 +# define sched_domain_debug(sd, cpu) do { } while (0)
4783 +#endif /* CONFIG_SCHED_DEBUG */
4784 +
4785 +static int sd_degenerate(struct sched_domain *sd)
4786 +{
4787 +       if (cpumask_weight(sched_domain_span(sd)) == 1)
4788 +               return 1;
4789 +
4790 +       /* Following flags need at least 2 groups */
4791 +       if (sd->flags & (SD_LOAD_BALANCE |
4792 +                        SD_BALANCE_NEWIDLE |
4793 +                        SD_BALANCE_FORK |
4794 +                        SD_BALANCE_EXEC |
4795 +                        SD_SHARE_CPUPOWER |
4796 +                        SD_SHARE_PKG_RESOURCES)) {
4797 +               if (sd->groups != sd->groups->next)
4798 +                       return 0;
4799 +       }
4800 +
4801 +       /* Following flags don't use groups */
4802 +       if (sd->flags & (SD_WAKE_IDLE |
4803 +                        SD_WAKE_AFFINE |
4804 +                        SD_WAKE_BALANCE))
4805 +               return 0;
4806 +
4807 +       return 1;
4808 +}
4809 +
4810 +static int
4811 +sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
4812 +{
4813 +       unsigned long cflags = sd->flags, pflags = parent->flags;
4814 +
4815 +       if (sd_degenerate(parent))
4816 +               return 1;
4817 +
4818 +       if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
4819 +               return 0;
4820 +
4821 +       /* Does parent contain flags not in child? */
4822 +       /* WAKE_BALANCE is a subset of WAKE_AFFINE */
4823 +       if (cflags & SD_WAKE_AFFINE)
4824 +               pflags &= ~SD_WAKE_BALANCE;
4825 +       /* Flags needing groups don't count if only 1 group in parent */
4826 +       if (parent->groups == parent->groups->next) {
4827 +               pflags &= ~(SD_LOAD_BALANCE |
4828 +                               SD_BALANCE_NEWIDLE |
4829 +                               SD_BALANCE_FORK |
4830 +                               SD_BALANCE_EXEC |
4831 +                               SD_SHARE_CPUPOWER |
4832 +                               SD_SHARE_PKG_RESOURCES);
4833 +               if (nr_node_ids == 1)
4834 +                       pflags &= ~SD_SERIALIZE;
4835 +       }
4836 +       if (~cflags & pflags)
4837 +               return 0;
4838 +
4839 +       return 1;
4840 +}
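+/*
+ * Illustrative example of the two checks above: a level spanning a single
+ * CPU is degenerate outright, and a parent that spans exactly the same CPUs
+ * as its child while adding no flags that need a second group is degenerate
+ * relative to that child.  On a typical one-node machine the NODE level ends
+ * up covering the same CPUs as the CPU level beneath it, so it is expected
+ * to be spliced out by cpu_attach_domain() below.
+ */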
4841 +
4842 +static void free_rootdomain(struct root_domain *rd)
4843 +{
4844 +       free_cpumask_var(rd->rto_mask);
4845 +       free_cpumask_var(rd->online);
4846 +       free_cpumask_var(rd->span);
4847 +       kfree(rd);
4848 +}
4849 +
4850 +static void rq_attach_root(struct rq *rq, struct root_domain *rd)
4851 +{
4852 +       struct root_domain *old_rd = NULL;
4853 +       unsigned long flags;
4854 +
4855 +       grq_lock_irqsave(&flags);
4856 +
4857 +       if (rq->rd) {
4858 +               old_rd = rq->rd;
4859 +
4860 +               if (cpumask_test_cpu(rq->cpu, old_rd->online))
4861 +                       set_rq_offline(rq);
4862 +
4863 +               cpumask_clear_cpu(rq->cpu, old_rd->span);
4864 +
4865 +               /*
4866 +                * If we don't want to free the old_rd yet then
4867 +                * set old_rd to NULL to skip the freeing later
4868 +                * in this function:
4869 +                */
4870 +               if (!atomic_dec_and_test(&old_rd->refcount))
4871 +                       old_rd = NULL;
4872 +       }
4873 +
4874 +       atomic_inc(&rd->refcount);
4875 +       rq->rd = rd;
4876 +
4877 +       cpumask_set_cpu(rq->cpu, rd->span);
4878 +       if (cpumask_test_cpu(rq->cpu, cpu_online_mask))
4879 +               set_rq_online(rq);
4880 +
4881 +       grq_unlock_irqrestore(&flags);
4882 +
4883 +       if (old_rd)
4884 +               free_rootdomain(old_rd);
4885 +}
4886 +
4887 +static int init_rootdomain(struct root_domain *rd, bool bootmem)
4888 +{
4889 +       gfp_t gfp = GFP_KERNEL;
4890 +
4891 +       memset(rd, 0, sizeof(*rd));
4892 +
4893 +       if (bootmem)
4894 +               gfp = GFP_NOWAIT;
4895 +
4896 +       if (!alloc_cpumask_var(&rd->span, gfp))
4897 +               goto out;
4898 +       if (!alloc_cpumask_var(&rd->online, gfp))
4899 +               goto free_span;
4900 +       if (!alloc_cpumask_var(&rd->rto_mask, gfp))
4901 +               goto free_online;
4902 +
4903 +       return 0;
4904 +
4905 +free_online:
4906 +       free_cpumask_var(rd->online);
4907 +free_span:
4908 +       free_cpumask_var(rd->span);
4909 +out:
4910 +       return -ENOMEM;
4911 +}
4912 +
4913 +static void init_defrootdomain(void)
4914 +{
4915 +       init_rootdomain(&def_root_domain, true);
4916 +
4917 +       atomic_set(&def_root_domain.refcount, 1);
4918 +}
4919 +
4920 +static struct root_domain *alloc_rootdomain(void)
4921 +{
4922 +       struct root_domain *rd;
4923 +
4924 +       rd = kmalloc(sizeof(*rd), GFP_KERNEL);
4925 +       if (!rd)
4926 +               return NULL;
4927 +
4928 +       if (init_rootdomain(rd, false) != 0) {
4929 +               kfree(rd);
4930 +               return NULL;
4931 +       }
4932 +
4933 +       return rd;
4934 +}
4935 +
4936 +/*
4937 + * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
4938 + * hold the hotplug lock.
4939 + */
4940 +static void
4941 +cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
4942 +{
4943 +       struct rq *rq = cpu_rq(cpu);
4944 +       struct sched_domain *tmp;
4945 +
4946 +       /* Remove the sched domains which do not contribute to scheduling. */
4947 +       for (tmp = sd; tmp; ) {
4948 +               struct sched_domain *parent = tmp->parent;
4949 +               if (!parent)
4950 +                       break;
4951 +
4952 +               if (sd_parent_degenerate(tmp, parent)) {
4953 +                       tmp->parent = parent->parent;
4954 +                       if (parent->parent)
4955 +                               parent->parent->child = tmp;
4956 +               } else
4957 +                       tmp = tmp->parent;
4958 +       }
4959 +
4960 +       if (sd && sd_degenerate(sd)) {
4961 +               sd = sd->parent;
4962 +               if (sd)
4963 +                       sd->child = NULL;
4964 +       }
4965 +
4966 +       sched_domain_debug(sd, cpu);
4967 +
4968 +       rq_attach_root(rq, rd);
4969 +       rcu_assign_pointer(rq->sd, sd);
4970 +}
4971 +
4972 +/* cpus with isolated domains */
4973 +static cpumask_var_t cpu_isolated_map;
4974 +
4975 +/* Setup the mask of cpus configured for isolated domains */
4976 +static int __init isolated_cpu_setup(char *str)
4977 +{
4978 +       cpulist_parse(str, cpu_isolated_map);
4979 +       return 1;
4980 +}
4981 +
4982 +__setup("isolcpus=", isolated_cpu_setup);
4983 +
4984 +/*
4985 + * init_sched_build_groups takes the cpumask we wish to span, and a pointer
4986 + * to a function which identifies what group (along with sched group) a CPU
4987 + * belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
4988 + * (due to the fact that we keep track of groups covered with a struct cpumask).
4989 + *
4990 + * init_sched_build_groups will build a circular linked list of the groups
4991 + * covered by the given span, and will set each group's ->cpumask correctly,
4992 + * and ->cpu_power to 0.
4993 + */
4994 +static void
4995 +init_sched_build_groups(const struct cpumask *span,
4996 +                       const struct cpumask *cpu_map,
4997 +                       int (*group_fn)(int cpu, const struct cpumask *cpu_map,
4998 +                                       struct sched_group **sg,
4999 +                                       struct cpumask *tmpmask),
5000 +                       struct cpumask *covered, struct cpumask *tmpmask)
5001 +{
5002 +       struct sched_group *first = NULL, *last = NULL;
5003 +       int i;
5004 +
5005 +       cpumask_clear(covered);
5006 +
5007 +       for_each_cpu(i, span) {
5008 +               struct sched_group *sg;
5009 +               int group = group_fn(i, cpu_map, &sg, tmpmask);
5010 +               int j;
5011 +
5012 +               if (cpumask_test_cpu(i, covered))
5013 +                       continue;
5014 +
5015 +               cpumask_clear(sched_group_cpus(sg));
5016 +               sg->__cpu_power = 0;
5017 +
5018 +               for_each_cpu(j, span) {
5019 +                       if (group_fn(j, cpu_map, NULL, tmpmask) != group)
5020 +                               continue;
5021 +
5022 +                       cpumask_set_cpu(j, covered);
5023 +                       cpumask_set_cpu(j, sched_group_cpus(sg));
5024 +               }
5025 +               if (!first)
5026 +                       first = sg;
5027 +               if (last)
5028 +                       last->next = sg;
5029 +               last = sg;
5030 +       }
5031 +       last->next = first;
5032 +}
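+/*
+ * Illustrative example (assuming a 4-CPU SMP box without SMT or MC): at the
+ * physical level cpu_to_phys_group() maps each CPU to its own group, so the
+ * loop above produces four single-CPU groups linked into the ring
+ * 0 -> 1 -> 2 -> 3 -> 0, each with ->__cpu_power left at 0 until
+ * init_sched_groups_power() fills it in.
+ */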
5033 +
5034 +#define SD_NODES_PER_DOMAIN 16
5035 +
5036 +#ifdef CONFIG_NUMA
5037 +
5038 +/**
5039 + * find_next_best_node - find the next node to include in a sched_domain
5040 + * @node: node whose sched_domain we're building
5041 + * @used_nodes: nodes already in the sched_domain
5042 + *
5043 + * Find the next node to include in a given scheduling domain. Simply
5044 + * finds the closest node not already in the @used_nodes map.
5045 + *
5046 + * Should use nodemask_t.
5047 + */
5048 +static int find_next_best_node(int node, nodemask_t *used_nodes)
5049 +{
5050 +       int i, n, val, min_val, best_node = 0;
5051 +
5052 +       min_val = INT_MAX;
5053 +
5054 +       for (i = 0; i < nr_node_ids; i++) {
5055 +               /* Start at @node */
5056 +               n = (node + i) % nr_node_ids;
5057 +
5058 +               if (!nr_cpus_node(n))
5059 +                       continue;
5060 +
5061 +               /* Skip already used nodes */
5062 +               if (node_isset(n, *used_nodes))
5063 +                       continue;
5064 +
5065 +               /* Simple min distance search */
5066 +               val = node_distance(node, n);
5067 +
5068 +               if (val < min_val) {
5069 +                       min_val = val;
5070 +                       best_node = n;
5071 +               }
5072 +       }
5073 +
5074 +       node_set(best_node, *used_nodes);
5075 +       return best_node;
5076 +}
5077 +
5078 +/**
5079 + * sched_domain_node_span - get a cpumask for a node's sched_domain
5080 + * @node: node whose cpumask we're constructing
5081 + * @span: resulting cpumask
5082 + *
5083 + * Given a node, construct a good cpumask for its sched_domain to span. It
5084 + * should be one that prevents unnecessary balancing, but also spreads tasks
5085 + * out optimally.
5086 + */
5087 +static void sched_domain_node_span(int node, struct cpumask *span)
5088 +{
5089 +       nodemask_t used_nodes;
5090 +       int i;
5091 +
5092 +       cpumask_clear(span);
5093 +       nodes_clear(used_nodes);
5094 +
5095 +       cpumask_or(span, span, cpumask_of_node(node));
5096 +       node_set(node, used_nodes);
5097 +
5098 +       for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
5099 +               int next_node = find_next_best_node(node, &used_nodes);
5100 +
5101 +               cpumask_or(span, span, cpumask_of_node(next_node));
5102 +       }
5103 +}
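+/*
+ * Illustrative example (hypothetical distance table): with four nodes where
+ * node_distance() is 10 locally and 20 between any two different nodes,
+ * find_next_best_node(0, ...) returns 1, then 2, then 3, so the span built
+ * for node 0 grows to cover all four nodes (bounded by SD_NODES_PER_DOMAIN).
+ */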
5104 +#endif /* CONFIG_NUMA */
5105 +
5106 +int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
5107 +
5108 +/*
5109 + * The cpus mask in sched_group and sched_domain hangs off the end.
5110 + *
5111 + * ( See the comments in include/linux/sched.h:struct sched_group
5112 + *   and struct sched_domain. )
5113 + */
5114 +struct static_sched_group {
5115 +       struct sched_group sg;
5116 +       DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
5117 +};
5118 +
5119 +struct static_sched_domain {
5120 +       struct sched_domain sd;
5121 +       DECLARE_BITMAP(span, CONFIG_NR_CPUS);
5122 +};
5123 +
5124 +/*
5125 + * SMT sched-domains:
5126 + */
5127 +#ifdef CONFIG_SCHED_SMT
5128 +static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
5129 +static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
5130 +
5131 +static int
5132 +cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
5133 +                struct sched_group **sg, struct cpumask *unused)
5134 +{
5135 +       if (sg)
5136 +               *sg = &per_cpu(sched_group_cpus, cpu).sg;
5137 +       return cpu;
5138 +}
5139 +#endif /* CONFIG_SCHED_SMT */
5140 +
5141 +/*
5142 + * multi-core sched-domains:
5143 + */
5144 +#ifdef CONFIG_SCHED_MC
5145 +static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
5146 +static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
5147 +#endif /* CONFIG_SCHED_MC */
5148 +
5149 +#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
5150 +static int
5151 +cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
5152 +                 struct sched_group **sg, struct cpumask *mask)
5153 +{
5154 +       int group;
5155 +
5156 +       cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
5157 +       group = cpumask_first(mask);
5158 +       if (sg)
5159 +               *sg = &per_cpu(sched_group_core, group).sg;
5160 +       return group;
5161 +}
5162 +#elif defined(CONFIG_SCHED_MC)
5163 +static int
5164 +cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
5165 +                 struct sched_group **sg, struct cpumask *unused)
5166 +{
5167 +       if (sg)
5168 +               *sg = &per_cpu(sched_group_core, cpu).sg;
5169 +       return cpu;
5170 +}
5171 +#endif
5172 +
5173 +static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
5174 +static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
5175 +
5176 +static int
5177 +cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
5178 +                 struct sched_group **sg, struct cpumask *mask)
5179 +{
5180 +       int group;
5181 +#ifdef CONFIG_SCHED_MC
5182 +       cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
5183 +       group = cpumask_first(mask);
5184 +#elif defined(CONFIG_SCHED_SMT)
5185 +       cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
5186 +       group = cpumask_first(mask);
5187 +#else
5188 +       group = cpu;
5189 +#endif
5190 +       if (sg)
5191 +               *sg = &per_cpu(sched_group_phys, group).sg;
5192 +       return group;
5193 +}
5194 +
5195 +/**
5196 + * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
5197 + * @group: The group whose first cpu is to be returned.
5198 + */
5199 +static inline unsigned int group_first_cpu(struct sched_group *group)
5200 +{
5201 +       return cpumask_first(sched_group_cpus(group));
5202 +}
5203 +
5204 +#ifdef CONFIG_NUMA
5205 +/*
5206 + * The init_sched_build_groups can't handle what we want to do with node
5207 + * groups, so roll our own. Now each node has its own list of groups which
5208 + * gets dynamically allocated.
5209 + */
5210 +static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
5211 +static struct sched_group ***sched_group_nodes_bycpu;
5212 +
5213 +static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
5214 +static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
5215 +
5216 +static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
5217 +                                struct sched_group **sg,
5218 +                                struct cpumask *nodemask)
5219 +{
5220 +       int group;
5221 +
5222 +       cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
5223 +       group = cpumask_first(nodemask);
5224 +
5225 +       if (sg)
5226 +               *sg = &per_cpu(sched_group_allnodes, group).sg;
5227 +       return group;
5228 +}
5229 +
5230 +static void init_numa_sched_groups_power(struct sched_group *group_head)
5231 +{
5232 +       struct sched_group *sg = group_head;
5233 +       int j;
5234 +
5235 +       if (!sg)
5236 +               return;
5237 +       do {
5238 +               for_each_cpu(j, sched_group_cpus(sg)) {
5239 +                       struct sched_domain *sd;
5240 +
5241 +                       sd = &per_cpu(phys_domains, j).sd;
5242 +                       if (j != group_first_cpu(sd->groups)) {
5243 +                               /*
5244 +                                * Only add "power" once for each
5245 +                                * physical package.
5246 +                                */
5247 +                               continue;
5248 +                       }
5249 +
5250 +                       sg_inc_cpu_power(sg, sd->groups->__cpu_power);
5251 +               }
5252 +               sg = sg->next;
5253 +       } while (sg != group_head);
5254 +}
5255 +#endif /* CONFIG_NUMA */
5256 +
5257 +#ifdef CONFIG_NUMA
5258 +/* Free memory allocated for various sched_group structures */
5259 +static void free_sched_groups(const struct cpumask *cpu_map,
5260 +                             struct cpumask *nodemask)
5261 +{
5262 +       int cpu, i;
5263 +
5264 +       for_each_cpu(cpu, cpu_map) {
5265 +               struct sched_group **sched_group_nodes
5266 +                       = sched_group_nodes_bycpu[cpu];
5267 +
5268 +               if (!sched_group_nodes)
5269 +                       continue;
5270 +
5271 +               for (i = 0; i < nr_node_ids; i++) {
5272 +                       struct sched_group *oldsg, *sg = sched_group_nodes[i];
5273 +
5274 +                       cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
5275 +                       if (cpumask_empty(nodemask))
5276 +                               continue;
5277 +
5278 +                       if (sg == NULL)
5279 +                               continue;
5280 +                       sg = sg->next;
5281 +next_sg:
5282 +                       oldsg = sg;
5283 +                       sg = sg->next;
5284 +                       kfree(oldsg);
5285 +                       if (oldsg != sched_group_nodes[i])
5286 +                               goto next_sg;
5287 +               }
5288 +               kfree(sched_group_nodes);
5289 +               sched_group_nodes_bycpu[cpu] = NULL;
5290 +       }
5291 +}
5292 +#else /* !CONFIG_NUMA */
5293 +static void free_sched_groups(const struct cpumask *cpu_map,
5294 +                             struct cpumask *nodemask)
5295 +{
5296 +}
5297 +#endif /* CONFIG_NUMA */
5298 +
5299 +/*
5300 + * Initialize sched groups cpu_power.
5301 + *
5302 + * cpu_power indicates the capacity of sched group, which is used while
5303 + * distributing the load between different sched groups in a sched domain.
5304 + * Typically cpu_power for all the groups in a sched domain will be the same unless
5305 + * there are asymmetries in the topology. If there are asymmetries, group
5306 + * having more cpu_power will pick up more load compared to the group having
5307 + * less cpu_power.
5308 + *
5309 + * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
5310 + * the maximum number of tasks a group can handle in the presence of other idle
5311 + * or lightly loaded groups in the same sched domain.
5312 + */
5313 +static void init_sched_groups_power(int cpu, struct sched_domain *sd)
5314 +{
5315 +       struct sched_domain *child;
5316 +       struct sched_group *group;
5317 +
5318 +       WARN_ON(!sd || !sd->groups);
5319 +
5320 +       if (cpu != group_first_cpu(sd->groups))
5321 +               return;
5322 +
5323 +       child = sd->child;
5324 +
5325 +       sd->groups->__cpu_power = 0;
5326 +
5327 +       /*
5328 +        * For perf policy, if the groups in child domain share resources
5329 +        * (for example cores sharing some portions of the cache hierarchy
5330 +        * or SMT), then set this domain's groups' cpu_power such that each group
5331 +        * can handle only one task, when there are other idle groups in the
5332 +        * same sched domain.
5333 +        */
5334 +       if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
5335 +                      (child->flags &
5336 +                       (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
5337 +               sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
5338 +               return;
5339 +       }
5340 +
5341 +       /*
5342 +        * add the cpu_power of each child group to this group's cpu_power
5343 +        */
5344 +       group = child->groups;
5345 +       do {
5346 +               sg_inc_cpu_power(sd->groups, group->__cpu_power);
5347 +               group = group->next;
5348 +       } while (group != child->groups);
5349 +}
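+/*
+ * Illustrative walk-through of the function above, default (performance)
+ * policy assumed: a SIBLING-level group has no child, so it gets exactly
+ * SCHED_LOAD_SCALE; a parent whose child shares cpu power or package
+ * resources (SMT siblings, cores sharing cache) is likewise capped at
+ * SCHED_LOAD_SCALE so that, when other groups in the same domain are idle,
+ * it is asked to handle only one task.  Only with SD_POWERSAVINGS_BALANCE
+ * set (or a child that shares nothing) does a group's power become the sum
+ * of its child groups' power.
+ */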
5350 +
5351 +/*
5352 + * Initializers for schedule domains
5353 + * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
5354 + */
5355 +
5356 +#ifdef CONFIG_SCHED_DEBUG
5357 +# define SD_INIT_NAME(sd, type)                sd->name = #type
5358 +#else
5359 +# define SD_INIT_NAME(sd, type)                do { } while (0)
5360 +#endif
5361 +
5362 +#define        SD_INIT(sd, type)       sd_init_##type(sd)
5363 +
5364 +#define SD_INIT_FUNC(type)     \
5365 +static noinline void sd_init_##type(struct sched_domain *sd)   \
5366 +{                                                              \
5367 +       memset(sd, 0, sizeof(*sd));                             \
5368 +       *sd = SD_##type##_INIT;                                 \
5369 +       sd->level = SD_LV_##type;                               \
5370 +       SD_INIT_NAME(sd, type);                                 \
5371 +}
5372 +
5373 +SD_INIT_FUNC(CPU)
5374 +#ifdef CONFIG_NUMA
5375 + SD_INIT_FUNC(ALLNODES)
5376 + SD_INIT_FUNC(NODE)
5377 +#endif
5378 +#ifdef CONFIG_SCHED_SMT
5379 + SD_INIT_FUNC(SIBLING)
5380 +#endif
5381 +#ifdef CONFIG_SCHED_MC
5382 + SD_INIT_FUNC(MC)
5383 +#endif
5384 +
5385 +static int default_relax_domain_level = -1;
5386 +
5387 +static int __init setup_relax_domain_level(char *str)
5388 +{
5389 +       unsigned long val;
5390 +
5391 +       val = simple_strtoul(str, NULL, 0);
5392 +       if (val < SD_LV_MAX)
5393 +               default_relax_domain_level = val;
5394 +
5395 +       return 1;
5396 +}
5397 +__setup("relax_domain_level=", setup_relax_domain_level);
5398 +
5399 +static void set_domain_attribute(struct sched_domain *sd,
5400 +                                struct sched_domain_attr *attr)
5401 +{
5402 +       int request;
5403 +
5404 +       if (!attr || attr->relax_domain_level < 0) {
5405 +               if (default_relax_domain_level < 0)
5406 +                       return;
5407 +               else
5408 +                       request = default_relax_domain_level;
5409 +       } else
5410 +               request = attr->relax_domain_level;
5411 +       if (request < sd->level) {
5412 +               /* turn off idle balance on this domain */
5413 +               sd->flags &= ~(SD_WAKE_IDLE|SD_BALANCE_NEWIDLE);
5414 +       } else {
5415 +               /* turn on idle balance on this domain */
5416 +               sd->flags |= (SD_WAKE_IDLE_FAR|SD_BALANCE_NEWIDLE);
5417 +       }
5418 +}
5419 +
5420 +/*
5421 + * Build sched domains for a given set of cpus and attach the sched domains
5422 + * to the individual cpus
5423 + */
5424 +static int __build_sched_domains(const struct cpumask *cpu_map,
5425 +                                struct sched_domain_attr *attr)
5426 +{
5427 +       int i, err = -ENOMEM;
5428 +       struct root_domain *rd;
5429 +       cpumask_var_t nodemask, this_sibling_map, this_core_map, send_covered,
5430 +               tmpmask;
5431 +#ifdef CONFIG_NUMA
5432 +       cpumask_var_t domainspan, covered, notcovered;
5433 +       struct sched_group **sched_group_nodes = NULL;
5434 +       int sd_allnodes = 0;
5435 +
5436 +       if (!alloc_cpumask_var(&domainspan, GFP_KERNEL))
5437 +               goto out;
5438 +       if (!alloc_cpumask_var(&covered, GFP_KERNEL))
5439 +               goto free_domainspan;
5440 +       if (!alloc_cpumask_var(&notcovered, GFP_KERNEL))
5441 +               goto free_covered;
5442 +#endif
5443 +
5444 +       if (!alloc_cpumask_var(&nodemask, GFP_KERNEL))
5445 +               goto free_notcovered;
5446 +       if (!alloc_cpumask_var(&this_sibling_map, GFP_KERNEL))
5447 +               goto free_nodemask;
5448 +       if (!alloc_cpumask_var(&this_core_map, GFP_KERNEL))
5449 +               goto free_this_sibling_map;
5450 +       if (!alloc_cpumask_var(&send_covered, GFP_KERNEL))
5451 +               goto free_this_core_map;
5452 +       if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
5453 +               goto free_send_covered;
5454 +
5455 +#ifdef CONFIG_NUMA
5456 +       /*
5457 +        * Allocate the per-node list of sched groups
5458 +        */
5459 +       sched_group_nodes = kcalloc(nr_node_ids, sizeof(struct sched_group *),
5460 +                                   GFP_KERNEL);
5461 +       if (!sched_group_nodes) {
5462 +               printk(KERN_WARNING "Can not alloc sched group node list\n");
5463 +               goto free_tmpmask;
5464 +       }
5465 +#endif
5466 +
5467 +       rd = alloc_rootdomain();
5468 +       if (!rd) {
5469 +               printk(KERN_WARNING "Cannot alloc root domain\n");
5470 +               goto free_sched_groups;
5471 +       }
5472 +
5473 +#ifdef CONFIG_NUMA
5474 +       sched_group_nodes_bycpu[cpumask_first(cpu_map)] = sched_group_nodes;
5475 +#endif
5476 +
5477 +       /*
5478 +        * Set up domains for cpus specified by the cpu_map.
5479 +        */
5480 +       for_each_cpu(i, cpu_map) {
5481 +               struct sched_domain *sd = NULL, *p;
5482 +
5483 +               cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
5484 +
5485 +#ifdef CONFIG_NUMA
5486 +               if (cpumask_weight(cpu_map) >
5487 +                               SD_NODES_PER_DOMAIN*cpumask_weight(nodemask)) {
5488 +                       sd = &per_cpu(allnodes_domains, i).sd;
5489 +                       SD_INIT(sd, ALLNODES);
5490 +                       set_domain_attribute(sd, attr);
5491 +                       cpumask_copy(sched_domain_span(sd), cpu_map);
5492 +                       cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
5493 +                       p = sd;
5494 +                       sd_allnodes = 1;
5495 +               } else
5496 +                       p = NULL;
5497 +
5498 +               sd = &per_cpu(node_domains, i).sd;
5499 +               SD_INIT(sd, NODE);
5500 +               set_domain_attribute(sd, attr);
5501 +               sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
5502 +               sd->parent = p;
5503 +               if (p)
5504 +                       p->child = sd;
5505 +               cpumask_and(sched_domain_span(sd),
5506 +                           sched_domain_span(sd), cpu_map);
5507 +#endif
5508 +
5509 +               p = sd;
5510 +               sd = &per_cpu(phys_domains, i).sd;
5511 +               SD_INIT(sd, CPU);
5512 +               set_domain_attribute(sd, attr);
5513 +               cpumask_copy(sched_domain_span(sd), nodemask);
5514 +               sd->parent = p;
5515 +               if (p)
5516 +                       p->child = sd;
5517 +               cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
5518 +
5519 +#ifdef CONFIG_SCHED_MC
5520 +               p = sd;
5521 +               sd = &per_cpu(core_domains, i).sd;
5522 +               SD_INIT(sd, MC);
5523 +               set_domain_attribute(sd, attr);
5524 +               cpumask_and(sched_domain_span(sd), cpu_map,
5525 +                                                  cpu_coregroup_mask(i));
5526 +               sd->parent = p;
5527 +               p->child = sd;
5528 +               cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
5529 +#endif
5530 +
5531 +#ifdef CONFIG_SCHED_SMT
5532 +               p = sd;
5533 +               sd = &per_cpu(cpu_domains, i).sd;
5534 +               SD_INIT(sd, SIBLING);
5535 +               set_domain_attribute(sd, attr);
5536 +               cpumask_and(sched_domain_span(sd),
5537 +                           topology_thread_cpumask(i), cpu_map);
5538 +               sd->parent = p;
5539 +               p->child = sd;
5540 +               cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
5541 +#endif
5542 +       }
5543 +
5544 +#ifdef CONFIG_SCHED_SMT
5545 +       /* Set up CPU (sibling) groups */
5546 +       for_each_cpu(i, cpu_map) {
5547 +               cpumask_and(this_sibling_map,
5548 +                           topology_thread_cpumask(i), cpu_map);
5549 +               if (i != cpumask_first(this_sibling_map))
5550 +                       continue;
5551 +
5552 +               init_sched_build_groups(this_sibling_map, cpu_map,
5553 +                                       &cpu_to_cpu_group,
5554 +                                       send_covered, tmpmask);
5555 +       }
5556 +#endif
5557 +
5558 +#ifdef CONFIG_SCHED_MC
5559 +       /* Set up multi-core groups */
5560 +       for_each_cpu(i, cpu_map) {
5561 +               cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
5562 +               if (i != cpumask_first(this_core_map))
5563 +                       continue;
5564 +
5565 +               init_sched_build_groups(this_core_map, cpu_map,
5566 +                                       &cpu_to_core_group,
5567 +                                       send_covered, tmpmask);
5568 +       }
5569 +#endif
5570 +
5571 +       /* Set up physical groups */
5572 +       for (i = 0; i < nr_node_ids; i++) {
5573 +               cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
5574 +               if (cpumask_empty(nodemask))
5575 +                       continue;
5576 +
5577 +               init_sched_build_groups(nodemask, cpu_map,
5578 +                                       &cpu_to_phys_group,
5579 +                                       send_covered, tmpmask);
5580 +       }
5581 +
5582 +#ifdef CONFIG_NUMA
5583 +       /* Set up node groups */
5584 +       if (sd_allnodes) {
5585 +               init_sched_build_groups(cpu_map, cpu_map,
5586 +                                       &cpu_to_allnodes_group,
5587 +                                       send_covered, tmpmask);
5588 +       }
5589 +
5590 +       for (i = 0; i < nr_node_ids; i++) {
5591 +               /* Set up node groups */
5592 +               struct sched_group *sg, *prev;
5593 +               int j;
5594 +
5595 +               cpumask_clear(covered);
5596 +               cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
5597 +               if (cpumask_empty(nodemask)) {
5598 +                       sched_group_nodes[i] = NULL;
5599 +                       continue;
5600 +               }
5601 +
5602 +               sched_domain_node_span(i, domainspan);
5603 +               cpumask_and(domainspan, domainspan, cpu_map);
5604 +
5605 +               sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
5606 +                                 GFP_KERNEL, i);
5607 +               if (!sg) {
5608 +                       printk(KERN_WARNING "Can not alloc domain group for "
5609 +                               "node %d\n", i);
5610 +                       goto error;
5611 +               }
5612 +               sched_group_nodes[i] = sg;
5613 +               for_each_cpu(j, nodemask) {
5614 +                       struct sched_domain *sd;
5615 +
5616 +                       sd = &per_cpu(node_domains, j).sd;
5617 +                       sd->groups = sg;
5618 +               }
5619 +               sg->__cpu_power = 0;
5620 +               cpumask_copy(sched_group_cpus(sg), nodemask);
5621 +               sg->next = sg;
5622 +               cpumask_or(covered, covered, nodemask);
5623 +               prev = sg;
5624 +
5625 +               for (j = 0; j < nr_node_ids; j++) {
5626 +                       int n = (i + j) % nr_node_ids;
5627 +
5628 +                       cpumask_complement(notcovered, covered);
5629 +                       cpumask_and(tmpmask, notcovered, cpu_map);
5630 +                       cpumask_and(tmpmask, tmpmask, domainspan);
5631 +                       if (cpumask_empty(tmpmask))
5632 +                               break;
5633 +
5634 +                       cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
5635 +                       if (cpumask_empty(tmpmask))
5636 +                               continue;
5637 +
5638 +                       sg = kmalloc_node(sizeof(struct sched_group) +
5639 +                                         cpumask_size(),
5640 +                                         GFP_KERNEL, i);
5641 +                       if (!sg) {
5642 +                               printk(KERN_WARNING
5643 +                               "Can not alloc domain group for node %d\n", j);
5644 +                               goto error;
5645 +                       }
5646 +                       sg->__cpu_power = 0;
5647 +                       cpumask_copy(sched_group_cpus(sg), tmpmask);
5648 +                       sg->next = prev->next;
5649 +                       cpumask_or(covered, covered, tmpmask);
5650 +                       prev->next = sg;
5651 +                       prev = sg;
5652 +               }
5653 +       }
5654 +#endif
5655 +
5656 +       /* Calculate CPU power for physical packages and nodes */
5657 +#ifdef CONFIG_SCHED_SMT
5658 +       for_each_cpu(i, cpu_map) {
5659 +               struct sched_domain *sd = &per_cpu(cpu_domains, i).sd;
5660 +
5661 +               init_sched_groups_power(i, sd);
5662 +       }
5663 +#endif
5664 +#ifdef CONFIG_SCHED_MC
5665 +       for_each_cpu(i, cpu_map) {
5666 +               struct sched_domain *sd = &per_cpu(core_domains, i).sd;
5667 +
5668 +               init_sched_groups_power(i, sd);
5669 +       }
5670 +#endif
5671 +
5672 +       for_each_cpu(i, cpu_map) {
5673 +               struct sched_domain *sd = &per_cpu(phys_domains, i).sd;
5674 +
5675 +               init_sched_groups_power(i, sd);
5676 +       }
5677 +
5678 +#ifdef CONFIG_NUMA
5679 +       for (i = 0; i < nr_node_ids; i++)
5680 +               init_numa_sched_groups_power(sched_group_nodes[i]);
5681 +
5682 +       if (sd_allnodes) {
5683 +               struct sched_group *sg;
5684 +
5685 +               cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
5686 +                                                               tmpmask);
5687 +               init_numa_sched_groups_power(sg);
5688 +       }
5689 +#endif
5690 +
5691 +       /* Attach the domains */
5692 +       for_each_cpu(i, cpu_map) {
5693 +               struct sched_domain *sd;
5694 +#ifdef CONFIG_SCHED_SMT
5695 +               sd = &per_cpu(cpu_domains, i).sd;
5696 +#elif defined(CONFIG_SCHED_MC)
5697 +               sd = &per_cpu(core_domains, i).sd;
5698 +#else
5699 +               sd = &per_cpu(phys_domains, i).sd;
5700 +#endif
5701 +               cpu_attach_domain(sd, rd, i);
5702 +       }
5703 +
5704 +       err = 0;
5705 +
5706 +free_tmpmask:
5707 +       free_cpumask_var(tmpmask);
5708 +free_send_covered:
5709 +       free_cpumask_var(send_covered);
5710 +free_this_core_map:
5711 +       free_cpumask_var(this_core_map);
5712 +free_this_sibling_map:
5713 +       free_cpumask_var(this_sibling_map);
5714 +free_nodemask:
5715 +       free_cpumask_var(nodemask);
5716 +free_notcovered:
5717 +#ifdef CONFIG_NUMA
5718 +       free_cpumask_var(notcovered);
5719 +free_covered:
5720 +       free_cpumask_var(covered);
5721 +free_domainspan:
5722 +       free_cpumask_var(domainspan);
5723 +out:
5724 +#endif
5725 +       return err;
5726 +
5727 +free_sched_groups:
5728 +#ifdef CONFIG_NUMA
5729 +       kfree(sched_group_nodes);
5730 +#endif
5731 +       goto free_tmpmask;
5732 +
5733 +#ifdef CONFIG_NUMA
5734 +error:
5735 +       free_sched_groups(cpu_map, tmpmask);
5736 +       free_rootdomain(rd);
5737 +       goto free_tmpmask;
5738 +#endif
5739 +}
5740 +
5741 +static int build_sched_domains(const struct cpumask *cpu_map)
5742 +{
5743 +       return __build_sched_domains(cpu_map, NULL);
5744 +}
5745 +
5746 +static struct cpumask *doms_cur;       /* current sched domains */
5747 +static int ndoms_cur;          /* number of sched domains in 'doms_cur' */
5748 +static struct sched_domain_attr *dattr_cur;
5749 +                               /* attributes of custom domains in 'doms_cur' */
5750 +
5751 +/*
5752 + * Special case: If a kmalloc of a doms_cur partition (array of
5753 + * cpumask) fails, then fall back to a single sched domain,
5754 + * as determined by the single cpumask fallback_doms.
5755 + */
5756 +static cpumask_var_t fallback_doms;
5757 +
5758 +/*
5759 + * arch_update_cpu_topology lets virtualized architectures update the
5760 + * cpu core maps. It is supposed to return 1 if the topology changed
5761 + * or 0 if it stayed the same.
5762 + */
5763 +int __attribute__((weak)) arch_update_cpu_topology(void)
5764 +{
5765 +       return 0;
5766 +}
5767 +
5768 +/*
5769 + * Set up scheduler domains and groups. Callers must hold the hotplug lock.
5770 + * For now this just excludes isolated cpus, but could be used to
5771 + * exclude other special cases in the future.
5772 + */
5773 +static int arch_init_sched_domains(const struct cpumask *cpu_map)
5774 +{
5775 +       int err;
5776 +
5777 +       arch_update_cpu_topology();
5778 +       ndoms_cur = 1;
5779 +       doms_cur = kmalloc(cpumask_size(), GFP_KERNEL);
5780 +       if (!doms_cur)
5781 +               doms_cur = fallback_doms;
5782 +       cpumask_andnot(doms_cur, cpu_map, cpu_isolated_map);
5783 +       dattr_cur = NULL;
5784 +       err = build_sched_domains(doms_cur);
5785 +       register_sched_domain_sysctl();
5786 +
5787 +       return err;
5788 +}
5789 +
5790 +static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
5791 +                                      struct cpumask *tmpmask)
5792 +{
5793 +       free_sched_groups(cpu_map, tmpmask);
5794 +}
5795 +
5796 +/*
5797 + * Detach sched domains from a group of cpus specified in cpu_map
5798 + * These cpus will now be attached to the NULL domain
5799 + */
5800 +static void detach_destroy_domains(const struct cpumask *cpu_map)
5801 +{
5802 +       /* Save because hotplug lock held. */
5803 +       static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
5804 +       int i;
5805 +
5806 +       for_each_cpu(i, cpu_map)
5807 +               cpu_attach_domain(NULL, &def_root_domain, i);
5808 +       synchronize_sched();
5809 +       arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
5810 +}
5811 +
5812 +/* handle null as "default" */
5813 +static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
5814 +                       struct sched_domain_attr *new, int idx_new)
5815 +{
5816 +       struct sched_domain_attr tmp;
5817 +
5818 +       /* fast path */
5819 +       if (!new && !cur)
5820 +               return 1;
5821 +
5822 +       tmp = SD_ATTR_INIT;
5823 +       return !memcmp(cur ? (cur + idx_cur) : &tmp,
5824 +                       new ? (new + idx_new) : &tmp,
5825 +                       sizeof(struct sched_domain_attr));
5826 +}
5827 +
5828 +/*
5829 + * Partition sched domains as specified by the 'ndoms_new'
5830 + * cpumasks in the array doms_new[] of cpumasks. This compares
5831 + * doms_new[] to the current sched domain partitioning, doms_cur[].
5832 + * It destroys each deleted domain and builds each new domain.
5833 + *
5834 + * 'doms_new' is an array of cpumask's of length 'ndoms_new'.
5835 + * The masks don't intersect (don't overlap). We should set up one
5836 + * sched domain for each mask. CPUs not in any of the cpumasks will
5837 + * not be load balanced. If the same cpumask appears both in the
5838 + * current 'doms_cur' domains and in the new 'doms_new', we can leave
5839 + * it as it is.
5840 + *
5841 + * The passed in 'doms_new' should be kmalloc'd. This routine takes
5842 + * ownership of it and will kfree it when done with it. If the caller
5843 + * failed the kmalloc call, then it can pass in doms_new == NULL &&
5844 + * ndoms_new == 1, and partition_sched_domains() will fall back to
5845 + * the single partition 'fallback_doms'; this also forces the domains
5846 + * to be rebuilt.
5847 + *
5848 + * If doms_new == NULL it will be replaced with cpu_online_mask.
5849 + * ndoms_new == 0 is a special case for destroying existing domains,
5850 + * and it will not create the default domain.
5851 + *
5852 + * Call with hotplug lock held
5853 + */
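+/*
+ * Usage sketch (illustrative): when cpusets are enabled they call this with
+ * a kmalloc'd array of ndoms_new non-overlapping cpumasks, while the hotplug
+ * path below, via update_sched_domains(), calls
+ * partition_sched_domains(1, NULL, NULL) to rebuild a single default domain
+ * from cpu_online_mask minus the isolcpus= set.
+ */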
5854 +/* FIXME: Change to struct cpumask *doms_new[] */
5855 +void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
5856 +                            struct sched_domain_attr *dattr_new)
5857 +{
5858 +       int i, j, n;
5859 +       int new_topology;
5860 +
5861 +       mutex_lock(&sched_domains_mutex);
5862 +
5863 +       /* always unregister in case we don't destroy any domains */
5864 +       unregister_sched_domain_sysctl();
5865 +
5866 +       /* Let architecture update cpu core mappings. */
5867 +       new_topology = arch_update_cpu_topology();
5868 +
5869 +       n = doms_new ? ndoms_new : 0;
5870 +
5871 +       /* Destroy deleted domains */
5872 +       for (i = 0; i < ndoms_cur; i++) {
5873 +               for (j = 0; j < n && !new_topology; j++) {
5874 +                       if (cpumask_equal(&doms_cur[i], &doms_new[j])
5875 +                           && dattrs_equal(dattr_cur, i, dattr_new, j))
5876 +                               goto match1;
5877 +               }
5878 +               /* no match - a current sched domain not in new doms_new[] */
5879 +               detach_destroy_domains(doms_cur + i);
5880 +match1:
5881 +               ;
5882 +       }
5883 +
5884 +       if (doms_new == NULL) {
5885 +               ndoms_cur = 0;
5886 +               doms_new = fallback_doms;
5887 +               cpumask_andnot(&doms_new[0], cpu_online_mask, cpu_isolated_map);
5888 +               WARN_ON_ONCE(dattr_new);
5889 +       }
5890 +
5891 +       /* Build new domains */
5892 +       for (i = 0; i < ndoms_new; i++) {
5893 +               for (j = 0; j < ndoms_cur && !new_topology; j++) {
5894 +                       if (cpumask_equal(&doms_new[i], &doms_cur[j])
5895 +                           && dattrs_equal(dattr_new, i, dattr_cur, j))
5896 +                               goto match2;
5897 +               }
5898 +               /* no match - add a new doms_new */
5899 +               __build_sched_domains(doms_new + i,
5900 +                                       dattr_new ? dattr_new + i : NULL);
5901 +match2:
5902 +               ;
5903 +       }
5904 +
5905 +       /* Remember the new sched domains */
5906 +       if (doms_cur != fallback_doms)
5907 +               kfree(doms_cur);
5908 +       kfree(dattr_cur);       /* kfree(NULL) is safe */
5909 +       doms_cur = doms_new;
5910 +       dattr_cur = dattr_new;
5911 +       ndoms_cur = ndoms_new;
5912 +
5913 +       register_sched_domain_sysctl();
5914 +
5915 +       mutex_unlock(&sched_domains_mutex);
5916 +}
5917 +
5918 +#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
5919 +static void arch_reinit_sched_domains(void)
5920 +{
5921 +       get_online_cpus();
5922 +
5923 +       /* Destroy domains first to force the rebuild */
5924 +       partition_sched_domains(0, NULL, NULL);
5925 +
5926 +       rebuild_sched_domains();
5927 +       put_online_cpus();
5928 +}
5929 +
5930 +static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
5931 +{
5932 +       unsigned int level = 0;
5933 +
5934 +       if (sscanf(buf, "%u", &level) != 1)
5935 +               return -EINVAL;
5936 +
5937 +       /*
5938 +        * level will always be positive so don't check for
5939 +        * level < POWERSAVINGS_BALANCE_NONE which is 0
5940 +        * What happens on a 0 or 1 byte write?
5941 +        * Do we need to check count as well?
5942 +        */
5943 +
5944 +       if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
5945 +               return -EINVAL;
5946 +
5947 +       if (smt)
5948 +               sched_smt_power_savings = level;
5949 +       else
5950 +               sched_mc_power_savings = level;
5951 +
5952 +       arch_reinit_sched_domains();
5953 +
5954 +       return count;
5955 +}
5956 +
5957 +#ifdef CONFIG_SCHED_MC
5958 +static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
5959 +                                          char *page)
5960 +{
5961 +       return sprintf(page, "%u\n", sched_mc_power_savings);
5962 +}
5963 +static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
5964 +                                           const char *buf, size_t count)
5965 +{
5966 +       return sched_power_savings_store(buf, count, 0);
5967 +}
5968 +static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
5969 +                        sched_mc_power_savings_show,
5970 +                        sched_mc_power_savings_store);
5971 +#endif
5972 +
5973 +#ifdef CONFIG_SCHED_SMT
5974 +static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
5975 +                                           char *page)
5976 +{
5977 +       return sprintf(page, "%u\n", sched_smt_power_savings);
5978 +}
5979 +static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
5980 +                                            const char *buf, size_t count)
5981 +{
5982 +       return sched_power_savings_store(buf, count, 1);
5983 +}
5984 +static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
5985 +                  sched_smt_power_savings_show,
5986 +                  sched_smt_power_savings_store);
5987 +#endif
5988 +
5989 +int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
5990 +{
5991 +       int err = 0;
5992 +
5993 +#ifdef CONFIG_SCHED_SMT
5994 +       if (smt_capable())
5995 +               err = sysfs_create_file(&cls->kset.kobj,
5996 +                                       &attr_sched_smt_power_savings.attr);
5997 +#endif
5998 +#ifdef CONFIG_SCHED_MC
5999 +       if (!err && mc_capable())
6000 +               err = sysfs_create_file(&cls->kset.kobj,
6001 +                                       &attr_sched_mc_power_savings.attr);
6002 +#endif
6003 +       return err;
6004 +}
6005 +#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
6006 +
6007 +#ifndef CONFIG_CPUSETS
6008 +/*
6009 + * Add online and remove offline CPUs from the scheduler domains.
6010 + * When cpusets are enabled they take over this function.
6011 + */
6012 +static int update_sched_domains(struct notifier_block *nfb,
6013 +                               unsigned long action, void *hcpu)
6014 +{
6015 +       switch (action) {
6016 +       case CPU_ONLINE:
6017 +       case CPU_ONLINE_FROZEN:
6018 +       case CPU_DEAD:
6019 +       case CPU_DEAD_FROZEN:
6020 +               partition_sched_domains(1, NULL, NULL);
6021 +               return NOTIFY_OK;
6022 +
6023 +       default:
6024 +               return NOTIFY_DONE;
6025 +       }
6026 +}
6027 +#endif
6028 +
6029 +static int update_runtime(struct notifier_block *nfb,
6030 +                               unsigned long action, void *hcpu)
6031 +{
6032 +       switch (action) {
6033 +       case CPU_DOWN_PREPARE:
6034 +       case CPU_DOWN_PREPARE_FROZEN:
6035 +               return NOTIFY_OK;
6036 +
6037 +       case CPU_DOWN_FAILED:
6038 +       case CPU_DOWN_FAILED_FROZEN:
6039 +       case CPU_ONLINE:
6040 +       case CPU_ONLINE_FROZEN:
6041 +               return NOTIFY_OK;
6042 +
6043 +       default:
6044 +               return NOTIFY_DONE;
6045 +       }
6046 +}
6047 +
6048 +void __init sched_init_smp(void)
6049 +{
6050 +       cpumask_var_t non_isolated_cpus;
6051 +
6052 +       alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
6053 +
6054 +#if defined(CONFIG_NUMA)
6055 +       sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
6056 +                                                               GFP_KERNEL);
6057 +       BUG_ON(sched_group_nodes_bycpu == NULL);
6058 +#endif
6059 +       get_online_cpus();
6060 +       mutex_lock(&sched_domains_mutex);
6061 +       arch_init_sched_domains(cpu_online_mask);
6062 +       cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
6063 +       if (cpumask_empty(non_isolated_cpus))
6064 +               cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
6065 +       mutex_unlock(&sched_domains_mutex);
6066 +       put_online_cpus();
6067 +
6068 +#ifndef CONFIG_CPUSETS
6069 +       /* XXX: Theoretical race here - CPU may be hotplugged now */
6070 +       hotcpu_notifier(update_sched_domains, 0);
6071 +#endif
6072 +
6073 +       /* RT runtime code needs to handle some hotplug events */
6074 +       hotcpu_notifier(update_runtime, 0);
6075 +
6076 +       /* Move init over to a non-isolated CPU */
6077 +       if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
6078 +               BUG();
6079 +       free_cpumask_var(non_isolated_cpus);
6080 +
6081 +       alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
6082 +
6083 +       /*
6084 +        * Assume that every added cpu gives us slightly less overall latency,
6085 +        * allowing us to increase the base rr_interval, but in a non-linear
6086 +        * fashion.
6087 +        */
6088 +       rr_interval *= 1 + ilog2(num_online_cpus());
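+       /*
+        * Illustrative values, assuming the base rr_interval is still at its
+        * default of 6ms at this point: 2 online CPUs give
+        * 6 * (1 + ilog2(2)) = 12ms, 4 CPUs give 18ms, and 16 CPUs give 30ms.
+        */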
6089 +}
6090 +#else
6091 +void __init sched_init_smp(void)
6092 +{
6093 +}
6094 +#endif /* CONFIG_SMP */
6095 +
6096 +unsigned int sysctl_timer_migration = 1;
6097 +
6098 +int in_sched_functions(unsigned long addr)
6099 +{
6100 +       return in_lock_functions(addr) ||
6101 +               (addr >= (unsigned long)__sched_text_start
6102 +               && addr < (unsigned long)__sched_text_end);
6103 +}
6104 +
6105 +void __init sched_init(void)
6106 +{
6107 +       int i;
6108 +       int highest_cpu = 0;
6109 +
6110 +       prio_ratios[0] = 100;
6111 +       for (i = 1 ; i < PRIO_RANGE ; i++)
6112 +               prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;
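+       /*
+        * Illustrative values: each entry is ~10% larger than the previous
+        * one (integer arithmetic), so prio_ratios[1] == 110,
+        * prio_ratios[2] == 121, and the spread across the whole nice range
+        * is roughly geometric, about 1.1^(PRIO_RANGE - 1) between the first
+        * and last entries.
+        */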
6113 +
6114 +#ifdef CONFIG_SMP
6115 +       init_defrootdomain();
6116 +       cpus_clear(grq.cpu_idle_map);
6117 +#endif
6118 +       spin_lock_init(&grq.lock);
6119 +       for_each_possible_cpu(i) {
6120 +               struct rq *rq;
6121 +
6122 +               rq = cpu_rq(i);
6123 +               INIT_LIST_HEAD(&rq->queue);
6124 +               rq->rq_deadline = 0;
6125 +               rq->rq_prio = 0;
6126 +               rq->cpu = i;
6127 +               rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc =
6128 +                             rq->iowait_pc = rq->idle_pc = 0;
6129 +#ifdef CONFIG_SMP
6130 +               rq->sd = NULL;
6131 +               rq->rd = NULL;
6132 +               rq->online = 0;
6133 +               INIT_LIST_HEAD(&rq->migration_queue);
6134 +               rq_attach_root(rq, &def_root_domain);
6135 +#endif
6136 +               atomic_set(&rq->nr_iowait, 0);
6137 +               highest_cpu = i;
6138 +       }
6139 +       grq.iso_ticks = grq.nr_running = grq.nr_uninterruptible = 0;
6140 +       for (i = 0; i < PRIO_LIMIT; i++)
6141 +               INIT_LIST_HEAD(grq.queue + i);
6142 +       bitmap_zero(grq.prio_bitmap, PRIO_LIMIT);
6143 +       /* delimiter for bitsearch */
6144 +       __set_bit(PRIO_LIMIT, grq.prio_bitmap);
6145 +
6146 +#ifdef CONFIG_SMP
6147 +       nr_cpu_ids = highest_cpu + 1;
6148 +#endif
6149 +
6150 +#ifdef CONFIG_PREEMPT_NOTIFIERS
6151 +       INIT_HLIST_HEAD(&init_task.preempt_notifiers);
6152 +#endif
6153 +
6154 +#ifdef CONFIG_RT_MUTEXES
6155 +       plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
6156 +#endif
6157 +
6158 +       /*
6159 +        * The boot idle thread does lazy MMU switching as well:
6160 +        */
6161 +       atomic_inc(&init_mm.mm_count);
6162 +       enter_lazy_tlb(&init_mm, current);
6163 +
6164 +       /*
6165 +        * Make us the idle thread. Technically, schedule() should not be
6166 +        * called from this thread; however, somewhere below it might be,
6167 +        * but because we are the idle thread, we just pick up running again
6168 +        * when this runqueue becomes "idle".
6169 +        */
6170 +       init_idle(current, smp_processor_id());
6171 +
6172 +       /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
6173 +       alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
6174 +#ifdef CONFIG_SMP
6175 +#ifdef CONFIG_NO_HZ
6176 +       alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
6177 +       alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
6178 +#endif
6179 +       alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
6180 +#endif /* SMP */
6181 +       perf_counter_init();
6182 +}
6183 +
6184 +#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
6185 +void __might_sleep(char *file, int line)
6186 +{
6187 +#ifdef in_atomic
6188 +       static unsigned long prev_jiffy;        /* ratelimiting */
6189 +
6190 +       if ((in_atomic() || irqs_disabled()) &&
6191 +           system_state == SYSTEM_RUNNING && !oops_in_progress) {
6192 +               if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
6193 +                       return;
6194 +               prev_jiffy = jiffies;
6195 +               printk(KERN_ERR "BUG: sleeping function called from invalid"
6196 +                               " context at %s:%d\n", file, line);
6197 +               printk("in_atomic():%d, irqs_disabled():%d\n",
6198 +                       in_atomic(), irqs_disabled());
6199 +               debug_show_held_locks(current);
6200 +               if (irqs_disabled())
6201 +                       print_irqtrace_events(current);
6202 +               dump_stack();
6203 +       }
6204 +#endif
6205 +}
6206 +EXPORT_SYMBOL(__might_sleep);
6207 +#endif
6208 +
6209 +#ifdef CONFIG_MAGIC_SYSRQ
6210 +void normalize_rt_tasks(void)
6211 +{
6212 +       struct task_struct *g, *p;
6213 +       unsigned long flags;
6214 +       struct rq *rq;
6215 +       int queued;
6216 +
6217 +       read_lock_irq(&tasklist_lock);
6218 +
6219 +       do_each_thread(g, p) {
6220 +               if (!rt_task(p) && !iso_task(p))
6221 +                       continue;
6222 +
6223 +               spin_lock_irqsave(&p->pi_lock, flags);
6224 +               rq = __task_grq_lock(p);
6225 +               update_rq_clock(rq);
6226 +
6227 +               queued = task_queued_only(p);
6228 +               if (queued)
6229 +                       dequeue_task(p);
6230 +               __setscheduler(p, SCHED_NORMAL, 0);
6231 +               if (task_running(p))
6232 +                       resched_task(p);
6233 +               if (queued) {
6234 +                       enqueue_task(p);
6235 +                       try_preempt(p);
6236 +               }
6237 +
6238 +               __task_grq_unlock();
6239 +               spin_unlock_irqrestore(&p->pi_lock, flags);
6240 +       } while_each_thread(g, p);
6241 +
6242 +       read_unlock_irq(&tasklist_lock);
6243 +}
6244 +#endif /* CONFIG_MAGIC_SYSRQ */
6245 +
6246 +#ifdef CONFIG_IA64
6247 +/*
6248 + * These functions are only useful for the IA64 MCA handling.
6249 + *
6250 + * They can only be called when the whole system has been
6251 + * stopped - every CPU needs to be quiescent, and no scheduling
6252 + * activity can take place. Using them for anything else would
6253 + * be a serious bug, and as a result, they aren't even visible
6254 + * under any other configuration.
6255 + */
6256 +
6257 +/**
6258 + * curr_task - return the current task for a given cpu.
6259 + * @cpu: the processor in question.
6260 + *
6261 + * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6262 + */
6263 +struct task_struct *curr_task(int cpu)
6264 +{
6265 +       return cpu_curr(cpu);
6266 +}
6267 +
6268 +/**
6269 + * set_curr_task - set the current task for a given cpu.
6270 + * @cpu: the processor in question.
6271 + * @p: the task pointer to set.
6272 + *
6273 + * Description: This function must only be used when non-maskable interrupts
6274 + * are serviced on a separate stack.  It allows the architecture to switch the
6275 + * notion of the current task on a cpu in a non-blocking manner.  This function
6276 + * must be called with all CPUs synchronized and interrupts disabled; the
6277 + * caller must save the original value of the current task (see
6278 + * curr_task() above) and restore that value before reenabling interrupts and
6279 + * re-starting the system.
6280 + *
6281 + * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
6282 + */
6283 +void set_curr_task(int cpu, struct task_struct *p)
6284 +{
6285 +       cpu_curr(cpu) = p;
6286 +}
6287 +
6288 +#endif
6289 +
6290 +/*
6291 + * Use precise platform statistics if available:
6292 + */
6293 +#ifdef CONFIG_VIRT_CPU_ACCOUNTING
6294 +cputime_t task_utime(struct task_struct *p)
6295 +{
6296 +       return p->utime;
6297 +}
6298 +
6299 +cputime_t task_stime(struct task_struct *p)
6300 +{
6301 +       return p->stime;
6302 +}
6303 +#else
6304 +cputime_t task_utime(struct task_struct *p)
6305 +{
6306 +       clock_t utime = cputime_to_clock_t(p->utime),
6307 +               total = utime + cputime_to_clock_t(p->stime);
6308 +       u64 temp;
6309 +
6310 +       temp = (u64)nsec_to_clock_t(p->se.sum_exec_runtime);
6311 +
6312 +       if (total) {
6313 +               temp *= utime;
6314 +               do_div(temp, total);
6315 +       }
6316 +       utime = (clock_t)temp;
6317 +
6318 +       p->prev_utime = max(p->prev_utime, clock_t_to_cputime(utime));
6319 +       return p->prev_utime;
6320 +}
6321 +
6322 +cputime_t task_stime(struct task_struct *p)
6323 +{
6324 +       clock_t stime;
6325 +
6326 +       stime = nsec_to_clock_t(p->se.sum_exec_runtime) -
6327 +                       cputime_to_clock_t(task_utime(p));
6328 +
6329 +       if (stime >= 0)
6330 +               p->prev_stime = max(p->prev_stime, clock_t_to_cputime(stime));
6331 +
6332 +       return p->prev_stime;
6333 +}
6334 +#endif
6335 +
6336 +inline cputime_t task_gtime(struct task_struct *p)
6337 +{
6338 +       return p->gtime;
6339 +}
6340 +
6341 +void __cpuinit init_idle_bootup_task(struct task_struct *idle)
6342 +{}
6343 +
6344 +#ifdef CONFIG_SCHED_DEBUG
6345 +void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
6346 +{}
6347 +
6348 +void proc_sched_set_task(struct task_struct *p)
6349 +{}
6350 +#endif
6351 --- a/kernel/sysctl.c
6352 +++ b/kernel/sysctl.c
6353 @@ -86,6 +86,8 @@ extern int percpu_pagelist_fraction;
6354  extern int compat_log;
6355  extern int latencytop_enabled;
6356  extern int sysctl_nr_open_min, sysctl_nr_open_max;
6357 +extern int rr_interval;
6358 +extern int sched_iso_cpu;
6359  #ifndef CONFIG_MMU
6360  extern int sysctl_nr_trim_pages;
6361  #endif
6362 @@ -103,7 +105,8 @@ static int zero;
6363  static int __maybe_unused one = 1;
6364  static int __maybe_unused two = 2;
6365  static unsigned long one_ul = 1;
6366 -static int one_hundred = 100;
6367 +static int __read_mostly one_hundred = 100;
6368 +static int __maybe_unused __read_mostly five_thousand = 5000;
6369  
6370  /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
6371  static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
6372 @@ -238,7 +241,7 @@ static struct ctl_table root_table[] = {
6373         { .ctl_name = 0 }
6374  };
6375  
6376 -#ifdef CONFIG_SCHED_DEBUG
6377 +#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SCHED_CFS)
6378  static int min_sched_granularity_ns = 100000;          /* 100 usecs */
6379  static int max_sched_granularity_ns = NSEC_PER_SEC;    /* 1 second */
6380  static int min_wakeup_granularity_ns;                  /* 0 usecs */
6381 @@ -246,7 +249,7 @@ static int max_wakeup_granularity_ns = N
6382  #endif
6383  
6384  static struct ctl_table kern_table[] = {
6385 -#ifdef CONFIG_SCHED_DEBUG
6386 +#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SCHED_CFS)
6387         {
6388                 .ctl_name       = CTL_UNNUMBERED,
6389                 .procname       = "sched_min_granularity_ns",
6390 @@ -342,6 +345,7 @@ static struct ctl_table kern_table[] = {
6391                 .extra2         = &one,
6392         },
6393  #endif
6394 +#ifdef CONFIG_SCHED_CFS
6395         {
6396                 .ctl_name       = CTL_UNNUMBERED,
6397                 .procname       = "sched_rt_period_us",
6398 @@ -366,6 +370,7 @@ static struct ctl_table kern_table[] = {
6399                 .mode           = 0644,
6400                 .proc_handler   = &proc_dointvec,
6401         },
6402 +#endif
6403  #ifdef CONFIG_PROVE_LOCKING
6404         {
6405                 .ctl_name       = CTL_UNNUMBERED,
6406 @@ -798,6 +803,30 @@ static struct ctl_table kern_table[] = {
6407                 .proc_handler   = &proc_dointvec,
6408         },
6409  #endif
6410 +#ifdef CONFIG_SCHED_BFS
6411 +       {
6412 +               .ctl_name       = CTL_UNNUMBERED,
6413 +               .procname       = "rr_interval",
6414 +               .data           = &rr_interval,
6415 +               .maxlen         = sizeof(int),
6416 +               .mode           = 0644,
6417 +               .proc_handler   = &proc_dointvec_minmax,
6418 +               .strategy       = &sysctl_intvec,
6419 +               .extra1         = &one,
6420 +               .extra2         = &five_thousand,
6421 +       },
6422 +       {
6423 +               .ctl_name       = CTL_UNNUMBERED,
6424 +               .procname       = "iso_cpu",
6425 +               .data           = &sched_iso_cpu,
6426 +               .maxlen         = sizeof(int),
6427 +               .mode           = 0644,
6428 +               .proc_handler   = &proc_dointvec_minmax,
6429 +               .strategy       = &sysctl_intvec,
6430 +               .extra1         = &zero,
6431 +               .extra2         = &one_hundred,
6432 +       },
6433 +#endif
6434  #if defined(CONFIG_S390) && defined(CONFIG_SMP)
6435         {
6436                 .ctl_name       = KERN_SPIN_RETRY,
6437 --- a/kernel/workqueue.c
6438 +++ b/kernel/workqueue.c
6439 @@ -317,7 +317,9 @@ static int worker_thread(void *__cwq)
6440         if (cwq->wq->freezeable)
6441                 set_freezable();
6442  
6443 +#ifdef CONFIG_SCHED_CFS
6444         set_user_nice(current, -5);
6445 +#endif
6446  
6447         for (;;) {
6448                 prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);