/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *		make semaphores SMP safe
 *  1998-11-19	Implemented schedule_timeout() and related stuff
 *		by Andrea Arcangeli
 *  2002-01-04	New ultra-scalable O(1) scheduler by Ingo Molnar:
 *		hybrid priority-list and round-robin design with
 *		an array-switch method of distributing timeslices
 *		and per-CPU runqueues.  Cleanups and useful suggestions
 *		by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03	Interactivity tuning by Con Kolivas.
 *  2004-04-02	Scheduler domains code by Nick Piggin
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <asm/tlb.h>

#include <asm/unistd.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
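/*
 * Worked example, assuming the usual MAX_RT_PRIO of 100 and MAX_PRIO of 140:
 * nice -20 maps to static priority 100, nice 0 to 120 and nice +19 to 139.
 */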

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)		((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)	USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO		(USER_PRIO(MAX_PRIO))

/*
 * Some helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)	((TIME) / (1000000000 / HZ))
#define JIFFIES_TO_NS(TIME)	((TIME) * (1000000000 / HZ))
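/*
 * E.g. at HZ == 1000 a jiffy is 1,000,000 ns, so NS_TO_JIFFIES(2000000) == 2;
 * at HZ == 250 the same conversion uses 4,000,000 ns per jiffy.
 */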

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
 * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 * Timeslices get refilled after they expire.
 */
#define MIN_TIMESLICE		max(5 * HZ / 1000, 1)
#define DEF_TIMESLICE		(100 * HZ / 1000)
#define ON_RUNQUEUE_WEIGHT	 30
#define CHILD_PENALTY		 95
#define PARENT_PENALTY		100
#define EXIT_WEIGHT		  3
#define PRIO_BONUS_RATIO	 25
#define MAX_BONUS		(MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
#define INTERACTIVE_DELTA	  2
#define MAX_SLEEP_AVG		(DEF_TIMESLICE * MAX_BONUS)
#define STARVATION_LIMIT	(MAX_SLEEP_AVG)
#define NS_MAX_SLEEP_AVG	(JIFFIES_TO_NS(MAX_SLEEP_AVG))
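/*
 * With the default 40-level user priority range this gives MAX_BONUS == 10
 * and MAX_SLEEP_AVG == 10 * DEF_TIMESLICE, i.e. one second's worth of
 * jiffies, which is also the STARVATION_LIMIT.
 */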

/*
 * If a task is 'interactive' then we reinsert it in the active
 * array after it has expired its current timeslice. (it will not
 * continue to run immediately, it will still roundrobin with
 * other interactive tasks.)
 *
 * This part scales the interactivity limit depending on niceness.
 *
 * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
 * Here are a few examples of different nice levels:
 *
 *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
 *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
 *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
 *
 * (the X axis represents the possible -5 ... 0 ... +5 dynamic
 *  priority range a task can explore, a value of '1' means the
 *  task is rated interactive.)
 *
 * Ie. nice +19 tasks can never get 'interactive' enough to be
 * reinserted into the active array. And only heavily CPU-hog nice -20
 * tasks will be expired. Default nice 0 tasks are somewhere between,
 * it takes some effort for them to get interactive, but it's not
 * too hard.
 */

#define CURRENT_BONUS(p) \
	(NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
		MAX_SLEEP_AVG)

#define GRANULARITY	(10 * HZ / 1000 ? : 1)

#ifdef CONFIG_SMP
#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
			num_online_cpus())
#else
#define TIMESLICE_GRANULARITY(p)	(GRANULARITY * \
		(1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
#endif

#define SCALE(v1,v1_max,v2_max) \
	(v1) * (v2_max) / (v1_max)

#define DELTA(p) \
	(SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
		INTERACTIVE_DELTA)

#define TASK_INTERACTIVE(p) \
	((p)->prio <= (p)->static_prio - DELTA(p))

#define INTERACTIVE_SLEEP(p) \
	(JIFFIES_TO_NS(MAX_SLEEP_AVG * \
		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))

#define TASK_PREEMPTS_CURR(p, rq) \
	((p)->prio < (rq)->curr->prio)
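/*
 * Worked example for a nice 0 task with the default knobs: DELTA(p) is
 * 20*10/40 - 20*10/40 + INTERACTIVE_DELTA == 2, so TASK_INTERACTIVE() holds
 * once the dynamic bonus reaches +2 (a sleep_avg of roughly 70% of
 * MAX_SLEEP_AVG), and INTERACTIVE_SLEEP() works out to just under 800ms.
 */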

/*
 * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
 * to time slice values: [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */

#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

static unsigned int static_prio_timeslice(int static_prio)
{
	if (static_prio < NICE_TO_PRIO(0))
		return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
	else
		return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}

static inline unsigned int task_timeslice(struct task_struct *p)
{
	return static_prio_timeslice(p->static_prio);
}
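
/*
 * With the default MAX_PRIO of 140 and MAX_USER_PRIO of 40 this yields
 * 800ms for nice -20, 100ms for nice 0 and 5ms for nice +19, matching the
 * range quoted above.
 */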

/*
 * These are the runqueue data structures:
 */

struct prio_array {
	unsigned int nr_active;
	DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
	struct list_head queue[MAX_PRIO];
};

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	unsigned long raw_weighted_load;
#ifdef CONFIG_SMP
	unsigned long cpu_load[3];
#endif
	unsigned long long nr_switches;

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	unsigned long expired_timestamp;
	unsigned long long timestamp_last_tick;
	struct task_struct *curr, *idle;
	struct mm_struct *prev_mm;
	struct prio_array *active, *expired, arrays[2];
	int best_expired_prio;
	atomic_t nr_iowait;

#ifdef CONFIG_SMP
	struct sched_domain *sd;

	/* For active balancing */
	int active_balance;
	int push_cpu;
	int cpu;		/* cpu of this runqueue */

	struct task_struct *migration_thread;
	struct list_head migration_queue;
#endif

#ifdef CONFIG_SCHEDSTATS
	/* latency stats */
	struct sched_info rq_sched_info;

	/* sys_sched_yield() stats */
	unsigned long yld_exp_empty;
	unsigned long yld_act_empty;
	unsigned long yld_both_empty;
	unsigned long yld_cnt;

	/* schedule() stats */
	unsigned long sched_switch;
	unsigned long sched_cnt;
	unsigned long sched_goidle;

	/* try_to_wake_up() stats */
	unsigned long ttwu_cnt;
	unsigned long ttwu_local;
#endif
	struct lock_class_key rq_lock_key;
};

static DEFINE_PER_CPU(struct rq, runqueues);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
	return rq->cpu;
#else
	return 0;
#endif
}

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
	for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)
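
/*
 * Typical usage sketch (always within a preempt-disabled section), as in
 * try_to_wake_up() below:
 *
 *	for_each_domain(this_cpu, sd)
 *		if (cpu_isset(cpu, sd->span))
 *			break;
 */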

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)	do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)	do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
	return rq->curr == p;
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	/* this is a valid case when another task releases the spinlock */
	rq->lock.owner = current;
#endif
	/*
	 * If we are tracking spinlock dependencies then we have to
	 * fix up the runqueue lock - which gets 'carried over' from
	 * prev into current:
	 */
	spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

	spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
	return p->oncpu;
#else
	return rq->curr == p;
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
	/*
	 * We can optimise this out completely for !SMP, because the
	 * SMP rebalancing from interrupt is the only thing that cares
	 * here.
	 */
	next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	spin_unlock_irq(&rq->lock);
#else
	spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
	/*
	 * After ->oncpu is cleared, the task can be moved to a different CPU.
	 * We must ensure this doesn't happen until the switch is completely
	 * finished.
	 */
	smp_wmb();
	prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
	__acquires(rq->lock)
{
	struct rq *rq;

repeat_lock_task:
	rq = task_rq(p);
	spin_lock(&rq->lock);
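	/*
	 * The task may have been migrated to another CPU between the
	 * task_rq() lookup and taking the lock, so re-check and retry
	 * until both agree.
	 */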
	if (unlikely(rq != task_rq(p))) {
		spin_unlock(&rq->lock);
		goto repeat_lock_task;
	}
	return rq;
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts.  Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
	__acquires(rq->lock)
{
	struct rq *rq;

repeat_lock_task:
	local_irq_save(*flags);
	rq = task_rq(p);
	spin_lock(&rq->lock);
	if (unlikely(rq != task_rq(p))) {
		spin_unlock_irqrestore(&rq->lock, *flags);
		goto repeat_lock_task;
	}
	return rq;
}

static inline void __task_rq_unlock(struct rq *rq)
	__releases(rq->lock)
{
	spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
	__releases(rq->lock)
{
	spin_unlock_irqrestore(&rq->lock, *flags);
}

#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 12

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcnt = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
		    cpu, rq->yld_both_empty,
		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
		    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
		    rq->ttwu_cnt, rq->ttwu_local,
		    rq->rq_sched_info.cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum idle_type itype;
			char mask_str[NR_CPUS];

			cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
			seq_printf(seq, "domain%d %s", dcnt++, mask_str);
			for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
				    sd->lb_cnt[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
			    sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	return 0;
}

static int schedstat_open(struct inode *inode, struct file *file)
{
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta_jiffies;
		rq->rq_sched_info.pcnt++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
{
	if (rq)
		rq->rq_sched_info.cpu_time += delta_jiffies;
}
Linus Torvalds's avatar
# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
{}
# define schedstat_inc(rq, field)	do { } while (0)
# define schedstat_add(rq, field, amt)	do { } while (0)
#endif

/*
 * rq_lock - lock a given runqueue and disable interrupts.
 */
static inline struct rq *this_rq_lock(void)
	__acquires(rq->lock)
{
	struct rq *rq;

	local_irq_disable();
	rq = this_rq();
	spin_lock(&rq->lock);

	return rq;
}

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * This function is only called from sched_info_arrive(), rather than
 * dequeue_task(). Even though a task may be queued and dequeued multiple
 * times as it is shuffled about, we're really interested in knowing how
 * long it was from the *first* time it was queued to the time that it
 * finally hit a cpu.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long now = jiffies, delta_jiffies = 0;

	if (t->sched_info.last_queued)
		delta_jiffies = now - t->sched_info.last_queued;
	sched_info_dequeued(t);
	t->sched_info.run_delay += delta_jiffies;
	t->sched_info.last_arrival = now;
	t->sched_info.pcnt++;

	rq_sched_info_arrive(task_rq(t), delta_jiffies);
}

/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long the
 * task had to wait to reach the cpu.  Since the expired queue will
 * become the active queue after active queue is empty, without dequeuing
 * and requeuing any tasks, we are interested in queuing to either. It
 * is unusual but not impossible for tasks to be dequeued and immediately
 * requeued in the same or another array: this can happen in sched_yield(),
 * set_user_nice(), and even load_balance() as it moves tasks from runqueue
 * to runqueue.
 *
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = jiffies;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;

	t->sched_info.cpu_time += delta_jiffies;
	rq_sched_info_depart(task_rq(t), delta_jiffies);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
#else
#define sched_info_queued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task(struct task_struct *p, struct prio_array *array)
{
	array->nr_active--;
	list_del(&p->run_list);
	if (list_empty(array->queue + p->prio))
		__clear_bit(p->prio, array->bitmap);
}

static void enqueue_task(struct task_struct *p, struct prio_array *array)
{
	sched_info_queued(p);
	list_add_tail(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	array->nr_active++;
	p->array = array;
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task(struct task_struct *p, struct prio_array *array)
{
	list_move_tail(&p->run_list, array->queue + p->prio);
}

static inline void
enqueue_task_head(struct task_struct *p, struct prio_array *array)
{
	list_add(&p->run_list, array->queue + p->prio);
	__set_bit(p->prio, array->bitmap);
	array->nr_active++;
	p->array = array;
}

/*
 * __normal_prio - return the priority that is based on the static
 * priority but is modified by bonuses/penalties.
 *
 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
 * into the -5 ... 0 ... +5 bonus/penalty range.
 *
 * We use 25% of the full 0...39 priority range so that:
 *
 * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
 * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
 *
 * Both properties are important to certain workloads.
 */

static inline int __normal_prio(struct task_struct *p)
{
	int bonus, prio;

	bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;

	prio = p->static_prio - bonus;
	if (prio < MAX_RT_PRIO)
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO-1)
		prio = MAX_PRIO-1;
	return prio;
}
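
/*
 * E.g. with the default MAX_BONUS of 10, a task whose sleep_avg has reached
 * NS_MAX_SLEEP_AVG gets the full +5 bonus and runs at static_prio - 5,
 * while a task that never sleeps ends up at static_prio + 5.
 */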

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value.  For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

/*
 * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
 * If static_prio_timeslice() is ever changed to break this assumption then
 * this code will need modification
 */
#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
#define LOAD_WEIGHT(lp) \
	(((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
#define PRIO_TO_LOAD_WEIGHT(prio) \
	LOAD_WEIGHT(static_prio_timeslice(prio))
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
	(PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
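
/*
 * Rough numbers, assuming SCHED_LOAD_SCALE == 128 and HZ == 1000: a nice 0
 * task weighs 128, a nice +19 task 5*128/100 == 6 and a nice -20 task
 * 800*128/100 == 1024, so one nice -20 hog contributes about as much
 * runqueue load as eight nice 0 tasks.
 */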

static void set_load_weight(struct task_struct *p)
{
	if (has_rt_policy(p)) {
#ifdef CONFIG_SMP
		if (p == task_rq(p)->migration_thread)
			/*
			 * The migration thread does the actual balancing.
			 * Giving its load any weight will skew balancing
			 * adversely.
			 */
			p->load_weight = 0;
		else
#endif
			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
	} else
		p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
}

static inline void
inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
{
	rq->raw_weighted_load += p->load_weight;
}

static inline void
dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
{
	rq->raw_weighted_load -= p->load_weight;
}

static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
{
	rq->nr_running++;
	inc_raw_weighted_load(rq, p);
}

static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
{
	rq->nr_running--;
	dec_raw_weighted_load(rq, p);
}

/*
 * Calculate the expected normal priority: i.e. priority
 * without taking RT-inheritance into account. Might be
 * boosted by interactivity modifiers. Changes upon fork,
 * setprio syscalls, and whenever the interactivity
 * estimator recalculates.
 */
static inline int normal_prio(struct task_struct *p)
{
	int prio;

	if (has_rt_policy(p))
		prio = MAX_RT_PRIO-1 - p->rt_priority;
	else
		prio = __normal_prio(p);
	return prio;
}

/*
 * Calculate the current priority, i.e. the priority
 * taken into account by the scheduler. This value might
 * be boosted by RT tasks, or might be boosted by
 * interactivity modifiers. Will be RT if the task got
 * RT-boosted. If not then it returns p->normal_prio.
 */
static int effective_prio(struct task_struct *p)
{
	p->normal_prio = normal_prio(p);
	/*
	 * If we are RT tasks or we were boosted to RT priority,
	 * keep the priority unchanged. Otherwise, update priority
	 * to the normal priority:
	 */
	if (!rt_prio(p->prio))
		return p->normal_prio;
	return p->prio;
}

/*
 * __activate_task - move a task to the runqueue.
 */
static void __activate_task(struct task_struct *p, struct rq *rq)
{
	struct prio_array *target = rq->active;

	if (batch_task(p))
		target = rq->expired;
	enqueue_task(p, target);
	inc_nr_running(p, rq);
}

/*
 * __activate_idle_task - move idle task to the _front_ of runqueue.
 */
static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
{
	enqueue_task_head(p, rq->active);
	inc_nr_running(p, rq);
}

/*
 * Recalculate p->normal_prio and p->prio after having slept,
 * updating the sleep-average too:
 */
static int recalc_task_prio(struct task_struct *p, unsigned long long now)
{
	/* Caller must always ensure 'now >= p->timestamp' */
	unsigned long sleep_time = now - p->timestamp;

	if (batch_task(p))
		sleep_time = 0;

	if (likely(sleep_time > 0)) {
		/*
		 * This ceiling is set to the lowest priority that would allow
		 * a task to be reinserted into the active array on timeslice
		 * completion.
		 */
		unsigned long ceiling = INTERACTIVE_SLEEP(p);

		if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
			/*
			 * Prevents user tasks from achieving best priority
			 * with one single large enough sleep.
			 */
			p->sleep_avg = ceiling;
			/*
			 * Using INTERACTIVE_SLEEP() as a ceiling places a
			 * nice(0) task 1ms sleep away from promotion, and
			 * gives it 700ms to round-robin with no chance of
			 * being demoted.  This is more than generous, so
			 * mark this sleep as non-interactive to prevent the
			 * on-runqueue bonus logic from intervening should
			 * this task not receive cpu immediately.
			 */
			p->sleep_type = SLEEP_NONINTERACTIVE;
		} else {
			/*
			 * Tasks waking from uninterruptible sleep are
			 * limited in their sleep_avg rise as they
			 * are likely to be waiting on I/O
			 */
			if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
				if (p->sleep_avg >= ceiling)
					sleep_time = 0;
				else if (p->sleep_avg + sleep_time >=
					 ceiling) {
						p->sleep_avg = ceiling;
						sleep_time = 0;
				}
			}

			/*
			 * This code gives a bonus to interactive tasks.
			 *
			 * The boost works by updating the 'average sleep time'
			 * value here, based on ->timestamp. The more time a
			 * task spends sleeping, the higher the average gets -
			 * and the higher the priority boost gets as well.
			 */
			p->sleep_avg += sleep_time;

		}
		if (p->sleep_avg > NS_MAX_SLEEP_AVG)
			p->sleep_avg = NS_MAX_SLEEP_AVG;
	}

	return effective_prio(p);
}

/*
 * activate_task - move a task to the runqueue and do priority recalculation
 *
 * Update all the scheduling statistics stuff. (sleep average
 * calculation, priority modifiers, etc.)
 */
static void activate_task(struct task_struct *p, struct rq *rq, int local)
{
	unsigned long long now;

	now = sched_clock();
#ifdef CONFIG_SMP
	if (!local) {
		/* Compensate for drifting sched_clock */
		struct rq *this_rq = this_rq();
		now = (now - this_rq->timestamp_last_tick)
			+ rq->timestamp_last_tick;
	}
#endif

	if (!rt_task(p))
		p->prio = recalc_task_prio(p, now);

	/*
	 * This checks to make sure it's not an uninterruptible task
	 * that is now waking up.
	 */
	if (p->sleep_type == SLEEP_NORMAL) {
		/*
		 * Tasks which were woken up by interrupts (ie. hw events)
		 * are most likely of interactive nature. So we give them
		 * the credit of extending their sleep time to the period
		 * of time they spend on the runqueue, waiting for execution
		 * on a CPU, first time around:
		 */
		if (in_interrupt())
			p->sleep_type = SLEEP_INTERRUPTED;
		else {
			/*
			 * Normal first-time wakeups get a credit too for
			 * on-runqueue time, but it will be weighted down:
			 */
			p->sleep_type = SLEEP_INTERACTIVE;
		}
	}
	p->timestamp = now;

	__activate_task(p, rq);
}

/*
 * deactivate_task - remove a task from the runqueue.
 */
static void deactivate_task(struct task_struct *p, struct rq *rq)
{
	dec_nr_running(p, rq);
	dequeue_task(p, p->array);
	p->array = NULL;
}

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
	int cpu;

	assert_spin_locked(&task_rq(p)->lock);

	if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
		return;

	set_tsk_thread_flag(p, TIF_NEED_RESCHED);

	cpu = task_cpu(p);
	if (cpu == smp_processor_id())
		return;

	/* NEED_RESCHED must be visible before we test polling */
	smp_mb();
	if (!tsk_is_polling(p))
		smp_send_reschedule(cpu);
}
#else
static inline void resched_task(struct task_struct *p)
{
	assert_spin_locked(&task_rq(p)->lock);
	set_tsk_need_resched(p);
}
#endif

/**
 * task_curr - is this task currently executing on a CPU?
 * @p: the task in question.
 */
inline int task_curr(const struct task_struct *p)
{
	return cpu_curr(task_cpu(p)) == p;
}

/* Used instead of source_load when we know the type == 0 */
unsigned long weighted_cpuload(const int cpu)
{
	return cpu_rq(cpu)->raw_weighted_load;
}

#ifdef CONFIG_SMP
struct migration_req {
	struct list_head list;

	struct task_struct *task;
	int dest_cpu;

	struct completion done;
};

/*
 * The task's runqueue lock must be held.
 * Returns true if you have to wait for migration thread.
 */
static int
migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
{
	struct rq *rq = task_rq(p);

	/*
	 * If the task is not on a runqueue (and not running), then
	 * it is sufficient to simply update the task's cpu field.
	 */
	if (!p->array && !task_running(rq, p)) {
		set_task_cpu(p, dest_cpu);
		return 0;
	}

	init_completion(&req->done);
	req->task = p;
	req->dest_cpu = dest_cpu;
	list_add(&req->list, &rq->migration_queue);

	return 1;
}

/*
 * wait_task_inactive - wait for a thread to unschedule.
 *
 * The caller must ensure that the task *will* unschedule sometime soon,
 * else this function might spin for a *long* time. This function can't
 * be called with interrupts off, or it may introduce deadlock with
 * smp_call_function() if an IPI is sent by the same process we are
 * waiting to become inactive.
 */
void wait_task_inactive(struct task_struct *p)
{
	unsigned long flags;
	struct rq *rq;
	int preempted;

repeat:
	rq = task_rq_lock(p, &flags);
	/* Must be off runqueue entirely, not preempted. */
	if (unlikely(p->array || task_running(rq, p))) {
		/* If it's preempted, we yield.  It could be a while. */
		preempted = !task_running(rq, p);
		task_rq_unlock(rq, &flags);
		cpu_relax();
		if (preempted)
			yield();
		goto repeat;
	}
	task_rq_unlock(rq, &flags);
}

/***
 * kick_process - kick a running thread to enter/exit the kernel
 * @p: the to-be-kicked thread
 *
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
 * NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
void kick_process(struct task_struct *p)
{
	int cpu;

	preempt_disable();
	cpu = task_cpu(p);
	if ((cpu != smp_processor_id()) && task_curr(p))
		smp_send_reschedule(cpu);
	preempt_enable();
}

/*
 * Return a low guess at the load of a migration-source cpu weighted
 * according to the scheduling class and "nice" value.
 *
 * We want to under-estimate the load of migration sources, to
 * balance conservatively.
 */
static inline unsigned long source_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);

	if (type == 0)
		return rq->raw_weighted_load;

	return min(rq->cpu_load[type-1], rq->raw_weighted_load);
}

/*
 * Return a high guess at the load of a migration-target cpu weighted
 * according to the scheduling class and "nice" value.
 */
static inline unsigned long target_load(int cpu, int type)
{
	struct rq *rq = cpu_rq(cpu);

	if (type == 0)
		return rq->raw_weighted_load;

	return max(rq->cpu_load[type-1], rq->raw_weighted_load);
}

/*
 * Return the average load per task on the cpu's run queue
 */
static inline unsigned long cpu_avg_load_per_task(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	unsigned long n = rq->nr_running;

	return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
}
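
/*
 * Example of the min/max asymmetry above: with cpu_load[idx] == 256 and
 * raw_weighted_load == 384, source_load() reports 256 but target_load()
 * reports 384, biasing the balancer away from needless migrations.
 */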

/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
{
	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int load_idx = sd->forkexec_idx;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
			goto nextgroup;

		local_group = cpu_isset(this_cpu, group->cpumask);

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu_mask(i, group->cpumask) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;

		if (local_group) {
			this_load = avg_load;
			this = group;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
nextgroup:
		group = group->next;
	} while (group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in the group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	cpumask_t tmp;
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	cpus_and(tmp, group->cpumask, p->cpus_allowed);

	for_each_cpu_mask(i, tmp) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}

/*
 * sched_balance_self: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int sched_balance_self(int cpu, int flag)
{
	struct task_struct *t = current;
	struct sched_domain *tmp, *sd = NULL;

	for_each_domain(cpu, tmp) {
		/*
		 * If power savings logic is enabled for a domain, stop there.
		 */
		if (tmp->flags & SD_POWERSAVINGS_BALANCE)
			break;
		if (tmp->flags & flag)
			sd = tmp;
	}

	while (sd) {
		cpumask_t span;
		struct sched_group *group;
		int new_cpu;
		int weight;

		span = sd->span;
		group = find_idlest_group(sd, t, cpu);
		if (!group)
			goto nextlevel;

		new_cpu = find_idlest_cpu(group, t, cpu);
		if (new_cpu == -1 || new_cpu == cpu)
			goto nextlevel;

		/* Now try balancing at a lower domain level */
		cpu = new_cpu;
nextlevel:
		sd = NULL;
		weight = cpus_weight(span);
		for_each_domain(cpu, tmp) {
			if (weight <= cpus_weight(tmp->span))
				break;
			if (tmp->flags & flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}

	return cpu;
}

#endif /* CONFIG_SMP */

/*
 * wake_idle() will wake a task on an idle cpu if task->cpu is
 * not idle and an idle cpu is available.  The span of cpus to
 * search starts with cpus closest then further out as needed,
 * so we always favor a closer, idle cpu.
 *
 * Returns the CPU we should wake onto.
 */
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
static int wake_idle(int cpu, struct task_struct *p)
{
	cpumask_t tmp;
	struct sched_domain *sd;
	int i;

	if (idle_cpu(cpu))
		return cpu;

	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_IDLE) {
			cpus_and(tmp, sd->span, p->cpus_allowed);
			for_each_cpu_mask(i, tmp) {
				if (idle_cpu(i))
					return i;
			}
		}
		else
			break;
	}
	return cpu;
}
#else
static inline int wake_idle(int cpu, struct task_struct *p)
{
	return cpu;
}
#endif

/***
 * try_to_wake_up - wake up a thread
 * @p: the to-be-woken-up thread
 * @state: the mask of task states that can be woken
 * @sync: do a synchronous wakeup?
 *
 * Put it on the run-queue if it's not already there. The "current"
 * thread is always on the run-queue (except when the actual
 * re-schedule is in progress), and as such you're allowed to do
 * the simpler "current->state = TASK_RUNNING" to mark yourself
 * runnable without the overhead of this.
 *
 * returns failure only if the task is already active.
 */
static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
{
	int cpu, this_cpu, success = 0;
	unsigned long flags;
	long old_state;
	struct rq *rq;
#ifdef CONFIG_SMP
	struct sched_domain *sd, *this_sd = NULL;
	unsigned long load, this_load;
	int new_cpu;
#endif

	rq = task_rq_lock(p, &flags);
	old_state = p->state;
	if (!(old_state & state))
		goto out;

	if (p->array)
		goto out_running;

	cpu = task_cpu(p);
	this_cpu = smp_processor_id();

#ifdef CONFIG_SMP
	if (unlikely(task_running(rq, p)))
		goto out_activate;

	new_cpu = cpu;

	schedstat_inc(rq, ttwu_cnt);
	if (cpu == this_cpu) {
		schedstat_inc(rq, ttwu_local);
		goto out_set_cpu;
	}

	for_each_domain(this_cpu, sd) {
		if (cpu_isset(cpu, sd->span)) {
			schedstat_inc(sd, ttwu_wake_remote);
			this_sd = sd;
			break;
		}
	}

	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
		goto out_set_cpu;

	/*
	 * Check for affine wakeup and passive balancing possibilities.
	 */
	if (this_sd) {
		int idx = this_sd->wake_idx;
		unsigned int imbalance;

		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
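		/* e.g. the common imbalance_pct of 125 gives imbalance == 112, ~12% slack */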

		load = source_load(cpu, idx);
		this_load = target_load(this_cpu, idx);

		new_cpu = this_cpu; /* Wake to this CPU if we can */

		if (this_sd->flags & SD_WAKE_AFFINE) {
			unsigned long tl = this_load;
			unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);

			/*
			 * If sync wakeup then subtract the (maximum possible)
			 * effect of the currently running task from the load
			 * of the current CPU:
			 */
			if (sync)
				tl -= current->load_weight;

			if ((tl <= load &&
				tl + target_load(cpu, idx) <= tl_per_task) ||
				100*(tl + p->load_weight) <= imbalance*load) {
				/*
				 * This domain has SD_WAKE_AFFINE and
				 * p is cache cold in this domain, and
				 * there is no bad imbalance.
				 */
				schedstat_inc(this_sd, ttwu_move_affine);
				goto out_set_cpu;
			}
		}

		/*
		 * Start passive balancing when half the imbalance_pct
		 * limit is reached.
		 */
		if (this_sd->flags & SD_WAKE_BALANCE) {
			if (imbalance*this_load <= 100*load) {
				schedstat_inc(this_sd, ttwu_move_balance);
				goto out_set_cpu;
			}
		}
	}

	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
out_set_cpu:
	new_cpu = wake_idle(new_cpu, p);
	if (new_cpu != cpu) {
		set_task_cpu(p, new_cpu);
		task_rq_unlock(rq, &flags);
		/* might preempt at this point */
		rq = task_rq_lock(p, &flags);
		old_state = p->state;
		if (!(old_state & state))
			goto out;
		if (p->array)
			goto out_running;

		this_cpu = smp_processor_id();
		cpu = task_cpu(p);
	}

out_activate:
#endif /* CONFIG_SMP */
	if (old_state == TASK_UNINTERRUPTIBLE) {
		rq->nr_uninterruptible--;
		/*
		 * Tasks on involuntary sleep don't earn
		 * sleep_avg beyond just interactive state.
		 */
		p->sleep_type = SLEEP_NONINTERACTIVE;
	} else

	/*
	 * Tasks that have marked their sleep as noninteractive get
	 * woken up with their sleep average not weighted in an
	 * interactive way.
	 */
		if (old_state & TASK_NONINTERACTIVE)
			p->sleep_type = SLEEP_NONINTERACTIVE;


	activate_task(p, rq, cpu == this_cpu);
	/*
	 * Sync wakeups (i.e. those types of wakeups where the waker
	 * has indicated that it will leave the CPU in short order)
	 * don't trigger a preemption, if the woken up task will run on
	 * this cpu. (in this case the 'I will reschedule' promise of
	 * the waker guarantees that the freshly woken up task is going
	 * to be considered on this CPU.)
	 */
	if (!sync || cpu != this_cpu) {
		if (TASK_PREEMPTS_CURR(p, rq))
			resched_task(rq->curr);
	}
	success = 1;

out_running:
	p->state = TASK_RUNNING;
out:
	task_rq_unlock(rq, &flags);

	return success;
}

int fastcall wake_up_process(struct task_struct *p)
{
	return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
				 TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(wake_up_process);

int fastcall wake_up_state(struct task_struct *p, unsigned int state)
{
	return try_to_wake_up(p, state, 0);
}

/*
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 */
void fastcall sched_fork(struct task_struct *p, int clone_flags)
{
	int cpu = get_cpu();

#ifdef CONFIG_SMP
	cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
#endif
	set_task_cpu(p, cpu);

	/*
	 * We mark the process as running here, but have not actually
	 * inserted it onto the runqueue yet. This guarantees that
	 * nobody will actually run it, and a signal or other external
	 * event cannot wake it up and insert it on the runqueue either.
	 */
	p->state = TASK_RUNNING;

	/*
	 * Make sure we do not leak PI boosting priority to the child:
	 */
	p->prio = current->normal_prio;

	INIT_LIST_HEAD(&p->run_list);
	p->array = NULL;
#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	if (unlikely(sched_info_on()))
		memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	p->oncpu = 0;
#endif
#ifdef CONFIG_PREEMPT
	/* Want to start with kernel preemption disabled. */
	task_thread_info(p)->preempt_count = 1;