#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
/* 0x02000000 was previously the unused CLONE_STOPPED (Start in stopped state)
   and is now available for re-use. */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK     0x40000000

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
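
/*
 * Illustrative sketch of how the macro above is meant to be applied once
 * per LOAD_FREQ interval; "active" is a hypothetical count of runnable +
 * uninterruptible tasks, already scaled to fixed point (the names here are
 * assumptions, not definitions from this header):
 *
 *	unsigned long active = nr_active * FIXED_1;
 *	CALC_LOAD(avenrun[0], EXP_1, active);	 (1-minute average)
 *	CALC_LOAD(avenrun[1], EXP_5, active);	 (5-minute average)
 *	CALC_LOAD(avenrun[2], EXP_15, active);	 (15-minute average)
 */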

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern unsigned long this_cpu_load(void);


extern void calc_global_load(unsigned long ticks);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256
#define TASK_STATE_MAX		512

#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKW"

extern char ___assert_task_state[1 - 2*!!(
		sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_dead(task)	((task)->exit_state != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FREEZING) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);

extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern void select_nohz_load_balancer(int stop_tick);
extern int get_nohz_timer_target(void);
#else
static inline void select_nohz_load_balancer(int stop_tick) { }
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
void lockup_detector_init(void);
#else
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int  sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this number via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, a section is created
 * per vma. In ELF, the number of sections is represented as an unsigned short,
 * so it must be smaller than 65535 at coredump time. Because the kernel adds
 * some informative sections to the program image when generating a coredump,
 * we need some margin. The number of extra sections is currently 1-3 and
 * depends on the arch, so we use "5" as a safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
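/*
 * i.e. 65535 - 5 == 65530 map areas by default; the limit is tunable at
 * run time through sysctl_max_map_count, declared below (vm.max_map_count).
 */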

extern int sysctl_max_map_count;

#include <linux/aio.h>

#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif


extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE		17	/* set when VM_HUGEPAGE is set on vma */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	cputime_t expires;
	cputime_t incr;
	u32 error;
	u32 incr_error;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = cputime_zero,				\
		.stime = cputime_zero,				\
		.sum_exec_runtime = 0,				\
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	spinlock_t lock;
};

#include <linux/rwsem.h>
struct autogroup;

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		sigcnt;
	atomic_t		live;
	int			nr_threads;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined as 0 and 1 respectively
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
	cputime_t prev_utime, prev_stime;
#endif
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif
#ifdef CONFIG_CGROUPS
	/*
	 * The threadgroup_fork_lock prevents threads from forking with
	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
	 * threadgroup-wide operations. It's taken for reading in fork.c in
	 * copy_process().
	 * Currently only needed write-side by cgroups.
	 */
	struct rw_semaphore threadgroup_fork_lock;
#endif

	int oom_adj;		/* OOM kill score adjustment (bit shift) */
	int oom_score_adj;	/* OOM kill score adjustment */
	int oom_score_adj_min;	/* OOM kill score adjustment minimum value.
				 * Only settable by CAP_SYS_RESOURCE. */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace) */
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
	atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
	atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * Increase resolution of nice-level calculations for 64-bit architectures.
 * The extra resolution improves shares distribution and load balancing of
 * low-weight task groups (e.g. nice +19 on an autogroup), deeper taskgroup
 * hierarchies, especially on larger systems. This is not a user-visible change
 * and does not change the user-interface for setting shares/weights.
 *
 * We increase resolution only if we have enough bits to allow this increased
 * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
 * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
 * increased costs.
 */
#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load  */
# define SCHED_LOAD_RESOLUTION	10
# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
#else
# define SCHED_LOAD_RESOLUTION	0
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif
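
/*
 * For illustration (an assumed example, not a definition from this file):
 * a nice-0 task has a load weight of 1024.  With SCHED_LOAD_RESOLUTION == 10
 * the scheduler would internally work with scale_load(1024) == 1024 << 10
 * and convert back with scale_load_down() before anything user-visible is
 * reported; with the resolution of 0 used above, both macros are no-ops.
 */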

#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)

/*
 * Increase resolution of cpu_power calculations
 */
#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1L << SCHED_POWER_SHIFT)

/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
#define SD_ASYM_PACKING		0x0800  /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP		0x2000	/* sched_domains of this level overlap */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	if (!sched_mc_power_savings)
		return SD_PREFER_SIBLING;

	return 0;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

extern int __weak arch_sd_sibiling_asym_packing(void);

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group_power {
	atomic_t ref;
	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int power, power_orig;
};

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */
	atomic_t ref;

	unsigned int group_weight;
	struct sched_group_power *sgp;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

extern int sched_domain_level_max;

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* See SD_* */
	int level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif
	union {
		void *private;		/* used during construction */
		struct rcu_head rcu;	/* used during destruction */
	};

	unsigned int span_weight;
	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
				    struct sched_domain_attr *dattr_new);

/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
			struct sched_domain_attr *dattr_new)
{
}
#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */
#define WF_MIGRATED	0x04		/* internal use, task got migrated */

#define ENQUEUE_WAKEUP		1
#define ENQUEUE_HEAD		2
#ifdef CONFIG_SMP
#define ENQUEUE_WAKING		4	/* sched_class::task_waking was called */
#else
#define ENQUEUE_WAKING		0
#endif

#define DEQUEUE_SLEEP		1

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags);
	void (*yield_task) (struct rq *rq);
	bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_waking) (struct task_struct *task);
	void (*task_woken) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_fork) (struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio);

	unsigned int (*get_rr_interval) (struct rq *rq,
					 struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*task_move_group) (struct task_struct *p, int on_rq);
#endif
};

struct load_weight {
	unsigned long weight, inv_weight;
};

#ifdef CONFIG_SCHEDSTATS
struct sched_statistics {
	u64			wait_start;
	u64			wait_max;
	u64			wait_count;
	u64			wait_sum;
	u64			iowait_count;
	u64			iowait_sum;

	u64			sleep_start;
	u64			sleep_max;
	s64			sum_sleep_runtime;

	u64			block_start;
	u64			block_max;
	u64			exec_max;
	u64			slice_max;

	u64			nr_migrations_cold;
	u64			nr_failed_migrations_affine;
	u64			nr_failed_migrations_running;
	u64			nr_failed_migrations_hot;
	u64			nr_forced_migrations;

	u64			nr_wakeups;
	u64			nr_wakeups_sync;
	u64			nr_wakeups_migrate;
	u64			nr_wakeups_local;
	u64			nr_wakeups_remote;
	u64			nr_wakeups_affine;
	u64			nr_wakeups_affine_attempts;
	u64			nr_wakeups_passive;
	u64			nr_wakeups_idle;
};
#endif

struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			nr_migrations;

#ifdef CONFIG_SCHEDSTATS
	struct sched_statistics statistics;
#endif

#ifdef CONFIG_FAIR_GROUP_SCHED
	struct sched_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct cfs_rq		*cfs_rq;
	/* rq "owned" by this entity/group: */
	struct cfs_rq		*my_q;
#endif
};

struct sched_rt_entity {
	struct list_head run_list;
	unsigned long timeout;
	unsigned int time_slice;
	int nr_cpus_allowed;

	struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
	struct sched_rt_entity	*parent;
	/* rq on which this entity is (to be) queued: */
	struct rt_rq		*rt_rq;
	/* rq "owned" by this entity/group: */
	struct rt_rq		*my_q;
#endif
};

struct rcu_node;

enum perf_event_task_context {
	perf_invalid_context = -1,
	perf_hw_context = 0,
	perf_sw_context,
	perf_nr_task_contexts,
};

struct task_struct {
	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
	void *stack;
	atomic_t usage;
	unsigned int flags;	/* per process flags, defined below */
	unsigned int ptrace;

#ifdef CONFIG_SMP
	struct task_struct *wake_entry;
	int on_cpu;
#endif
	int on_rq;

	int prio, static_prio, normal_prio;
	unsigned int rt_priority;
	const struct sched_class *sched_class;
	struct sched_entity se;
	struct sched_rt_entity rt;

#ifdef CONFIG_PREEMPT_NOTIFIERS
	/* list of struct preempt_notifier: */
	struct hlist_head preempt_notifiers;
#endif

	/*
	 * fpu_counter contains the number of consecutive context switches
	 * during which the FPU is used. If this is over a threshold, the lazy
	 * FPU saving becomes unlazy to save the trap. This is an unsigned char
	 * so that after 256 times the counter wraps and the behavior turns
	 * lazy again; this is to deal with bursty apps that only use the FPU
	 * for a short time.
	 */
	unsigned char fpu_counter;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	unsigned int btrace_seq;
#endif

	unsigned int policy;
	cpumask_t cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
	int rcu_read_lock_nesting;
	char rcu_read_unlock_special;
#if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU)
	int rcu_boosted;
#endif /* #if defined(CONFIG_RCU_BOOST) && defined(CONFIG_TREE_PREEMPT_RCU) */
	struct list_head rcu_node_entry;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TREE_PREEMPT_RCU
	struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rcu_boost_mutex;
#endif /* #ifdef CONFIG_RCU_BOOST */

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
	struct sched_info sched_info;
#endif

	struct list_head tasks;
#ifdef CONFIG_SMP
	struct plist_node pushable_tasks;
#endif

	struct mm_struct *mm, *active_mm;
#ifdef CONFIG_COMPAT_BRK
	unsigned brk_randomized:1;
#endif
#if defined(SPLIT_RSS_COUNTING)
	struct task_rss_stat	rss_stat;
#endif
/* task state */
	int exit_state;
	int exit_code, exit_signal;
	int pdeath_signal;  /*  The signal sent when the parent dies  */
	unsigned int jobctl;	/* JOBCTL_*, siglock protected */
	/* ??? */
	unsigned int personality;
	unsigned did_exec:1;
	unsigned in_execve:1;	/* Tell the LSMs that the process is doing an
				 * execve */
	unsigned in_iowait:1;


	/* Revert to default priority/policy when forking */
	unsigned sched_reset_on_fork:1;
	unsigned sched_contributes_to_load:1;

	pid_t pid;
	pid_t tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
	/* Canary value for the -fstack-protector gcc feature */
	unsigned long stack_canary;
#endif

	/* 
	 * pointers to (original) parent process, youngest child, younger sibling,
	 * older sibling, respectively.  (p->father can be replaced with 
	 * p->real_parent->pid)
	 */
	struct task_struct *real_parent; /* real parent process */
	struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
	/*
	 * children/sibling forms the list of my natural children
	 */
	struct list_head children;	/* list of my children */
	struct list_head sibling;	/* linkage in my parent's children list */
	struct task_struct *group_leader;	/* threadgroup leader */

	/*
	 * ptraced is the list of tasks this task is using ptrace on.
	 * This includes both natural children and PTRACE_ATTACH targets.
	 * p->ptrace_entry is p's link on the p->parent->ptraced list.
	 */
	struct list_head ptraced;
	struct list_head ptrace_entry;

	/* PID/PID hash table linkage. */
	struct pid_link pids[PIDTYPE_MAX];