#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H

/*
 * cloning flags:
 */
#define CSIGNAL		0x000000ff	/* signal mask to be sent at exit */
#define CLONE_VM	0x00000100	/* set if VM shared between processes */
#define CLONE_FS	0x00000200	/* set if fs info shared between processes */
#define CLONE_FILES	0x00000400	/* set if open files shared between processes */
#define CLONE_SIGHAND	0x00000800	/* set if signal handlers and blocked signals shared */
#define CLONE_PTRACE	0x00002000	/* set if we want to let tracing continue on the child too */
#define CLONE_VFORK	0x00004000	/* set if the parent wants the child to wake it up on mm_release */
#define CLONE_PARENT	0x00008000	/* set if we want to have the same parent as the cloner */
#define CLONE_THREAD	0x00010000	/* Same thread group? */
#define CLONE_NEWNS	0x00020000	/* New namespace group? */
#define CLONE_SYSVSEM	0x00040000	/* share system V SEM_UNDO semantics */
#define CLONE_SETTLS	0x00080000	/* create a new TLS for the child */
#define CLONE_PARENT_SETTID	0x00100000	/* set the TID in the parent */
#define CLONE_CHILD_CLEARTID	0x00200000	/* clear the TID in the child */
#define CLONE_DETACHED		0x00400000	/* Unused, ignored */
#define CLONE_UNTRACED		0x00800000	/* set if the tracing process can't force CLONE_PTRACE on this clone */
#define CLONE_CHILD_SETTID	0x01000000	/* set the TID in the child */
#define CLONE_STOPPED		0x02000000	/* Start in stopped state */
#define CLONE_NEWUTS		0x04000000	/* New utsname group? */
#define CLONE_NEWIPC		0x08000000	/* New ipcs */
#define CLONE_NEWUSER		0x10000000	/* New user namespace */
#define CLONE_NEWPID		0x20000000	/* New pid namespace */
#define CLONE_NEWNET		0x40000000	/* New network namespace */
#define CLONE_IO		0x80000000	/* Clone io context */
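
/*
 * Example (informational sketch, not used below): the clone flags a typical
 * NPTL pthread_create() combines when creating a new thread:
 *
 *	CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD |
 *	CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID |
 *	CLONE_CHILD_CLEARTID
 */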

/*
 * Scheduling policies
 */
#define SCHED_NORMAL		0
#define SCHED_FIFO		1
#define SCHED_RR		2
#define SCHED_BATCH		3
/* SCHED_ISO: reserved but not implemented yet */
#define SCHED_IDLE		5
/* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */
#define SCHED_RESET_ON_FORK     0x40000000

#ifdef __KERNEL__

struct sched_param {
	int sched_priority;
};

#include <asm/param.h>	/* for HZ */

#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>

#include <asm/system.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>

#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/signal.h>
#include <linux/path.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>

#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>
#include <linux/latencytop.h>
#include <linux/cred.h>

#include <asm/processor.h>

struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
struct fs_struct;
struct bts_context;
struct perf_event_context;

/*
 * List of flags we want to share for kernel threads,
 * if only because they are not used by them anyway.
 */
#define CLONE_KERNEL	(CLONE_FS | CLONE_FILES | CLONE_SIGHAND)

/*
 * These are the constants used to fake the fixed-point load-average
 * counting. Some notes:
 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 *    a load-average precision of 10 bits integer + 11 bits fractional
 *  - if you want to count load-averages more often, you need more
 *    precision, or rounding will get you. With 2-second counting freq,
 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 *    11 bit fractions.
 */
extern unsigned long avenrun[];		/* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);

#define FSHIFT		11		/* nr of bits of precision */
#define FIXED_1		(1<<FSHIFT)	/* 1.0 as fixed-point */
#define LOAD_FREQ	(5*HZ+1)	/* 5 sec intervals */
#define EXP_1		1884		/* 1/exp(5sec/1min) as fixed-point */
#define EXP_5		2014		/* 1/exp(5sec/5min) */
#define EXP_15		2037		/* 1/exp(5sec/15min) */

#define CALC_LOAD(load,exp,n) \
	load *= exp; \
	load += n*(FIXED_1-exp); \
	load >>= FSHIFT;
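
/*
 * Example (illustrative sketch only, not used elsewhere): one CALC_LOAD()
 * step for the 1-minute average.  With FIXED_1 == 2048 and EXP_1 == 1884
 * (roughly 2048/exp(5sec/1min)), the old average decays by about 8% every
 * 5 seconds while the current number of runnable tasks, scaled by FIXED_1,
 * is mixed in.
 */
static inline unsigned long calc_load_example(unsigned long load,
					      unsigned long nr_active)
{
	CALC_LOAD(load, EXP_1, nr_active * FIXED_1);
	return load;
}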

extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_iowait(void);
extern void calc_global_load(void);
extern u64 cpu_nr_migrations(int cpu);

extern unsigned long get_parent_ip(unsigned long addr);

struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
extern void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
#else
static inline void
proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
}
static inline void proc_sched_set_task(struct task_struct *p)
{
}
static inline void
print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
}
#endif

extern unsigned long long time_sync_thresh;

/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state is
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_UNINTERRUPTIBLE	2
#define __TASK_STOPPED		4
#define __TASK_TRACED		8
/* in tsk->exit_state */
#define EXIT_ZOMBIE		16
#define EXIT_DEAD		32
/* in tsk->state again */
#define TASK_DEAD		64
#define TASK_WAKEKILL		128
#define TASK_WAKING		256

/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE		(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED		(TASK_WAKEKILL | __TASK_TRACED)
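
/*
 * Example (sketch): a killable sleep - the task is uninterruptible except
 * for fatal signals:
 *
 *	set_current_state(TASK_KILLABLE);
 *	if (!condition)
 *		schedule();
 *	__set_current_state(TASK_RUNNING);
 *	if (fatal_signal_pending(current))
 *		return -ERESTARTSYS;
 */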

/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL		(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL		(TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)

/* get_task_state() */
#define TASK_REPORT		(TASK_RUNNING | TASK_INTERRUPTIBLE | \
				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
				 __TASK_TRACED)

#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task)	\
			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task)	\
				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
				 (task->flags & PF_FREEZING) == 0)

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))

/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	if (do_i_need_to_sleep())
 *		schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value)			\
	do { current->state = (state_value); } while (0)
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))

/* Task command name length */
#define TASK_COMM_LEN 16

#include <linux/spinlock.h>

/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;

struct task_struct;

extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);

extern int runqueue_is_locked(int cpu);
extern void task_rq_unlock_wait(struct task_struct *p);

extern cpumask_var_t nohz_cpu_mask;
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
extern int select_nohz_load_balancer(int cpu);
extern int get_nohz_load_balancer(void);
#else
static inline int select_nohz_load_balancer(int cpu)
{
	return 0;
}
#endif

/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);

static inline void show_state(void)
{
	show_state_filter(0);
}

extern void show_regs(struct pt_regs *);

/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);

void io_schedule(void);
long io_schedule_timeout(long timeout);

extern void cpu_init(void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);

extern void sched_show_task(struct task_struct *p);

#ifdef CONFIG_DETECT_SOFTLOCKUP
extern void softlockup_tick(void);
extern void touch_softlockup_watchdog(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
				    struct file *filp, void __user *buffer,
				    size_t *lenp, loff_t *ppos);
extern unsigned int  softlockup_panic;
extern int softlockup_thresh;
#else
static inline void softlockup_tick(void)
{
}
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
#endif

#ifdef CONFIG_DETECT_HUNG_TASK
extern unsigned int  sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_check_count;
extern unsigned long sysctl_hung_task_timeout_secs;
extern unsigned long sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
					 struct file *filp, void __user *buffer,
					 size_t *lenp, loff_t *ppos);
#endif

/* Attach to any functions which should be ignored in wchan output. */
#define __sched		__attribute__((__section__(".sched.text")))

/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];

/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);

#define	MAX_SCHEDULE_TIMEOUT	LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void __schedule(void);
asmlinkage void schedule(void);
extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);

struct nsproxy;
struct user_namespace;

/*
 * Default maximum number of active map areas; this limits the number of vmas
 * per mm struct. Users can override this number via sysctl, but there is a
 * problem.
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per vma. In ELF the number of sections is represented as an
 * unsigned short, so it must stay below 65535 at coredump time. Because the
 * kernel adds some informative sections to the program image when generating
 * the coredump, we need some margin. The number of extra sections is
 * currently 1-3 and depends on the arch, so we use 5 as a safe margin here.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
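/*
 * With USHORT_MAX == 65535 this works out to a default of 65530 map areas,
 * keeping the five-section margin described above for the extra ELF sections
 * the kernel emits in a coredump.
 */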

extern int sysctl_max_map_count;

#include <linux/aio.h>

extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			  unsigned long len, unsigned long pgoff,
			  unsigned long flags);
extern void arch_unmap_area(struct mm_struct *, unsigned long);
extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);

#if USE_SPLIT_PTLOCKS
/*
 * The mm counters are not protected by the mm's page_table_lock,
 * so they must be incremented atomically.
 */
#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)

#else  /* !USE_SPLIT_PTLOCKS */
/*
 * The mm counters are protected by the mm's page_table_lock,
 * so they can be incremented directly.
 */
#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
#define inc_mm_counter(mm, member) (mm)->_##member++
#define dec_mm_counter(mm, member) (mm)->_##member--

#endif /* !USE_SPLIT_PTLOCKS */
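
/*
 * Example (illustrative sketch only, not used elsewhere): charging one newly
 * mapped anonymous page to an mm, the way the page-fault path accounts it:
 */
static inline void mm_counter_example(struct mm_struct *mm)
{
	inc_mm_counter(mm, anon_rss);
}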

#define get_mm_rss(mm)					\
	(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm)	do {			\
	unsigned long _rss = get_mm_rss(mm);		\
	if ((mm)->hiwater_rss < _rss)			\
		(mm)->hiwater_rss = _rss;		\
} while (0)
#define update_hiwater_vm(mm)	do {			\
	if ((mm)->hiwater_vm < (mm)->total_vm)		\
		(mm)->hiwater_vm = (mm)->total_vm;	\
} while (0)

static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
{
	return max(mm->hiwater_rss, get_mm_rss(mm));
}

static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
{
	return max(mm->hiwater_vm, mm->total_vm);
}

extern void set_dumpable(struct mm_struct *mm, int value);
extern int get_dumpable(struct mm_struct *mm);

/* mm flags */
/* dumpable bits */
#define MMF_DUMPABLE      0  /* core dump is permitted */
#define MMF_DUMP_SECURELY 1  /* core file is readable only by root */

#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)

/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE	2
#define MMF_DUMP_ANON_SHARED	3
#define MMF_DUMP_MAPPED_PRIVATE	4
#define MMF_DUMP_MAPPED_SHARED	5
#define MMF_DUMP_ELF_HEADERS	6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED  8

#define MMF_DUMP_FILTER_SHIFT	MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS	7
#define MMF_DUMP_FILTER_MASK \
	(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
	((1 << MMF_DUMP_ANON_PRIVATE) |	(1 << MMF_DUMP_ANON_SHARED) |\
	 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)

#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF	(1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF	0
#endif
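
/*
 * Worked example: with the bit numbers above, MMF_DUMP_FILTER_DEFAULT is
 * (1 << 2) | (1 << 3) | (1 << 7) == 0x8c, plus bit 6 (0xcc in total) when
 * CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is enabled.
 */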
					/* leave room for more dump flags */
#define MMF_VM_MERGEABLE	16	/* KSM may merge identical pages */

#define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)

struct sighand_struct {
	atomic_t		count;
	struct k_sigaction	action[_NSIG];
	spinlock_t		siglock;
	wait_queue_head_t	signalfd_wqh;
};

struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	cputime_t		ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

/**
 * struct task_cputime - collected CPU time counts
 * @utime:		time spent in user mode, in &cputime_t units
 * @stime:		time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime:	total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are
 * tracked for threads and thread groups.  Most things considering
 * CPU time want to group these counts together and treat all three
 * of them in parallel.
 */
struct task_cputime {
	cputime_t utime;
	cputime_t stime;
	unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define prof_exp	stime
#define virt_exp	utime
#define sched_exp	sum_exec_runtime

#define INIT_CPUTIME	\
	(struct task_cputime) {					\
		.utime = cputime_zero,				\
		.stime = cputime_zero,				\
		.sum_exec_runtime = 0,				\
	}

/*
 * Disable preemption until the scheduler is running.
 * Reset by start_kernel()->sched_init()->init_idle().
 *
 * We include PREEMPT_ACTIVE to prevent cond_resched() from working
 * before the scheduler is active -- see should_resched().
 */
#define INIT_PREEMPT_COUNT	(1 + PREEMPT_ACTIVE)

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime:		thread group interval timers.
 * @running:		non-zero when there are timers running and
 * 			@cputime receives updates.
 * @lock:		lock for fields in this struct.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime cputime;
	int running;
	spinlock_t lock;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	atomic_t		count;
	atomic_t		live;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/* POSIX.1b Interval Timers */
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	struct pid *leader_pid;
	ktime_t it_real_incr;

	/* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
	cputime_t it_prof_expires, it_virt_expires;
	cputime_t it_prof_incr, it_virt_incr;

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

	/* Earliest-expiration cache. */
	struct task_cputime cputime_expires;

	struct list_head cpu_timers[3];

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	cputime_t utime, stime, cutime, cstime;
	cputime_t gtime;
	cputime_t cgtime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader.  (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	int oom_adj;	/* OOM kill score adjustment (bit shift) */
};

/* Context switch must be unlocked if interrupts are to be enabled */
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
# define __ARCH_WANT_UNLOCKED_CTXSW
#endif

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_DEQUEUED	0x00000002 /* stop signal dequeued */
#define SIGNAL_STOP_CONTINUED	0x00000004 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000008 /* group exit in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
	atomic_t __count;	/* reference count */
	atomic_t processes;	/* How many processes does this user have? */
	atomic_t files;		/* How many open files does this user have? */
	atomic_t sigpending;	/* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
	atomic_t inotify_watches; /* How many inotify watches does this user have? */
	atomic_t inotify_devs;	/* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_EPOLL
	atomic_t epoll_watches;	/* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
	/* protected by mq_lock	*/
	unsigned long mq_bytes;	/* How many bytes can be allocated to mqueue? */
#endif
	unsigned long locked_shm; /* How many pages of mlocked shm ? */

#ifdef CONFIG_KEYS
	struct key *uid_keyring;	/* UID specific keyring */
	struct key *session_keyring;	/* UID's default session keyring */
#endif

	/* Hash table maintenance information */
	struct hlist_node uidhash_node;
	uid_t uid;
	struct user_namespace *user_ns;

#ifdef CONFIG_USER_SCHED
	struct task_group *tg;
#ifdef CONFIG_SYSFS
	struct kobject kobj;
	struct delayed_work work;
#endif
#endif

#ifdef CONFIG_PERF_EVENTS
	atomic_long_t locked_vm;
#endif
};

extern int uids_sysfs_init(void);

extern struct user_struct *find_user(uid_t);

extern struct user_struct root_user;
#define INIT_USER (&root_user)


struct backing_dev_info;
struct reclaim_state;

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
struct sched_info {
	/* cumulative counters */
	unsigned long pcount;	      /* # of times run on this cpu */
	unsigned long long run_delay; /* time spent waiting on a runqueue */

	/* timestamps */
	unsigned long long last_arrival,/* when we last ran on a cpu */
			   last_queued;	/* when we were last queued to run */
#ifdef CONFIG_SCHEDSTATS
	/* BKL stats */
	unsigned int bkl_count;
#endif
};
#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */

#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
	spinlock_t	lock;
	unsigned int	flags;	/* Private per-task flags */

	/* For each stat XXX, add following, aligned appropriately
	 *
	 * struct timespec XXX_start, XXX_end;
	 * u64 XXX_delay;
	 * u32 XXX_count;
	 *
	 * Atomicity of updates to XXX_delay, XXX_count protected by
	 * single lock above (split into XXX_lock if contention is an issue).
	 */

	/*
	 * XXX_count is incremented on every XXX operation, the delay
	 * associated with the operation is added to XXX_delay.
	 * XXX_delay contains the accumulated delay time in nanoseconds.
	 */
	struct timespec blkio_start, blkio_end;	/* Shared by blkio, swapin */
	u64 blkio_delay;	/* wait for sync block io completion */
	u64 swapin_delay;	/* wait for swapin block io completion */
	u32 blkio_count;	/* total count of the number of sync block */
				/* io operations performed */
	u32 swapin_count;	/* total count of the number of swapin block */
				/* io operations performed */

	struct timespec freepages_start, freepages_end;
	u64 freepages_delay;	/* wait for memory reclaim */
	u32 freepages_count;	/* total count of memory reclaim */
};
#endif	/* CONFIG_TASK_DELAY_ACCT */

static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
	return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
	extern int delayacct_on;
	return delayacct_on;
#else
	return 0;
#endif
}

enum cpu_idle_type {
	CPU_IDLE,
	CPU_NOT_IDLE,
	CPU_NEWLY_IDLE,
	CPU_MAX_IDLE_TYPES
};

/*
 * sched-domains (multiprocessor balancing) declarations:
 */

/*
 * Increase resolution of nice-level calculations:
 */
#define SCHED_LOAD_SHIFT	10
#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
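/* SCHED_LOAD_SCALE is thus 1024, the load weight of a single nice-0 task. */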

#define SCHED_LOAD_SCALE_FUZZ	SCHED_LOAD_SCALE

#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE		0x0001	/* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE	0x0002	/* Balance when about to become idle */
#define SD_BALANCE_EXEC		0x0004	/* Balance on exec */
#define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
#define SD_BALANCE_WAKE		0x0010  /* Balance on wakeup */
#define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
#define SD_PREFER_LOCAL		0x0040  /* Prefer to keep tasks local to this domain */
#define SD_SHARE_CPUPOWER	0x0080	/* Domain members share cpu power */
#define SD_POWERSAVINGS_BALANCE	0x0100	/* Balance for power savings */
#define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
#define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */

#define SD_PREFER_SIBLING	0x1000	/* Prefer to place tasks in a sibling domain */

enum powersavings_balance_level {
	POWERSAVINGS_BALANCE_NONE = 0,  /* No power saving load balance */
	POWERSAVINGS_BALANCE_BASIC,	/* Fill one thread/core/package
					 * first for long running threads
					 */
	POWERSAVINGS_BALANCE_WAKEUP,	/* Also bias task wakeups to semi-idle
					 * cpu package for power savings
					 */
	MAX_POWERSAVINGS_BALANCE_LEVELS
};

extern int sched_mc_power_savings, sched_smt_power_savings;

static inline int sd_balance_for_mc_power(void)
{
	if (sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

static inline int sd_balance_for_package_power(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_POWERSAVINGS_BALANCE;

	return SD_PREFER_SIBLING;
}

/*
 * Optimise SD flags for power savings:
 * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
 * Keep default SD flags if sched_{smt,mc}_power_saving=0
 */

static inline int sd_power_saving_flags(void)
{
	if (sched_mc_power_savings | sched_smt_power_savings)
		return SD_BALANCE_NEWIDLE;

	return 0;
}

struct sched_group {
	struct sched_group *next;	/* Must be a circular list */

	/*
	 * CPU power of this group, SCHED_LOAD_SCALE being max power for a
	 * single CPU.
	 */
	unsigned int cpu_power;

	/*
	 * The CPUs this group covers.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 *
	 * It can also be embedded into static data structures at build
	 * time. (See 'struct static_sched_group' in kernel/sched.c)
	 */
	unsigned long cpumask[0];
};

static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
{
	return to_cpumask(sg->cpumask);
}

enum sched_domain_level {
	SD_LV_NONE = 0,
	SD_LV_SIBLING,
	SD_LV_MC,
	SD_LV_CPU,
	SD_LV_NODE,
	SD_LV_ALLNODES,
	SD_LV_MAX
};

struct sched_domain_attr {
	int relax_domain_level;
};

#define SD_ATTR_INIT	(struct sched_domain_attr) {	\
	.relax_domain_level = -1,			\
}

struct sched_domain {
	/* These fields must be setup */
	struct sched_domain *parent;	/* top domain must be null terminated */
	struct sched_domain *child;	/* bottom domain must be null terminated */
	struct sched_group *groups;	/* the balancing groups of the domain */
	unsigned long min_interval;	/* Minimum balance interval ms */
	unsigned long max_interval;	/* Maximum balance interval ms */
	unsigned int busy_factor;	/* less balancing by factor if busy */
	unsigned int imbalance_pct;	/* No balance until over watermark */
	unsigned int cache_nice_tries;	/* Leave cache hot tasks for # tries */
	unsigned int busy_idx;
	unsigned int idle_idx;
	unsigned int newidle_idx;
	unsigned int wake_idx;
	unsigned int forkexec_idx;
	unsigned int smt_gain;
	int flags;			/* See SD_* */
	enum sched_domain_level level;

	/* Runtime fields. */
	unsigned long last_balance;	/* init to jiffies. units in jiffies */
	unsigned int balance_interval;	/* initialise to 1. units in ms. */
	unsigned int nr_balance_failed; /* initialise to 0 */

	u64 last_update;

#ifdef CONFIG_SCHEDSTATS
	/* load_balance() stats */
	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];

	/* Active load balancing */
	unsigned int alb_count;
	unsigned int alb_failed;
	unsigned int alb_pushed;

	/* SD_BALANCE_EXEC stats */
	unsigned int sbe_count;
	unsigned int sbe_balanced;
	unsigned int sbe_pushed;

	/* SD_BALANCE_FORK stats */
	unsigned int sbf_count;
	unsigned int sbf_balanced;
	unsigned int sbf_pushed;

	/* try_to_wake_up() stats */
	unsigned int ttwu_wake_remote;
	unsigned int ttwu_move_affine;
	unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
	char *name;
#endif

	/*
	 * Span of all CPUs in this domain.
	 *
	 * NOTE: this field is variable length. (Allocated dynamically
	 * by attaching extra space to the end of the structure,
	 * depending on how many CPUs the kernel has booted up with)
	 *
	 * It can also be embedded into static data structures at build
	 * time. (See 'struct static_sched_domain' in kernel/sched.c)
	 */
	unsigned long span[0];
};

static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
	return to_cpumask(sd->span);
}

extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
				    struct sched_domain_attr *dattr_new);

/* Test a flag in parent sched domain */
static inline int test_sd_parent(struct sched_domain *sd, int flag)
{
	if (sd->parent && (sd->parent->flags & flag))
		return 1;

	return 0;
}

unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu);
unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu);

#else /* CONFIG_SMP */

struct sched_domain_attr;

static inline void
partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
			struct sched_domain_attr *dattr_new)
{
}
#endif	/* !CONFIG_SMP */


struct io_context;			/* See blkdev.h */


#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif

struct audit_context;		/* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;

struct rq;
struct sched_domain;

/*
 * wake flags
 */
#define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
#define WF_FORK		0x02		/* child wakeup after fork */

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
	void (*yield_task) (struct rq *rq);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	int  (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);

	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);
	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
	void (*post_schedule) (struct rq *this_rq);
	void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);

	void (*set_cpus_allowed)(struct task_struct *p,
				 const struct cpumask *newmask);

	void (*rq_online)(struct rq *rq);
	void (*rq_offline)(struct rq *rq);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
	void (*task_new) (struct rq *rq, struct task_struct *p);

	void (*switched_from) (struct rq *this_rq, struct task_struct *task,
			       int running);
	void (*switched_to) (struct rq *this_rq, struct task_struct *task,
			     int running);
	void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
			     int oldprio, int running);

	unsigned int (*get_rr_interval) (struct task_struct *task);

#ifdef CONFIG_FAIR_GROUP_SCHED
	void (*moved_group) (struct task_struct *p);
#endif
};

struct load_weight {
	unsigned long weight, inv_weight;
};

/*
 * CFS stats for a schedulable entity (task, task-group etc)
 *
 * Current field usage histogram:
 *
 *     4 se->block_start
 *     4 se->run_node
 *     4 se->sleep_start
 *     6 se->load.weight
 */
struct sched_entity {
	struct load_weight	load;		/* for load-balancing */
	struct rb_node		run_node;
	struct list_head	group_node;
	unsigned int		on_rq;

	u64			exec_start;
	u64			sum_exec_runtime;
	u64			vruntime;
	u64			prev_sum_exec_runtime;

	u64			last_wakeup;
	u64			avg_overlap;

	u64			nr_migrations;

	u64			start_runtime;
	u64			avg_wakeup;

	u64			avg_running;