/*
 *  linux/kernel/fork.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 *  'fork.c' contains the help-routines for the 'fork' system call
 * (see also entry.S and others).
 * Fork is rather simple, once you get the hang of it, but the memory
 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/namespace.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;	/* Handle normal Linux uptimes. */
int nr_threads; 		/* The idle threads do not count.. */

int max_threads;		/* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

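/*
 * Sum the per-CPU process counts over all online CPUs to get the
 * total number of processes in the system.
 */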
int nr_processes(void)
{
	int cpu;
	int total = 0;

	for_each_online_cpu(cpu)
		total += per_cpu(process_counts, cpu);

	return total;
}

#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
# define alloc_task_struct()	kmem_cache_alloc(task_struct_cachep, GFP_KERNEL)
# define free_task_struct(tsk)	kmem_cache_free(task_struct_cachep, (tsk))
static kmem_cache_t *task_struct_cachep;
#endif

/* SLAB cache for signal_struct structures (tsk->signal) */
static kmem_cache_t *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
kmem_cache_t *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
kmem_cache_t *fs_cachep;

/* SLAB cache for vm_area_struct structures */
kmem_cache_t *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static kmem_cache_t *mm_cachep;

void free_task(struct task_struct *tsk)
{
	free_thread_info(tsk->thread_info);
	rt_mutex_debug_task_free(tsk);
	free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

void __put_task_struct(struct task_struct *tsk)
{
	WARN_ON(!(tsk->exit_state & (EXIT_DEAD | EXIT_ZOMBIE)));
	WARN_ON(atomic_read(&tsk->usage));
	WARN_ON(tsk == current);

	security_task_free(tsk);
	free_uid(tsk->user);
	put_group_info(tsk->group_info);
	delayacct_tsk_free(tsk);

	if (!profile_handoff_task(tsk))
		free_task(tsk);
}

void __init fork_init(unsigned long mempages)
{
#ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN	L1_CACHE_BYTES
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL, NULL);
#endif

	/*
	 * The default maximum number of threads is set to a safe
	 * value: the thread structures can take up at most half
	 * of memory.
	 */
	max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

	/*
	 * we need to allow at least 20 threads to boot a system
	 */
	if(max_threads < 20)
		max_threads = 20;

	init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
	init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
	init_task.signal->rlim[RLIMIT_SIGPENDING] =
		init_task.signal->rlim[RLIMIT_NPROC];
}

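/*
 * Allocate a task_struct/thread_info pair for the child and copy the
 * parent into it, after prepare_to_copy() has let the architecture
 * flush any lazily-saved state.  Returns NULL on allocation failure.
 */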
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;

	prepare_to_copy(orig);

	tsk = alloc_task_struct();
	if (!tsk)
		return NULL;

	ti = alloc_thread_info(tsk);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	*tsk = *orig;
	tsk->thread_info = ti;
	setup_thread_stack(tsk, orig);

#ifdef CONFIG_CC_STACKPROTECTOR
	tsk->stack_canary = get_random_int();
#endif

	/* One for us, one for whoever does the "release_task()" (usually parent) */
	atomic_set(&tsk->usage,2);
	atomic_set(&tsk->fs_excl, 0);
#ifdef CONFIG_BLK_DEV_IO_TRACE
	tsk->btrace_seq = 0;
#endif
	tsk->splice_pipe = NULL;
	return tsk;
}

#ifdef CONFIG_MMU
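/*
 * Duplicate the parent's vma list, file mappings and page tables into
 * the new mm.  VM_DONTCOPY vmas are skipped (and un-accounted from the
 * child); VM_ACCOUNT vmas are charged before being copied.
 */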
static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *mpnt, *tmp, **pprev;
	struct rb_node **rb_link, *rb_parent;
	int retval;
	unsigned long charge;
	struct mempolicy *pol;

	down_write(&oldmm->mmap_sem);
	flush_cache_mm(oldmm);
	/*
	 * Not linked in yet - no deadlock potential:
	 */
	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

	mm->locked_vm = 0;
	mm->mmap = NULL;
	mm->mmap_cache = NULL;
	mm->free_area_cache = oldmm->mmap_base;
	mm->cached_hole_size = ~0UL;
	mm->map_count = 0;
	cpus_clear(mm->cpu_vm_mask);
	mm->mm_rb = RB_ROOT;
	rb_link = &mm->mm_rb.rb_node;
	rb_parent = NULL;
	pprev = &mm->mmap;

	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
		struct file *file;

		if (mpnt->vm_flags & VM_DONTCOPY) {
			long pages = vma_pages(mpnt);
			mm->total_vm -= pages;
			vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
								-pages);
			continue;
		}
		charge = 0;
		if (mpnt->vm_flags & VM_ACCOUNT) {
			unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
			if (security_vm_enough_memory(len))
				goto fail_nomem;
			charge = len;
		}
		tmp = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!tmp)
			goto fail_nomem;
		*tmp = *mpnt;
		pol = mpol_copy(vma_policy(mpnt));
		retval = PTR_ERR(pol);
		if (IS_ERR(pol))
			goto fail_nomem_policy;
		vma_set_policy(tmp, pol);
		tmp->vm_flags &= ~VM_LOCKED;
		tmp->vm_mm = mm;
		tmp->vm_next = NULL;
		anon_vma_link(tmp);
		file = tmp->vm_file;
		if (file) {
			struct inode *inode = file->f_dentry->d_inode;
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);
      
			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
			flush_dcache_mmap_lock(file->f_mapping);
			vma_prio_tree_add(tmp, mpnt);
			flush_dcache_mmap_unlock(file->f_mapping);
			spin_unlock(&file->f_mapping->i_mmap_lock);
		}

		/*
		 * Link in the new vma and copy the page table entries.
		 */
		*pprev = tmp;
		pprev = &tmp->vm_next;

		__vma_link_rb(mm, tmp, rb_link, rb_parent);
		rb_link = &tmp->vm_rb.rb_right;
		rb_parent = &tmp->vm_rb;

		mm->map_count++;
		retval = copy_page_range(mm, oldmm, mpnt);

		if (tmp->vm_ops && tmp->vm_ops->open)
			tmp->vm_ops->open(tmp);

		if (retval)
			goto out;
	}
	retval = 0;
out:
	up_write(&mm->mmap_sem);
	flush_tlb_mm(oldmm);
	up_write(&oldmm->mmap_sem);
	return retval;
fail_nomem_policy:
	kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
	retval = -ENOMEM;
	vm_unacct_memory(charge);
	goto out;
}

static inline int mm_alloc_pgd(struct mm_struct * mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct * mm)
{
	pgd_free(mm->pgd);
}
#else
#define dup_mmap(mm, oldmm)	(0)
#define mm_alloc_pgd(mm)	(0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, SLAB_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))

#include <linux/init_task.h>

static struct mm_struct * mm_init(struct mm_struct * mm)
{
	atomic_set(&mm->mm_users, 1);
	atomic_set(&mm->mm_count, 1);
	init_rwsem(&mm->mmap_sem);
	INIT_LIST_HEAD(&mm->mmlist);
	mm->core_waiters = 0;
	mm->nr_ptes = 0;
	set_mm_counter(mm, file_rss, 0);
	set_mm_counter(mm, anon_rss, 0);
	spin_lock_init(&mm->page_table_lock);
	rwlock_init(&mm->ioctx_list_lock);
	mm->ioctx_list = NULL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;

	if (likely(!mm_alloc_pgd(mm))) {
		mm->def_flags = 0;
		return mm;
	}
	free_mm(mm);
	return NULL;
}

/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct * mm_alloc(void)
{
	struct mm_struct * mm;

	mm = allocate_mm();
	if (mm) {
		memset(mm, 0, sizeof(*mm));
		mm = mm_init(mm);
	}
	return mm;
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void fastcall __mmdrop(struct mm_struct *mm)
{
	BUG_ON(mm == &init_mm);
	mm_free_pgd(mm);
	destroy_context(mm);
	free_mm(mm);
}

/*
 * Decrement the use count and release all resources for an mm.
 */
void mmput(struct mm_struct *mm)
{
	might_sleep();

	if (atomic_dec_and_test(&mm->mm_users)) {
		exit_aio(mm);
		exit_mmap(mm);
		if (!list_empty(&mm->mmlist)) {
			spin_lock(&mmlist_lock);
			list_del(&mm->mmlist);
			spin_unlock(&mmlist_lock);
		}
		put_swap_token(mm);
		mmdrop(mm);
	}
}
EXPORT_SYMBOL_GPL(mmput);

/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm.  Also returns %NULL if
 * PF_BORROWED_MM is set (meaning this kernel workthread has
 * transiently adopted a user mm with use_mm, to do its AIO), so
 * callers never see the borrowed mm.  Otherwise returns a reference
 * to the mm after bumping up the use count.  The user must release
 * the mm via mmput() after use.  Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm) {
		if (task->flags & PF_BORROWED_MM)
			mm = NULL;
		else
			atomic_inc(&mm->mm_users);
	}
	task_unlock(task);
	return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * whether on error or on success.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one.  Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
	struct completion *vfork_done = tsk->vfork_done;

	/* Get rid of any cached register state */
	deactivate_mm(tsk, mm);

	/* notify parent sleeping on vfork() */
	if (vfork_done) {
		tsk->vfork_done = NULL;
		complete(vfork_done);
	}
	if (tsk->clear_child_tid && atomic_read(&mm->mm_users) > 1) {
		u32 __user * tidptr = tsk->clear_child_tid;
		tsk->clear_child_tid = NULL;

		/*
		 * We don't check the error code - if userspace has
		 * not set up a proper pointer then tough luck.
		 */
		put_user(0, tidptr);
		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
	}
}

/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 */
static struct mm_struct *dup_mm(struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm = current->mm;
	int err;

	if (!oldmm)
		return NULL;

	mm = allocate_mm();
	if (!mm)
		goto fail_nomem;

	memcpy(mm, oldmm, sizeof(*mm));

	if (!mm_init(mm))
		goto fail_nomem;

	if (init_new_context(tsk, mm))
		goto fail_nocontext;

	err = dup_mmap(mm, oldmm);
	if (err)
		goto free_pt;

	mm->hiwater_rss = get_mm_rss(mm);
	mm->hiwater_vm = mm->total_vm;

	return mm;

free_pt:
	mmput(mm);

fail_nomem:
	return NULL;

fail_nocontext:
	/*
	 * If init_new_context() failed, we cannot use mmput() to free the mm
	 * because it calls destroy_context()
	 */
	mm_free_pgd(mm);
	free_mm(mm);
	return NULL;
}

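/*
 * Give the child its mm.  A kernel-thread parent has no mm, so the
 * child gets none either; with CLONE_VM the parent's mm is shared;
 * otherwise it is duplicated with dup_mm().
 */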
static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
{
	struct mm_struct * mm, *oldmm;
	int retval;

	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal an active VM for that.
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}

static inline struct fs_struct *__copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
	/* We don't need to lock fs - think why ;-) */
	if (fs) {
		atomic_set(&fs->count, 1);
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->rootmnt = mntget(old->rootmnt);
		fs->root = dget(old->root);
		fs->pwdmnt = mntget(old->pwdmnt);
		fs->pwd = dget(old->pwd);
		if (old->altroot) {
			fs->altrootmnt = mntget(old->altrootmnt);
			fs->altroot = dget(old->altroot);
		} else {
			fs->altrootmnt = NULL;
			fs->altroot = NULL;
		}
		read_unlock(&old->lock);
	}
	return fs;
}

struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	return __copy_fs_struct(old);
}

EXPORT_SYMBOL_GPL(copy_fs_struct);

static inline int copy_fs(unsigned long clone_flags, struct task_struct * tsk)
{
	if (clone_flags & CLONE_FS) {
		atomic_inc(&current->fs->count);
		return 0;
	}
	tsk->fs = __copy_fs_struct(current->fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}

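/*
 * Walk the open-fd bitmap backwards to the last word with a bit set
 * and return the number of fd slots (a whole number of words) that
 * need copying.
 */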
static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fdset;
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
	i = (i+1) * 8 * sizeof(long);
	return i;
}

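
/*
 * Allocate a files_struct whose fdtable initially points at the
 * embedded, default-sized fd array and fd sets.
 */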
static struct files_struct *alloc_files(void)
{
	struct files_struct *newf;
	struct fdtable *fdt;

	newf = kmem_cache_alloc(files_cachep, SLAB_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	fdt = &newf->fdtab;
	fdt->max_fds = NR_OPEN_DEFAULT;
	fdt->max_fdset = EMBEDDED_FD_SET_SIZE;
	fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	fdt->open_fds = (fd_set *)&newf->open_fds_init;
	fdt->fd = &newf->fd_array[0];
	INIT_RCU_HEAD(&fdt->rcu);
	fdt->free_files = NULL;
	fdt->next = NULL;
	rcu_assign_pointer(newf->fdt, fdt);
out:
	return newf;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i, expand;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = alloc_files();
	if (!newf)
		goto out;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	new_fdt = files_fdtable(newf);
	size = old_fdt->max_fdset;
	open_files = count_open_files(old_fdt);
	expand = 0;

	/*
	 * Check whether we need to allocate a larger fd array or fd set.
	 * Note: we're not a clone task, so the open count won't change.
	 */
	if (open_files > new_fdt->max_fdset) {
		new_fdt->max_fdset = 0;
		expand = 1;
	}
	if (open_files > new_fdt->max_fds) {
		new_fdt->max_fds = 0;
		expand = 1;
	}

	/* if the old fdset gets grown now, we'll only copy up to "size" fds */
	if (expand) {
		spin_unlock(&oldf->file_lock);
		spin_lock(&newf->file_lock);
		*errorp = expand_files(newf, open_files-1);
		spin_unlock(&newf->file_lock);
		if (*errorp < 0)
			goto out_release;
		new_fdt = files_fdtable(newf);
		/*
		 * Reacquire the oldf lock and a pointer to its fd table:
		 * it may have grown a new, bigger fd table in the meantime,
		 * and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits, old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits, old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fdset > open_files) {
		int left = (new_fdt->max_fdset-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

out:
	return newf;

out_release:
	free_fdset (new_fdt->close_on_exec, new_fdt->max_fdset);
	free_fdset (new_fdt->open_fds, new_fdt->max_fdset);
	free_fd_array(new_fdt->fd, new_fdt->max_fds);
	kmem_cache_free(files_cachep, newf);
	return NULL;
}

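/*
 * With CLONE_FILES the child shares the parent's files_struct;
 * otherwise it gets its own copy via dup_fd().
 */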
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		atomic_inc(&oldf->count);
		goto out;
	}

	/*
	 * Note: we may be using current for both targets (See exec.c)
	 * This works because we cache current->files (old) as oldf. Don't
	 * break this.
	 */
	tsk->files = NULL;
	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}

/*
 *	Helper to unshare the files of the current task.
 *	We don't want to expose copy_files internals to
 *	the exec layer of the kernel.
 */

int unshare_files(void)
{
	struct files_struct *files  = current->files;
	int rc;

	BUG_ON(!files);

	/* This can race, but the race merely causes us to copy when we
	   don't need to, and then drop the copy */
	if (atomic_read(&files->count) == 1) {
		atomic_inc(&files->count);
		return 0;
	}
	rc = copy_files(0, current);
	if (rc)
		current->files = files;
	return rc;
}

EXPORT_SYMBOL(unshare_files);

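/*
 * With CLONE_SIGHAND (or CLONE_THREAD, which implies it) the child
 * shares the parent's signal handler table; otherwise a new table is
 * allocated and the handlers are copied into it.
 */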
static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & (CLONE_SIGHAND | CLONE_THREAD)) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}

void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (atomic_dec_and_test(&sighand->count))
		kmem_cache_free(sighand_cachep, sighand);
}

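/*
 * A CLONE_THREAD child joins the parent's signal_struct; otherwise a
 * fresh one is allocated, thread-group keys are copied and the rlimits
 * are inherited from the group leader.
 */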
static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
{
	struct signal_struct *sig;
	int ret;

	if (clone_flags & CLONE_THREAD) {
		atomic_inc(&current->signal->count);
		atomic_inc(&current->signal->live);
		taskstats_tgid_alloc(current->signal);
		return 0;
	}
	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	ret = copy_thread_group_keys(tsk);
	if (ret < 0) {
		kmem_cache_free(signal_cachep, sig);
		return ret;
	}

	atomic_set(&sig->count, 1);
	atomic_set(&sig->live, 1);
	init_waitqueue_head(&sig->wait_chldexit);
	sig->flags = 0;
	sig->group_exit_code = 0;
	sig->group_exit_task = NULL;
	sig->group_stop_count = 0;
	sig->curr_target = NULL;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL);
	sig->it_real_incr.tv64 = 0;
	sig->real_timer.function = it_real_fn;
	sig->tsk = tsk;

	sig->it_virt_expires = cputime_zero;
	sig->it_virt_incr = cputime_zero;
	sig->it_prof_expires = cputime_zero;
	sig->it_prof_incr = cputime_zero;

	sig->leader = 0;	/* session leadership doesn't inherit */
	sig->tty_old_pgrp = 0;

	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
	sig->sched_time = 0;
	INIT_LIST_HEAD(&sig->cpu_timers[0]);
	INIT_LIST_HEAD(&sig->cpu_timers[1]);
	INIT_LIST_HEAD(&sig->cpu_timers[2]);
	taskstats_tgid_init(sig);

	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		/*
		 * New sole thread in the process gets an expiry time
		 * of the whole CPU time limit.
		 */
		tsk->it_prof_expires =
			secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
	}
	acct_init_pacct(&sig->pacct);

	return 0;
}

void __cleanup_signal(struct signal_struct *sig)
{
	exit_thread_group_keys(sig);
	taskstats_tgid_free(sig);
	kmem_cache_free(signal_cachep, sig);
}

static inline void cleanup_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;

	atomic_dec(&sig->live);

	if (atomic_dec_and_test(&sig->count))
		__cleanup_signal(sig);
}

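/*
 * Compute the child's flags from the parent's: clear PF_SUPERPRIV and
 * PF_NOFREEZE, mark the child as not yet having exec'd, and drop the
 * ptrace linkage unless CLONE_PTRACE was requested.
 */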
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	unsigned long new_flags = p->flags;

	new_flags &= ~(PF_SUPERPRIV | PF_NOFREEZE);
	new_flags |= PF_FORKNOEXEC;
	if (!(clone_flags & CLONE_PTRACE))
		p->ptrace = 0;
	p->flags = new_flags;
}

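/*
 * Record where to clear the child TID on exit: mm_release() will zero
 * *tidptr and do a FUTEX_WAKE on it when this thread dies.
 */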
asmlinkage long sys_set_tid_address(int __user *tidptr)
{
	current->clear_child_tid = tidptr;

	return current->pid;
}

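/*
 * Initialize the priority-inheritance bookkeeping that rt-mutexes keep
 * in the task_struct.
 */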
static inline void rt_mutex_init_task(struct task_struct *p)
{
#ifdef CONFIG_RT_MUTEXES
	spin_lock_init(&p->pi_lock);
	plist_head_init(&p->pi_waiters, &p->pi_lock);
	p->pi_blocked_on = NULL;
#endif
}

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					struct pt_regs *regs,
					unsigned long stack_size,
					int __user *parent_tidptr,
					int __user *child_tidptr,
					int pid)
{
	int retval;
	struct task_struct *p = NULL;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	rt_mutex_init_task(p);

#ifdef CONFIG_TRACE_IRQFLAGS
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->user->processes) >=
			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
				p->user != &root_user)
			goto bad_fork_free;
	}

	atomic_inc(&p->user->__count);
	atomic_inc(&p->user->processes);
	get_group_info(p->group_info);

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	if (p->binfmt && !try_module_get(p->binfmt->module))
		goto bad_fork_cleanup_put_domain;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	p->pid = pid;
	retval = -EFAULT;
	if (clone_flags & CLONE_PARENT_SETTID)
		if (put_user(p->pid, parent_tidptr))
			goto bad_fork_cleanup_delays_binfmt;

	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	clear_tsk_thread_flag(p, TIF_SIGPENDING);
	init_sigpending(&p->pending);

	p->utime = cputime_zero;
	p->stime = cputime_zero;
 	p->sched_time = 0;
	p->rchar = 0;		/* I/O counter: bytes read */
	p->wchar = 0;		/* I/O counter: bytes written */
	p->syscr = 0;		/* I/O counter: read syscalls */
	p->syscw = 0;		/* I/O counter: write syscalls */
	acct_clear_integrals(p);

 	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
 	p->it_sched_expires = 0;
 	INIT_LIST_HEAD(&p->cpu_timers[0]);
 	INIT_LIST_HEAD(&p->cpu_timers[1]);
 	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
	p->security = NULL;
	p->io_context = NULL;
	p->io_wait = NULL;
	p->audit_context = NULL;
	cpuset_fork(p);
#ifdef CONFIG_NUMA
 	p->mempolicy = mpol_copy(p->mempolicy);
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
 		goto bad_fork_cleanup_cpuset;
 	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
	p->hardirqs_enabled = 1;
#else
	p->hardirqs_enabled = 0;
#endif
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif

	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	if ((retval = security_task_alloc(p)))
		goto bad_fork_cleanup_policy;
	if ((retval = audit_alloc(p)))
		goto bad_fork_cleanup_security;
	/* copy all the process information */
	if ((retval = copy_semundo(clone_flags, p)))
		goto bad_fork_cleanup_audit;
	if ((retval = copy_files(clone_flags, p)))
		goto bad_fork_cleanup_semundo;
	if ((retval = copy_fs(clone_flags, p)))
		goto bad_fork_cleanup_files;
	if ((retval = copy_sighand(clone_flags, p)))
		goto bad_fork_cleanup_fs;
	if ((retval = copy_signal(clone_flags, p)))
		goto bad_fork_cleanup_sighand;
	if ((retval = copy_mm(clone_flags, p)))
		goto bad_fork_cleanup_signal;
	if ((retval = copy_keys(clone_flags, p)))
		goto bad_fork_cleanup_mm;
	if ((retval = copy_namespaces(clone_flags, p)))
		goto bad_fork_cleanup_keys;
	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
	if (retval)
		goto bad_fork_cleanup_namespaces;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;

	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing should be turned off in the child regardless
	 * of CLONE_PTRACE.
	 */
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif

	/* Our parent execution domain becomes the current domain.
	   These must match for thread signalling to apply */
	p->parent_exec_id = p->self_exec_id;

	/* ok, now we should be set up.. */
	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
	p->pdeath_signal = 0;
	p->exit_state = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	INIT_LIST_HEAD(&p->ptrace_children);
	INIT_LIST_HEAD(&p->ptrace_list);

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p, clone_flags);

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
	p->ioprio = current->ioprio;

	/*
	 * The task hasn't been attached yet, so its cpus_allowed mask will
	 * not be changed, nor will its assigned CPU.
	 *
	 * The cpus_allowed mask of the parent may have changed after it was
	 * copied the first time - so re-copy it here, then check the child's
	 * CPU to ensure it is on a valid CPU (and if not, just force it back
	 * to the parent's CPU). This avoids a lot of nasty races.
	 */
	p->cpus_allowed = current->cpus_allowed;
	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
			!cpu_online(task_cpu(p))))
		set_task_cpu(p, smp_processor_id());

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
		p->real_parent = current->real_parent;
	else
		p->real_parent = current;
	p->parent = p->real_parent;

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * its process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
 	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_cleanup_namespaces;
	}

	if (clone_flags & CLONE_THREAD) {
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

		if (!cputime_eq(current->signal->it_virt_expires,
				cputime_zero) ||
		    !cputime_eq(current->signal->it_prof_expires,
				cputime_zero) ||
		    current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
		    !list_empty(&current->signal->cpu_timers[0]) ||
		    !list_empty(&current->signal->cpu_timers[1]) ||
		    !list_empty(&current->signal->cpu_timers[2])) {
			/*
			 * Have child wake up on its first tick to check
			 * for process CPU timers.
			 */
			p->it_prof_expires = jiffies_to_cputime(1);
		}
	}

	if (likely(p->pid)) {
		add_parent(p);
		if (unlikely(p->ptrace & PT_PTRACED))
			__ptrace_link(p, current->parent);

		if (thread_group_leader(p)) {
			p->signal->tty = current->signal->tty;
			p->signal->pgrp = process_group(current);
			p->signal->session = current->signal->session;
			attach_pid(p, PIDTYPE_PGID, process_group(p));
			attach_pid(p, PIDTYPE_SID, p->signal->session);

			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__get_cpu_var(process_counts)++;
		}
		attach_pid(p, PIDTYPE_PID, p->pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	write_unlock_irq(&tasklist_lock);
	proc_fork_connector(p);
	return p;

bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_keys:
	exit_keys(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	cleanup_signal(p);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_security:
	security_task_free(p);
bad_fork_cleanup_policy:
#ifdef CONFIG_NUMA
	mpol_free(p->mempolicy);
bad_fork_cleanup_cpuset:
#endif
	cpuset_exit(p);
bad_fork_cleanup_delays_binfmt:
	delayacct_tsk_free(p);
	if (p->binfmt)
		module_put(p->binfmt->module);
bad_fork_cleanup_put_domain:
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	put_group_info(p->group_info);
	atomic_dec(&p->user->processes);
	free_uid(p->user);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}

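/*
 * Weak default for architectures that do not supply their own
 * idle_regs(): hand back a zeroed pt_regs.
 */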
struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
	memset(regs, 0, sizeof(struct pt_regs));
	return regs;
}

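/*
 * Create the idle task for a given CPU: a pid-0 clone sharing the
 * kernel VM, finished off by init_idle().
 */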
struct task_struct * __devinit fork_idle(int cpu)
{
	struct task_struct *task;
	struct pt_regs regs;

	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
	if (!task)
		return ERR_PTR(-ENOMEM);
	init_idle(task, cpu);

	return task;
}

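/*
 * Work out which ptrace event (vfork, clone or fork), if any, the
 * tracer should see for this clone, honouring CLONE_UNTRACED.
 */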
static inline int fork_traceflag (unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;
	else if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else if (current->ptrace & PT_TRACE_FORK)
		return PTRACE_EVENT_FORK;

	return 0;
}

/*
 *  Ok, this is the main fork-routine.
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
 */
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      struct pt_regs *regs,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	struct pid *pid = alloc_pid();
	long nr;

	if (!pid)
		return -EAGAIN;
	nr = pid->nr;
	if (unlikely(current->ptrace)) {
		trace = fork_traceflag (clone_flags);
		if (trace)
			clone_flags |= CLONE_PTRACE;
	}

	p = copy_process(clone_flags, stack_start, regs, stack_size, parent_tidptr, child_tidptr, nr);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
		}

		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
			/*
			 * We'll start up with an immediate SIGSTOP.
			 */
			sigaddset(&p->pending.signal, SIGSTOP);
			set_tsk_thread_flag(p, TIF_SIGPENDING);
		}

		if (!(clone_flags & CLONE_STOPPED))
			wake_up_new_task(p, clone_flags);
		else
			p->state = TASK_STOPPED;

		if (unlikely (trace)) {
			current->ptrace_message = nr;
			ptrace_notify ((trace << 8) | SIGTRAP);
		}

		if (clone_flags & CLONE_VFORK) {
			wait_for_completion(&vfork);
			if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
				current->ptrace_message = nr;
				ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
			}
		}
	} else {
		free_pid(pid);
		nr = PTR_ERR(p);
	}
	return nr;
}

#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif

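/*
 * Slab constructor: initialize the siglock whenever a sighand_struct
 * slab object is created.
 */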
static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
{
	struct sighand_struct *sighand = data;

	if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
					SLAB_CTOR_CONSTRUCTOR)
		spin_lock_init(&sighand->siglock);
}

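/*
 * Create the slab caches backing the per-process structures above.
 * sighand_cachep is SLAB_DESTROY_BY_RCU so that lock-free readers can
 * still take and revalidate ->siglock under rcu_read_lock() even if
 * the sighand is freed and reused concurrently.
 */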
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
			sighand_ctor, NULL);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	files_cachep = kmem_cache_create("files_cache", 
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	fs_cachep = kmem_cache_create("fs_cache", 
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct), 0,
			SLAB_PANIC, NULL, NULL);
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
}


/*
 * Check constraints on flags passed to the unshare system call and
 * force unsharing of additional process context as appropriate.
 */
static inline void check_unshare_flags(unsigned long *flags_ptr)
{
	/*
	 * If unsharing a thread from a thread group, must also
	 * unshare vm.
	 */
	if (*flags_ptr & CLONE_THREAD)
		*flags_ptr |= CLONE_VM;

	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (*flags_ptr & CLONE_VM)
		*flags_ptr |= CLONE_SIGHAND;

	/*
	 * If unsharing signal handlers and the task was created
	 * using CLONE_THREAD, then must unshare the thread
	 */
	if ((*flags_ptr & CLONE_SIGHAND) &&
	    (atomic_read(&current->signal->count) > 1))
		*flags_ptr |= CLONE_THREAD;

	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (*flags_ptr & CLONE_NEWNS)
		*flags_ptr |= CLONE_FS;
}

/*
 * Unsharing of tasks created with CLONE_THREAD is not supported yet
 */
static int unshare_thread(unsigned long unshare_flags)
{
	if (unshare_flags & CLONE_THREAD)
		return -EINVAL;

	return 0;
}

/*
 * Unshare the filesystem structure if it is being shared
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if ((unshare_flags & CLONE_FS) &&
	    (fs && atomic_read(&fs->count) > 1)) {
		*new_fsp = __copy_fs_struct(current->fs);
		if (!*new_fsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unshare the namespace structure if it is being shared
 */
static int unshare_namespace(unsigned long unshare_flags, struct namespace **new_nsp, struct fs_struct *new_fs)
{
	struct namespace *ns = current->nsproxy->namespace;

	if ((unshare_flags & CLONE_NEWNS) && ns) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		*new_nsp = dup_namespace(current, new_fs ? new_fs : current->fs);
		if (!*new_nsp)
			return -ENOMEM;
	}

	return 0;
}

/*
 * Unsharing of sighand for tasks created with CLONE_SIGHAND is not
 * supported yet
 */
static int unshare_sighand(unsigned long unshare_flags, struct sighand_struct **new_sighp)
{
	struct sighand_struct *sigh = current->sighand;

	if ((unshare_flags & CLONE_SIGHAND) &&
	    (sigh && atomic_read(&sigh->count) > 1))
		return -EINVAL;
	else
		return 0;
}

/*
 * Unshare vm if it is being shared
 */
static int unshare_vm(unsigned long unshare_flags, struct mm_struct **new_mmp)
{
	struct mm_struct *mm = current->mm;

	if ((unshare_flags & CLONE_VM) &&
	    (mm && atomic_read(&mm->mm_users) > 1)) {
		return -EINVAL;
	}

	return 0;
}

/*
 * Unshare file descriptor table if it is being shared
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if ((unshare_flags & CLONE_FILES) &&
	    (fd && atomic_read(&fd->count) > 1)) {
		*new_fdp = dup_fd(fd, &error);
		if (!*new_fdp)
			return error;
	}

	return 0;
}

/*
 * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
 * supported yet
 */
static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
{
	if (unshare_flags & CLONE_SYSVSEM)
		return -EINVAL;

	return 0;
}

#ifndef CONFIG_IPC_NS
static inline int unshare_ipcs(unsigned long flags, struct ipc_namespace **ns)
{
	if (flags & CLONE_NEWIPC)
		return -EINVAL;

	return 0;
}
#endif

/*
 * unshare allows a process to 'unshare' part of the process
 * context which was originally shared using clone.  copy_*
 * functions used by do_fork() cannot be used here directly
 * because they modify an inactive task_struct that is being
 * constructed. Here we are modifying the current, active,
 * task_struct.
 */
asmlinkage long sys_unshare(unsigned long unshare_flags)
{
	int err = 0;
	struct fs_struct *fs, *new_fs = NULL;
	struct namespace *ns, *new_ns = NULL;
	struct sighand_struct *sigh, *new_sigh = NULL;
	struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct sem_undo_list *new_ulist = NULL;
	struct nsproxy *new_nsproxy = NULL, *old_nsproxy = NULL;
	struct uts_namespace *uts, *new_uts = NULL;
	struct ipc_namespace *ipc, *new_ipc = NULL;

	check_unshare_flags(&unshare_flags);

	/* Return -EINVAL for all unsupported flags */
	err = -EINVAL;
	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
				CLONE_NEWUTS|CLONE_NEWIPC))
		goto bad_unshare_out;

	if ((err = unshare_thread(unshare_flags)))
		goto bad_unshare_out;
	if ((err = unshare_fs(unshare_flags, &new_fs)))
		goto bad_unshare_cleanup_thread;
	if ((err = unshare_namespace(unshare_flags, &new_ns, new_fs)))
		goto bad_unshare_cleanup_fs;
	if ((err = unshare_sighand(unshare_flags, &new_sigh)))
		goto bad_unshare_cleanup_ns;
	if ((err = unshare_vm(unshare_flags, &new_mm)))
		goto bad_unshare_cleanup_sigh;
	if ((err = unshare_fd(unshare_flags, &new_fd)))
		goto bad_unshare_cleanup_vm;
	if ((err = unshare_semundo(unshare_flags, &new_ulist)))
		goto bad_unshare_cleanup_fd;
	if ((err = unshare_utsname(unshare_flags, &new_uts)))
		goto bad_unshare_cleanup_semundo;
	if ((err = unshare_ipcs(unshare_flags, &new_ipc)))
		goto bad_unshare_cleanup_uts;

	if (new_ns || new_uts || new_ipc) {
		old_nsproxy = current->nsproxy;
		new_nsproxy = dup_namespaces(old_nsproxy);
		if (!new_nsproxy) {
			err = -ENOMEM;
			goto bad_unshare_cleanup_ipc;
		}
	}

	if (new_fs || new_ns || new_sigh || new_mm || new_fd || new_ulist ||
				new_uts || new_ipc) {