/*
* linux/kernel/exit.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/mnt_namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/taskstats_kern.h>
#include <linux/freezer.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
static void exit_mm(struct task_struct * tsk);
static void __unhash_process(struct task_struct *p)
{
nr_threads--;
detach_pid(p, PIDTYPE_PID);
if (thread_group_leader(p)) {
detach_pid(p, PIDTYPE_PGID);
detach_pid(p, PIDTYPE_SID);
list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}

	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
* This function expects the tasklist_lock write-locked.
*/
static void __exit_signal(struct task_struct *tsk)
{
struct signal_struct *sig = tsk->signal;
struct sighand_struct *sighand;
BUG_ON(!sig);
BUG_ON(!atomic_read(&sig->count));
rcu_read_lock();
sighand = rcu_dereference(tsk->sighand);
spin_lock(&sighand->siglock);
posix_cpu_timers_exit(tsk);
if (atomic_dec_and_test(&sig->count))
posix_cpu_timers_exit_group(tsk);
else {
/*
* If there is any task waiting for the group exit
* then notify it:
*/
if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
wake_up_process(sig->group_exit_task);
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
/*
* Accumulate here the counters for all threads but the
* group leader as they die, so they can be added into
* the process-wide totals when those are taken.
* The group leader stays around as a zombie as long
* as there are other threads. When it gets reaped,
* the exit.c code will add its counts into these totals.
* We won't ever get here for the group leader, since it
* will have been the last reference on the signal_struct.
*/
sig->utime = cputime_add(sig->utime, tsk->utime);
sig->stime = cputime_add(sig->stime, tsk->stime);
sig->gtime = cputime_add(sig->gtime, tsk->gtime);
sig->min_flt += tsk->min_flt;
sig->maj_flt += tsk->maj_flt;
sig->nvcsw += tsk->nvcsw;
sig->nivcsw += tsk->nivcsw;
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
sig = NULL; /* Marker for below. */
}
__unhash_process(tsk);
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
rcu_read_unlock();
__cleanup_sighand(sighand);
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending);
if (sig) {
flush_sigqueue(&sig->shared_pending);
__cleanup_signal(sig);
}
}
static void delayed_put_task_struct(struct rcu_head *rhp)
{
put_task_struct(container_of(rhp, struct task_struct, rcu));
}
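/*
 * The helper above is an instance of the usual RCU deferred-free pattern
 * (see the call_rcu() call in release_task() below).  A minimal sketch of
 * the same pattern for a hypothetical object (the foo_example names are
 * illustrative only, not part of exit.c): embed an rcu_head, hand it to
 * call_rcu(), and do the actual free from the callback once every
 * pre-existing RCU reader has finished.
 */
struct foo_example {
	int data;
	struct rcu_head rcu;
};

static void foo_example_free_rcu(struct rcu_head *rhp)
{
	/* Runs after a grace period; no RCU reader can still see the object. */
	kfree(container_of(rhp, struct foo_example, rcu));
}

/* A caller would retire an object with: call_rcu(&obj->rcu, foo_example_free_rcu); */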
void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	proc_flush_task(p);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
__exit_signal(p);
/*
* If we are the last non-leader member of the thread
* group, and the leader is zombie, then notify the
* group leader's parent process. (if it wants notification.)
*/
zap_leader = 0;
leader = p->group_leader;
if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
BUG_ON(leader->exit_signal == -1);
do_notify_parent(leader, leader->exit_signal);
/*
* If we were the last child thread and the leader has
* exited already, and the leader's parent ignores SIGCHLD,
* then we are the one who should release the leader.
*
* do_notify_parent() will have marked it self-reaping in
* that case.
*/
zap_leader = (leader->exit_signal == -1);
}
write_unlock_irq(&tasklist_lock);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
p = leader;
if (unlikely(zap_leader))
goto repeat;
}
/*
* This checks not only the pgrp, but falls back on the pid if no
* satisfactory pgrp is found. I dunno - gdb doesn't work correctly
* without this...
*
 * The caller must hold rcu lock or the tasklist lock.
 */
struct pid *session_of_pgrp(struct pid *pgrp)
{
	struct task_struct *p;
	struct pid *sid = NULL;

p = pid_task(pgrp, PIDTYPE_PGID);
if (p == NULL)
p = pid_task(pgrp, PIDTYPE_PID);
if (p != NULL)
sid = task_session(p);
return sid;
}
/*
* Determine if a process group is "orphaned", according to the POSIX
* definition in 2.2.2.52. Orphaned process groups are not to be affected
* by terminal-generated stop signals. Newly orphaned process groups are
* to receive a SIGHUP and a SIGCONT.
*
* "I ask you, have you ever known what it is to be an orphan?"
*/
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) || p->exit_state
		    || is_global_init(p->real_parent))
			continue;
		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p)) {
			ret = 0;
			break;
		}
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;	/* (sighing) "Often!" */
}
int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
read_unlock(&tasklist_lock);
return retval;
}
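/*
 * Caller sketch (illustrative only, not part of exit.c; the function name is
 * hypothetical): tty input code uses is_current_pgrp_orphaned() to decide
 * whether a background read should fail outright instead of stopping the
 * job, roughly along these lines.
 */
static int example_background_read_check(void)
{
	/* An orphaned pgrp has nobody left to resume it, so SIGTTIN would hang it. */
	if (is_current_pgrp_orphaned())
		return -EIO;
	/* Otherwise stop the whole job and let the read be retried later. */
	kill_pgrp(task_pgrp(current), SIGTTIN, 1);
	return -ERESTARTSYS;
}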
static int has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;
	int retval = 0;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (!task_is_stopped(p))
			continue;
		retval = 1;
		break;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return retval;
}
/*
* Check to see if any process groups have become orphaned as
* a result of our exiting, and if they have any stopped jobs,
* send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
struct pid *pgrp = task_pgrp(tsk);
struct task_struct *ignored_task = tsk;
if (!parent)
/* exit: our father is in a different pgrp than
* we are and we were the only connection outside.
*/
parent = tsk->real_parent;
else
/* reparent: our child is in a different pgrp than
* we are, and it was the only connection outside.
*/
ignored_task = NULL;
if (task_pgrp(parent) != pgrp &&
task_session(parent) == task_session(tsk) &&
will_become_orphaned_pgrp(pgrp, ignored_task) &&
has_stopped_jobs(pgrp)) {
__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
}
}
/*
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
*
* If a kernel thread is launched as a result of a system call, or if
* it ever exits, it should generally reparent itself to kthreadd so it
* isn't in the way of other processes and is correctly cleaned up on exit.
*
* The various task state such as scheduling policy and priority may have
* been inherited from a user process, so we reset them to sane values here.
*
 * NOTE that reparent_to_kthreadd() gives the caller full capabilities.
 */
static void reparent_to_kthreadd(void)
{
write_lock_irq(&tasklist_lock);
ptrace_unlink(current);
/* Reparent to init */
remove_parent(current);
current->real_parent = current->parent = kthreadd_task;
add_parent(current);
/* Set the exit signal to SIGCHLD so we signal init on exit */
current->exit_signal = SIGCHLD;
set_user_nice(current, 0);
/* cpus_allowed? */
/* rt_priority? */
/* signals? */
security_task_reparent_to_init(current);
memcpy(current->signal->rlim, init_task.signal->rlim,
sizeof(current->signal->rlim));
atomic_inc(&(INIT_USER->__count));
write_unlock_irq(&tasklist_lock);
switch_uid(INIT_USER);
}
void __set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;
	pid_t nr = pid_nr(pid);

	if (task_session(curr) != pid) {
		detach_pid(curr, PIDTYPE_SID);
		attach_pid(curr, PIDTYPE_SID, pid);
		set_task_session(curr, nr);
	}
	if (task_pgrp(curr) != pid) {
		detach_pid(curr, PIDTYPE_PGID);
		attach_pid(curr, PIDTYPE_PGID, pid);
		set_task_pgrp(curr, nr);
	}
}

static void set_special_pids(struct pid *pid)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(pid);
	write_unlock_irq(&tasklist_lock);
}
/*
* Let kernel threads use this to say that they
* allow a certain signal (since daemonize() will
* have disabled all of them by default).
*/
int allow_signal(int sig)
{
if (!valid_signal(sig) || sig < 1)
return -EINVAL;
	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
if (!current->mm) {
/* Kernel threads handle their own signals.
Let the signal code know it'll be handled, so
that they don't get converted to SIGKILL or
just silently dropped */
current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
}
recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
return 0;
}
EXPORT_SYMBOL(allow_signal);
int disallow_signal(int sig)
{
if (!valid_signal(sig) || sig < 1)
return -EINVAL;
	spin_lock_irq(&current->sighand->siglock);
current->sighand->action[(sig)-1].sa.sa_handler = SIG_IGN;
recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
return 0;
}
EXPORT_SYMBOL(disallow_signal);
/*
* Put all the gunge required to become a kernel thread without
* attached user resources in one place where it belongs.
*/
void daemonize(const char *name, ...)
{
va_list args;
struct fs_struct *fs;
sigset_t blocked;
va_start(args, name);
vsnprintf(current->comm, sizeof(current->comm), name, args);
va_end(args);
/*
* If we were started as result of loading a module, close all of the
* user space pages. We don't need them, and if we didn't close them
* they would be locked into memory.
*/
exit_mm(current);
/*
* We don't want to have TIF_FREEZE set if the system-wide hibernation
* or suspend transition begins right now.
*/
current->flags |= PF_NOFREEZE;
if (current->nsproxy != &init_nsproxy) {
get_nsproxy(&init_nsproxy);
switch_task_namespaces(current, &init_nsproxy);
}
set_special_pids(&init_struct_pid);
/* Block and flush all signals */
sigfillset(&blocked);
sigprocmask(SIG_BLOCK, &blocked, NULL);
flush_signals(current);
/* Become as one with the init task */
exit_fs(current); /* current->fs->count--; */
fs = init_task.fs;
current->fs = fs;
atomic_inc(&fs->count);
current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_kthreadd();
}
EXPORT_SYMBOL(daemonize);
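/*
 * Usage sketch (illustrative; my_loop_thread and its loop body are
 * hypothetical, not part of exit.c): a pre-kthread-API kernel thread
 * typically detaches from user resources with daemonize() and then
 * re-enables the few signals it wants to react to with allow_signal().
 */
static int my_loop_thread(void *unused)
{
	daemonize("my_loop_thread");
	allow_signal(SIGKILL);

	/* Sleep in one-second slices until somebody sends us SIGKILL. */
	while (!signal_pending(current))
		schedule_timeout_interruptible(HZ);

	return 0;
}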
static void close_files(struct files_struct * files)
{
	int i, j;
	struct fdtable *fdt;

	j = 0;

	/*
	 * It is safe to dereference the fd table without RCU or
	 * ->file_lock because this is the last reference to the
	 * files structure.
	 */
	fdt = files_fdtable(files);
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= fdt->max_fds)
			break;
		set = fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&fdt->fd[i], NULL);
				if (file) {
					filp_close(file, files);
					cond_resched();
				}
			}
			i++;
			set >>= 1;
		}
	}
}
struct files_struct *get_files_struct(struct task_struct *task)
{
struct files_struct *files;
task_lock(task);
files = task->files;
if (files)
atomic_inc(&files->count);
task_unlock(task);
return files;
}
void put_files_struct(struct files_struct *files)
{
	struct fdtable *fdt;

	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 * If the fdtable was embedded, pass files for freeing
		 * at the end of the RCU grace period. Otherwise,
		 * you can free files immediately.
		 */
		fdt = files_fdtable(files);
		if (fdt != &files->fdtab)
			kmem_cache_free(files_cachep, files);
		free_fdtable(fdt);
	}
}
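/*
 * Paired-use sketch (illustrative; example_count_max_fds is hypothetical,
 * not part of exit.c): get_files_struct() takes a counted reference on
 * another task's table so it cannot be freed underneath us, and
 * put_files_struct() drops it, triggering close_files() on the last put.
 */
static int example_count_max_fds(struct task_struct *task)
{
	struct files_struct *files = get_files_struct(task);
	int max_fds = 0;

	if (files) {
		rcu_read_lock();
		max_fds = files_fdtable(files)->max_fds;
		rcu_read_unlock();
		put_files_struct(files);
	}
	return max_fds;
}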
void reset_files_struct(struct task_struct *tsk, struct files_struct *files)
{
struct files_struct *old;
old = tsk->files;
task_lock(tsk);
tsk->files = files;
task_unlock(tsk);
put_files_struct(old);
}
EXPORT_SYMBOL(reset_files_struct);
static void __exit_files(struct task_struct *tsk)
{
struct files_struct * files = tsk->files;
if (files) {
task_lock(tsk);
tsk->files = NULL;
task_unlock(tsk);
put_files_struct(files);
}
}
void exit_files(struct task_struct *tsk)
{
__exit_files(tsk);
}
static void __put_fs_struct(struct fs_struct *fs)
{
/* No need to hold fs->lock if we are killing it */
if (atomic_dec_and_test(&fs->count)) {
path_put(&fs->root);
path_put(&fs->pwd);
if (fs->altroot.dentry)
path_put(&fs->altroot);
kmem_cache_free(fs_cachep, fs);
}
}
void put_fs_struct(struct fs_struct *fs)
{
__put_fs_struct(fs);
}
static void __exit_fs(struct task_struct *tsk)
{
struct fs_struct * fs = tsk->fs;
if (fs) {
task_lock(tsk);
tsk->fs = NULL;
task_unlock(tsk);
__put_fs_struct(fs);
}
}
void exit_fs(struct task_struct *tsk)
{
__exit_fs(tsk);
}
EXPORT_SYMBOL_GPL(exit_fs);
/*
* Turn us into a lazy TLB process if we
* aren't already..
*/
static void exit_mm(struct task_struct * tsk)
{
struct mm_struct *mm = tsk->mm;
mm_release(tsk, mm);
if (!mm)
return;
/*
* Serialize with any possible pending coredump.
* We must hold mmap_sem around checking core_waiters
* and clearing tsk->mm. The core-inducing thread
* will increment core_waiters for each thread in the
* group with ->mm != NULL.
*/
down_read(&mm->mmap_sem);
if (mm->core_waiters) {
up_read(&mm->mmap_sem);
down_write(&mm->mmap_sem);
if (!--mm->core_waiters)
complete(mm->core_startup_done);
up_write(&mm->mmap_sem);
wait_for_completion(&mm->core_done);
down_read(&mm->mmap_sem);
}
atomic_inc(&mm->mm_count);
/* more a memory barrier than a real lock */
task_lock(tsk);
tsk->mm = NULL;
up_read(&mm->mmap_sem);
enter_lazy_tlb(mm, current);
/* We don't want this task to be frozen prematurely */
	clear_freeze_flag(tsk);
	task_unlock(tsk);
	mmput(mm);
}

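/*
 * Reference-count sketch (illustrative; example_mm_refs is hypothetical and
 * assumes the caller already holds a valid mm): mm_users counts real users
 * of the address space and is dropped with mmput(), while mm_count only pins
 * the mm_struct itself and is dropped with mmdrop().  exit_mm() above trades
 * one for the other: it bumps mm_count so the kernel can keep using the mm
 * lazily for TLB purposes after the task drops its mm_users reference.
 */
static void example_mm_refs(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_users);	/* become another address-space user */
	mmput(mm);			/* ... and release that reference */

	atomic_inc(&mm->mm_count);	/* pin just the mm_struct */
	mmdrop(mm);			/* ... and unpin it */
}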
static void
reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
{
if (p->pdeath_signal)
/* We already hold the tasklist_lock here. */
group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
/* Move the child from its dying parent to the new one. */
if (unlikely(traced)) {
/* Preserve ptrace links if someone else is tracing this child. */
list_del_init(&p->ptrace_list);
if (p->parent != p->real_parent)
list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
} else {
/* If this child is being traced, then we're the one tracing it
* anyway, so let go of it.
*/
		p->ptrace = 0;
		remove_parent(p);
		p->parent = p->real_parent;
		add_parent(p);

		if (task_is_traced(p)) {
/*
* If it was at a trace stop, turn it into
* a normal stop since it's no longer being
* traced.
*/
ptrace_untrace(p);
}
}
/* If this is a threaded reparent there is no need to
* notify anyone anything has happened.
*/
if (p->real_parent->group_leader == father->group_leader)
return;
/* We don't want people slaying init. */
if (p->exit_signal != -1)
p->exit_signal = SIGCHLD;
/* If we'd notified the old parent about this child's death,
* also notify the new parent.
*/
if (!traced && p->exit_state == EXIT_ZOMBIE &&
p->exit_signal != -1 && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

	kill_orphaned_pgrp(p, father);
}
/*
* When we die, we re-parent all our children.
* Try to give them to another thread in our thread
* group, and if no such member exists, give it to
* the child reaper process (ie "init") in our pid
 * space.
 */
static void forget_original_parent(struct task_struct *father)
{
	struct task_struct *p, *n, *reaper = father;
struct list_head ptrace_dead;
INIT_LIST_HEAD(&ptrace_dead);
write_lock_irq(&tasklist_lock);
do {
reaper = next_thread(reaper);
if (reaper == father) {
			reaper = task_child_reaper(father);
			break;
		}
	} while (reaper->flags & PF_EXITING);
/*
* There are only two places where our children can be:
*
* - in our child list
* - in our ptraced child list
*
* Search them and reparent children.
*/
list_for_each_entry_safe(p, n, &father->children, sibling) {
int ptrace;
ptrace = p->ptrace;
/* if father isn't the real parent, then ptrace must be enabled */
BUG_ON(father != p->real_parent && !ptrace);
if (father == p->real_parent) {
/* reparent with a reaper, real father it's us */
p->real_parent = reaper;
reparent_thread(p, father, 0);
} else {
/* reparent ptraced task to its real parent */
__ptrace_unlink (p);
if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
thread_group_empty(p))
do_notify_parent(p, p->exit_signal);
}
/*
* if the ptraced child is a zombie with exit_signal == -1
 * we must collect it before we exit, or it will remain
 * a zombie forever, since we prevented it from reaping itself
 * while it was being traced by us, so that we could see it in wait4.
*/
if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, &ptrace_dead);
	}

	list_for_each_entry_safe(p, n, &father->ptrace_children, ptrace_list) {
		p->real_parent = reaper;
		reparent_thread(p, father, 1);
	}
write_unlock_irq(&tasklist_lock);
BUG_ON(!list_empty(&father->children));
BUG_ON(!list_empty(&father->ptrace_children));
list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_list) {
list_del_init(&p->ptrace_list);
release_task(p);
}
}
/*
* Send signals to all our closest relatives so that they know
* to properly mourn us..
*/
static void exit_notify(struct task_struct *tsk)
{
int state;
/*
* This does two things:
*
* A. Make init inherit all the child processes
* B. Check to see if any process groups have become orphaned
* as a result of our exiting, and if they have any stopped
* jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
*/
forget_original_parent(tsk);
exit_task_namespaces(tsk);
write_lock_irq(&tasklist_lock);
	/* Let father know we died
	 *
* Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
* That stops right now.
*
* If the parent exec id doesn't match the exec id we saved
* when we started then we know the parent has changed security
* domain.
*
* If our self_exec id doesn't match our parent_exec_id then
* we have changed execution domain as these two values started
* the same after a fork.
*/
if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
(tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
tsk->self_exec_id != tsk->parent_exec_id)
&& !capable(CAP_KILL))
tsk->exit_signal = SIGCHLD;
/* If something other than our normal parent is ptracing us, then
* send it a SIGCHLD instead of honoring exit_signal. exit_signal
* only has special meaning to our real parent.
*/
if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
do_notify_parent(tsk, signal);
} else if (tsk->ptrace) {
do_notify_parent(tsk, SIGCHLD);
}
state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
		state = EXIT_DEAD;
	tsk->exit_state = state;

if (thread_group_leader(tsk) &&
tsk->signal->notify_count < 0 &&
tsk->signal->group_exit_task)
wake_up_process(tsk->signal->group_exit_task);
write_unlock_irq(&tasklist_lock);
/* If the process is dead, release it - nobody will wait for it */
if (state == EXIT_DEAD)
release_task(tsk);
}
#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
static DEFINE_SPINLOCK(low_water_lock);
static int lowest_to_date = THREAD_SIZE;
unsigned long *n = end_of_stack(current);
unsigned long free;
while (*n == 0)
n++;
free = (unsigned long)n - (unsigned long)end_of_stack(current);
if (free >= lowest_to_date)
return;
spin_lock(&low_water_lock);
if (free < lowest_to_date) {
printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
"left\n",
current->comm, free);
lowest_to_date = free;
}
spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif
static inline void exit_child_reaper(struct task_struct *tsk)
{
	if (likely(tsk->group_leader != task_child_reaper(tsk)))
		return;

if (tsk->nsproxy->pid_ns == &init_pid_ns)
panic("Attempted to kill init!");
/*
* @tsk is the last thread in the 'cgroup-init' and is exiting.
* Terminate all remaining processes in the namespace and reap them
* before exiting @tsk.
*
* Note that @tsk (last thread of cgroup-init) may not necessarily
 * be the child-reaper (i.e. the main thread of cgroup-init) of the
 * namespace, i.e. the child_reaper may have already exited.
*
* Even after a child_reaper exits, we let it inherit orphaned children,
* because, pid_ns->child_reaper remains valid as long as there is
* at least one living sub-thread in the cgroup init.
* This living sub-thread of the cgroup-init will be notified when
* a child inherited by the 'child-reaper' exits (do_notify_parent()
* uses __group_send_sig_info()). Further, when reaping child processes,
* do_wait() iterates over children of all living sub threads.
* i.e even though 'child_reaper' thread is listed as the parent of the
* orphaned children, any living sub-thread in the cgroup-init can
* perform the role of the child_reaper.
*/
	zap_pid_ns_processes(tsk->nsproxy->pid_ns);
}

NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
int group_dead;
profile_task_exit(tsk);
WARN_ON(atomic_read(&tsk->fs_excl));
if (unlikely(in_interrupt()))
panic("Aiee, killing interrupt handler!");
if (unlikely(!tsk->pid))
panic("Attempted to kill the idle task!");
if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
current->ptrace_message = code;
ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
}
/*
* We're taking recursive faults here in do_exit. Safest is to just
* leave this task alone and wait for reboot.
*/
if (unlikely(tsk->flags & PF_EXITING)) {
printk(KERN_ALERT
"Fixing recursive fault but reboot is needed!\n");
/*
* We can do this unlocked here. The futex code uses
* this flag just to verify whether the pi state
* cleanup has been done or not. In the worst case it
* loops once more. We pretend that the cleanup was
* done as there is no way to return. Either the
* OWNER_DIED bit is set by now or we push the blocked
	 * task into the wait-forever nirvana as well.
*/
tsk->flags |= PF_EXITPIDONE;
if (tsk->io_context)
exit_io_context();
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
/*
* tsk->flags are checked in the futex code to protect against
* an exiting task cleaning up the robust pi futexes.
*/
smp_mb();
spin_unlock_wait(&tsk->pi_lock);
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
if (tsk->mm) {
update_hiwater_rss(tsk->mm);
update_hiwater_vm(tsk->mm);
}
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		exit_child_reaper(tsk);
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
	}
	acct_collect(code, group_dead);
if (unlikely(tsk->robust_list))
exit_robust_list(tsk);
if (unlikely(tsk->compat_robust_list))
compat_exit_robust_list(tsk);
if (unlikely(tsk->audit_context))
audit_free(tsk);
tsk->exit_code = code;
taskstats_exit(tsk, group_dead);
acct_process();
exit_sem(tsk);
__exit_files(tsk);
__exit_fs(tsk);
check_stack_usage();
exit_keys(tsk);
if (group_dead && tsk->signal->leader)
disassociate_ctty(1);
module_put(task_thread_info(tsk)->exec_domain->module);
if (tsk->binfmt)
module_put(tsk->binfmt->module);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
/*
* This must happen late, after the PID is not
* hashed anymore:
*/
if (unlikely(!list_empty(&tsk->pi_state_list)))
exit_pi_state_list(tsk);
if (unlikely(current->pi_state_cache))
kfree(current->pi_state_cache);