Commit 5f921ae9 authored by Ingo Molnar, committed by Linus Torvalds

[PATCH] sem2mutex: ipc, id.sem

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 14cc3e2b
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <asm/semaphore.h> #include <linux/mutex.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "util.h" #include "util.h"
......
...@@ -25,6 +25,8 @@ ...@@ -25,6 +25,8 @@
#include <linux/netlink.h> #include <linux/netlink.h>
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <linux/signal.h> #include <linux/signal.h>
#include <linux/mutex.h>
#include <net/sock.h> #include <net/sock.h>
#include "util.h" #include "util.h"
...@@ -760,7 +762,7 @@ out_unlock: ...@@ -760,7 +762,7 @@ out_unlock:
* The receiver accepts the message and returns without grabbing the queue * The receiver accepts the message and returns without grabbing the queue
* spinlock. Therefore an intermediate STATE_PENDING state and memory barriers * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
* are necessary. The same algorithm is used for sysv semaphores, see * are necessary. The same algorithm is used for sysv semaphores, see
* ipc/sem.c for more details. * ipc/sem.c for more details.
* *
* The same algorithm is used for senders. * The same algorithm is used for senders.
*/ */
......
...@@ -28,6 +28,8 @@ ...@@ -28,6 +28,8 @@
#include <linux/syscalls.h> #include <linux/syscalls.h>
#include <linux/audit.h> #include <linux/audit.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/mutex.h>
#include <asm/current.h> #include <asm/current.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "util.h" #include "util.h"
...@@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue* msq, int res) ...@@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue* msq, int res)
* removes the message queue from message queue ID * removes the message queue from message queue ID
* array, and cleans up all the messages associated with this queue. * array, and cleans up all the messages associated with this queue.
* *
* msg_ids.sem and the spinlock for this message queue are held * msg_ids.mutex and the spinlock for this message queue are held
* before freeque() is called. msg_ids.sem remains locked on exit. * before freeque() is called. msg_ids.mutex remains locked on exit.
*/ */
static void freeque (struct msg_queue *msq, int id) static void freeque (struct msg_queue *msq, int id)
{ {
...@@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg) ...@@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
int id, ret = -EPERM; int id, ret = -EPERM;
struct msg_queue *msq; struct msg_queue *msq;
down(&msg_ids.sem); mutex_lock(&msg_ids.mutex);
if (key == IPC_PRIVATE) if (key == IPC_PRIVATE)
ret = newque(key, msgflg); ret = newque(key, msgflg);
else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */ else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */
...@@ -231,7 +233,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg) ...@@ -231,7 +233,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg)
} }
msg_unlock(msq); msg_unlock(msq);
} }
up(&msg_ids.sem); mutex_unlock(&msg_ids.mutex);
return ret; return ret;
} }
...@@ -361,7 +363,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) ...@@ -361,7 +363,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
msginfo.msgmnb = msg_ctlmnb; msginfo.msgmnb = msg_ctlmnb;
msginfo.msgssz = MSGSSZ; msginfo.msgssz = MSGSSZ;
msginfo.msgseg = MSGSEG; msginfo.msgseg = MSGSEG;
down(&msg_ids.sem); mutex_lock(&msg_ids.mutex);
if (cmd == MSG_INFO) { if (cmd == MSG_INFO) {
msginfo.msgpool = msg_ids.in_use; msginfo.msgpool = msg_ids.in_use;
msginfo.msgmap = atomic_read(&msg_hdrs); msginfo.msgmap = atomic_read(&msg_hdrs);
...@@ -372,7 +374,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) ...@@ -372,7 +374,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
msginfo.msgtql = MSGTQL; msginfo.msgtql = MSGTQL;
} }
max_id = msg_ids.max_id; max_id = msg_ids.max_id;
up(&msg_ids.sem); mutex_unlock(&msg_ids.mutex);
if (copy_to_user (buf, &msginfo, sizeof(struct msginfo))) if (copy_to_user (buf, &msginfo, sizeof(struct msginfo)))
return -EFAULT; return -EFAULT;
return (max_id < 0) ? 0: max_id; return (max_id < 0) ? 0: max_id;
...@@ -435,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) ...@@ -435,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
return -EINVAL; return -EINVAL;
} }
down(&msg_ids.sem); mutex_lock(&msg_ids.mutex);
msq = msg_lock(msqid); msq = msg_lock(msqid);
err=-EINVAL; err=-EINVAL;
if (msq == NULL) if (msq == NULL)
...@@ -489,7 +491,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) ...@@ -489,7 +491,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf)
} }
err = 0; err = 0;
out_up: out_up:
up(&msg_ids.sem); mutex_unlock(&msg_ids.mutex);
return err; return err;
out_unlock_up: out_unlock_up:
msg_unlock(msq); msg_unlock(msq);
......
...@@ -75,6 +75,8 @@ ...@@ -75,6 +75,8 @@
#include <linux/audit.h> #include <linux/audit.h>
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/mutex.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "util.h" #include "util.h"
...@@ -139,7 +141,7 @@ void __init sem_init (void) ...@@ -139,7 +141,7 @@ void __init sem_init (void)
* * if it's IN_WAKEUP, then it must wait until the value changes * * if it's IN_WAKEUP, then it must wait until the value changes
* * if it's not -EINTR, then the operation was completed by * * if it's not -EINTR, then the operation was completed by
* update_queue. semtimedop can return queue.status without * update_queue. semtimedop can return queue.status without
* performing any operation on the semaphore array. * performing any operation on the sem array.
* * otherwise it must acquire the spinlock and check what's up. * * otherwise it must acquire the spinlock and check what's up.
* *
* The two-stage algorithm is necessary to protect against the following * The two-stage algorithm is necessary to protect against the following
...@@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg) ...@@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
if (nsems < 0 || nsems > sc_semmsl) if (nsems < 0 || nsems > sc_semmsl)
return -EINVAL; return -EINVAL;
down(&sem_ids.sem); mutex_lock(&sem_ids.mutex);
if (key == IPC_PRIVATE) { if (key == IPC_PRIVATE) {
err = newary(key, nsems, semflg); err = newary(key, nsems, semflg);
...@@ -242,7 +244,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg) ...@@ -242,7 +244,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg)
sem_unlock(sma); sem_unlock(sma);
} }
up(&sem_ids.sem); mutex_unlock(&sem_ids.mutex);
return err; return err;
} }
...@@ -437,8 +439,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) ...@@ -437,8 +439,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum)
return semzcnt; return semzcnt;
} }
/* Free a semaphore set. freeary() is called with sem_ids.sem down and /* Free a semaphore set. freeary() is called with sem_ids.mutex locked and
* the spinlock for this semaphore set held. sem_ids.sem remains locked * the spinlock for this semaphore set held. sem_ids.mutex remains locked
* on exit. * on exit.
*/ */
static void freeary (struct sem_array *sma, int id) static void freeary (struct sem_array *sma, int id)
...@@ -525,7 +527,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu ...@@ -525,7 +527,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
seminfo.semmnu = SEMMNU; seminfo.semmnu = SEMMNU;
seminfo.semmap = SEMMAP; seminfo.semmap = SEMMAP;
seminfo.semume = SEMUME; seminfo.semume = SEMUME;
down(&sem_ids.sem); mutex_lock(&sem_ids.mutex);
if (cmd == SEM_INFO) { if (cmd == SEM_INFO) {
seminfo.semusz = sem_ids.in_use; seminfo.semusz = sem_ids.in_use;
seminfo.semaem = used_sems; seminfo.semaem = used_sems;
...@@ -534,7 +536,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu ...@@ -534,7 +536,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu
seminfo.semaem = SEMAEM; seminfo.semaem = SEMAEM;
} }
max_id = sem_ids.max_id; max_id = sem_ids.max_id;
up(&sem_ids.sem); mutex_unlock(&sem_ids.mutex);
if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo)))
return -EFAULT; return -EFAULT;
return (max_id < 0) ? 0: max_id; return (max_id < 0) ? 0: max_id;
...@@ -885,9 +887,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) ...@@ -885,9 +887,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
return err; return err;
case IPC_RMID: case IPC_RMID:
case IPC_SET: case IPC_SET:
down(&sem_ids.sem); mutex_lock(&sem_ids.mutex);
err = semctl_down(semid,semnum,cmd,version,arg); err = semctl_down(semid,semnum,cmd,version,arg);
up(&sem_ids.sem); mutex_unlock(&sem_ids.mutex);
return err; return err;
default: default:
return -EINVAL; return -EINVAL;
...@@ -1299,9 +1301,9 @@ found: ...@@ -1299,9 +1301,9 @@ found:
/* perform adjustments registered in u */ /* perform adjustments registered in u */
nsems = sma->sem_nsems; nsems = sma->sem_nsems;
for (i = 0; i < nsems; i++) { for (i = 0; i < nsems; i++) {
struct sem * sem = &sma->sem_base[i]; struct sem * semaphore = &sma->sem_base[i];
if (u->semadj[i]) { if (u->semadj[i]) {
sem->semval += u->semadj[i]; semaphore->semval += u->semadj[i];
/* /*
* Range checks of the new semaphore value, * Range checks of the new semaphore value,
* not defined by sus: * not defined by sus:
...@@ -1315,11 +1317,11 @@ found: ...@@ -1315,11 +1317,11 @@ found:
* *
* Manfred <manfred@colorfullife.com> * Manfred <manfred@colorfullife.com>
*/ */
if (sem->semval < 0) if (semaphore->semval < 0)
sem->semval = 0; semaphore->semval = 0;
if (sem->semval > SEMVMX) if (semaphore->semval > SEMVMX)
sem->semval = SEMVMX; semaphore->semval = SEMVMX;
sem->sempid = current->tgid; semaphore->sempid = current->tgid;
} }
} }
sma->sem_otime = get_seconds(); sma->sem_otime = get_seconds();
......
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include <linux/capability.h> #include <linux/capability.h>
#include <linux/ptrace.h> #include <linux/ptrace.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
#include <linux/mutex.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -109,7 +110,7 @@ static void shm_open (struct vm_area_struct *shmd) ...@@ -109,7 +110,7 @@ static void shm_open (struct vm_area_struct *shmd)
* *
* @shp: struct to free * @shp: struct to free
* *
* It has to be called with shp and shm_ids.sem locked, * It has to be called with shp and shm_ids.mutex locked,
* but returns with shp unlocked and freed. * but returns with shp unlocked and freed.
*/ */
static void shm_destroy (struct shmid_kernel *shp) static void shm_destroy (struct shmid_kernel *shp)
...@@ -139,7 +140,7 @@ static void shm_close (struct vm_area_struct *shmd) ...@@ -139,7 +140,7 @@ static void shm_close (struct vm_area_struct *shmd)
int id = file->f_dentry->d_inode->i_ino; int id = file->f_dentry->d_inode->i_ino;
struct shmid_kernel *shp; struct shmid_kernel *shp;
down (&shm_ids.sem); mutex_lock(&shm_ids.mutex);
/* remove from the list of attaches of the shm segment */ /* remove from the list of attaches of the shm segment */
if(!(shp = shm_lock(id))) if(!(shp = shm_lock(id)))
BUG(); BUG();
...@@ -151,7 +152,7 @@ static void shm_close (struct vm_area_struct *shmd) ...@@ -151,7 +152,7 @@ static void shm_close (struct vm_area_struct *shmd)
shm_destroy (shp); shm_destroy (shp);
else else
shm_unlock(shp); shm_unlock(shp);
up (&shm_ids.sem); mutex_unlock(&shm_ids.mutex);
} }
static int shm_mmap(struct file * file, struct vm_area_struct * vma) static int shm_mmap(struct file * file, struct vm_area_struct * vma)
...@@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) ...@@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
struct shmid_kernel *shp; struct shmid_kernel *shp;
int err, id = 0; int err, id = 0;
down(&shm_ids.sem); mutex_lock(&shm_ids.mutex);
if (key == IPC_PRIVATE) { if (key == IPC_PRIVATE) {
err = newseg(key, shmflg, size); err = newseg(key, shmflg, size);
} else if ((id = ipc_findkey(&shm_ids, key)) == -1) { } else if ((id = ipc_findkey(&shm_ids, key)) == -1) {
...@@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) ...@@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg)
} }
shm_unlock(shp); shm_unlock(shp);
} }
up(&shm_ids.sem); mutex_unlock(&shm_ids.mutex);
return err; return err;
} }
...@@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) ...@@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
return err; return err;
memset(&shm_info,0,sizeof(shm_info)); memset(&shm_info,0,sizeof(shm_info));
down(&shm_ids.sem); mutex_lock(&shm_ids.mutex);
shm_info.used_ids = shm_ids.in_use; shm_info.used_ids = shm_ids.in_use;
shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp); shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp);
shm_info.shm_tot = shm_tot; shm_info.shm_tot = shm_tot;
shm_info.swap_attempts = 0; shm_info.swap_attempts = 0;
shm_info.swap_successes = 0; shm_info.swap_successes = 0;
err = shm_ids.max_id; err = shm_ids.max_id;
up(&shm_ids.sem); mutex_unlock(&shm_ids.mutex);
if(copy_to_user (buf, &shm_info, sizeof(shm_info))) { if(copy_to_user (buf, &shm_info, sizeof(shm_info))) {
err = -EFAULT; err = -EFAULT;
goto out; goto out;
...@@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) ...@@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
* Instead we set a destroyed flag, and then blow * Instead we set a destroyed flag, and then blow
* the name away when the usage hits zero. * the name away when the usage hits zero.
*/ */
down(&shm_ids.sem); mutex_lock(&shm_ids.mutex);
shp = shm_lock(shmid); shp = shm_lock(shmid);
err = -EINVAL; err = -EINVAL;
if (shp == NULL) if (shp == NULL)
...@@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) ...@@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
shm_unlock(shp); shm_unlock(shp);
} else } else
shm_destroy (shp); shm_destroy (shp);
up(&shm_ids.sem); mutex_unlock(&shm_ids.mutex);
goto out; goto out;
} }
...@@ -620,12 +621,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) ...@@ -620,12 +621,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
err = -EFAULT; err = -EFAULT;
goto out; goto out;
} }
down(&shm_ids.sem); mutex_lock(&shm_ids.mutex);
shp = shm_lock(shmid); shp = shm_lock(shmid);
err=-EINVAL; err=-EINVAL;
if(shp==NULL) if(shp==NULL)
goto out_up; goto out_up;
if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm)))) if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid,
setbuf.mode, &(shp->shm_perm))))
goto out_unlock_up; goto out_unlock_up;
err = shm_checkid(shp,shmid); err = shm_checkid(shp,shmid);
if(err) if(err)
...@@ -658,7 +660,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) ...@@ -658,7 +660,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
out_unlock_up: out_unlock_up:
shm_unlock(shp); shm_unlock(shp);
out_up: out_up:
up(&shm_ids.sem); mutex_unlock(&shm_ids.mutex);
goto out; goto out;
out_unlock: out_unlock:
shm_unlock(shp); shm_unlock(shp);
...@@ -771,7 +773,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) ...@@ -771,7 +773,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
invalid: invalid:
up_write(&current->mm->mmap_sem); up_write(&current->mm->mmap_sem);
down (&shm_ids.sem); mutex_lock(&shm_ids.mutex);
if(!(shp = shm_lock(shmid))) if(!(shp = shm_lock(shmid)))
BUG(); BUG();
shp->shm_nattch--; shp->shm_nattch--;
...@@ -780,7 +782,7 @@ invalid: ...@@ -780,7 +782,7 @@ invalid:
shm_destroy (shp); shm_destroy (shp);
else else
shm_unlock(shp); shm_unlock(shp);
up (&shm_ids.sem); mutex_unlock(&shm_ids.mutex);
*raddr = (unsigned long) user_addr; *raddr = (unsigned long) user_addr;
err = 0; err = 0;
......
...@@ -68,7 +68,8 @@ __initcall(ipc_init); ...@@ -68,7 +68,8 @@ __initcall(ipc_init);
void __init ipc_init_ids(struct ipc_ids* ids, int size) void __init ipc_init_ids(struct ipc_ids* ids, int size)
{ {
int i; int i;
sema_init(&ids->sem,1);
mutex_init(&ids->mutex);
if(size > IPCMNI) if(size > IPCMNI)
size = IPCMNI; size = IPCMNI;
...@@ -138,7 +139,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header, ...@@ -138,7 +139,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
* @ids: Identifier set * @ids: Identifier set
* @key: The key to find * @key: The key to find
* *
* Requires ipc_ids.sem locked. * Requires ipc_ids.mutex locked.
* Returns the identifier if found or -1 if not. * Returns the identifier if found or -1 if not.
*/ */
...@@ -150,7 +151,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key) ...@@ -150,7 +151,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
/* /*
* rcu_dereference() is not needed here * rcu_dereference() is not needed here
* since ipc_ids.sem is held * since ipc_ids.mutex is held
*/ */
for (id = 0; id <= max_id; id++) { for (id = 0; id <= max_id; id++) {
p = ids->entries->p[id]; p = ids->entries->p[id];
...@@ -163,7 +164,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key) ...@@ -163,7 +164,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key)
} }
/* /*
* Requires ipc_ids.sem locked * Requires ipc_ids.mutex locked
*/ */
static int grow_ary(struct ipc_ids* ids, int newsize) static int grow_ary(struct ipc_ids* ids, int newsize)
{ {
...@@ -210,7 +211,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize) ...@@ -210,7 +211,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize)
* is returned. The list is returned in a locked state on success. * is returned. The list is returned in a locked state on success.
* On failure the list is not locked and -1 is returned. * On failure the list is not locked and -1 is returned.
* *
* Called with ipc_ids.sem held. * Called with ipc_ids.mutex held.
*/ */
int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
...@@ -221,7 +222,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) ...@@ -221,7 +222,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
/* /*
* rcu_dereference() is not needed here since * rcu_dereference() is not needed here since
* ipc_ids.sem is held * ipc_ids.mutex is held
*/ */
for (id = 0; id < size; id++) { for (id = 0; id < size; id++) {
if(ids->entries->p[id] == NULL) if(ids->entries->p[id] == NULL)
...@@ -257,7 +258,7 @@ found: ...@@ -257,7 +258,7 @@ found:
* fed an invalid identifier. The entry is removed and internal * fed an invalid identifier. The entry is removed and internal
* variables recomputed. The object associated with the identifier * variables recomputed. The object associated with the identifier
* is returned. * is returned.
* ipc_ids.sem and the spinlock for this ID are held before this function * ipc_ids.mutex and the spinlock for this ID are held before this function
* is called, and remain locked on the exit. * is called, and remain locked on the exit.
*/ */
...@@ -270,7 +271,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id) ...@@ -270,7 +271,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
/* /*
* do not need a rcu_dereference() here to force ordering * do not need a rcu_dereference() here to force ordering
* on Alpha, since the ipc_ids.sem is held. * on Alpha, since the ipc_ids.mutex is held.
*/ */
p = ids->entries->p[lid]; p = ids->entries->p[lid];
ids->entries->p[lid] = NULL; ids->entries->p[lid] = NULL;
...@@ -530,13 +531,13 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) ...@@ -530,13 +531,13 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
/* /*
* So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get() * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
* is called with shm_ids.sem locked. Since grow_ary() is also called with * is called with shm_ids.mutex locked. Since grow_ary() is also called with
* shm_ids.sem down(for Shared Memory), there is no need to add read * shm_ids.mutex down(for Shared Memory), there is no need to add read
* barriers here to guarantee the writes in grow_ary() are seen in order * barriers here to guarantee the writes in grow_ary() are seen in order
* here (for Alpha). * here (for Alpha).
* *
* However ipc_get() itself does not necessary require ipc_ids.sem down. So * However ipc_get() itself does not necessary require ipc_ids.mutex down. So
* if in the future ipc_get() is used by other places without ipc_ids.sem * if in the future ipc_get() is used by other places without ipc_ids.mutex
* down, then ipc_get() needs read memory barriers as ipc_lock() does. * down, then ipc_get() needs read memory barriers as ipc_lock() does.
*/ */
struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id) struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
...@@ -667,7 +668,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) ...@@ -667,7 +668,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
* Take the lock - this will be released by the corresponding * Take the lock - this will be released by the corresponding
* call to stop(). * call to stop().
*/ */
down(&iface->ids->sem); mutex_lock(&iface->ids->mutex);
/* pos < 0 is invalid */ /* pos < 0 is invalid */
if (*pos < 0) if (*pos < 0)
...@@ -697,7 +698,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it) ...@@ -697,7 +698,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it)