Break the global IPC locks
From: Mingming Cao <cmm@us.ibm.com>
To: lse-tech@lists.sourceforge.net
Subject: [Lse-tech] [PATCH] Break the global IPC locks
Date: Tue, 09 Jul 2002 18:21:22 -0700
Currently the use of the three types of IPC resources (semaphores, message queues, and shared memory segments) is synchronized by three global locks, respectively. This means, for example, that all semaphores share one global lock. Although the IPC locks are not as heavily used as the BKL, on some applications which use lots of IPC resources (e.g. IPC semaphores), there could be contention. It makes more sense to me if we break the one global lock for all semaphores into one lock per semaphore id (same for message queue id/shared memory id). I attached a patch which implements this idea. Just sharing my thoughts with you. Please send any of your comments to me. Patch applies against 2.5.25, also for 2.4.18. -- Mingming Cao, IBM Linux Technology Center
diff -urN -X ../dontdiff ../base/linux-2.4.17/ipc/util.c ipc/ipc/util.c --- ../base/linux-2.4.17/ipc/util.c Sun Aug 12 17:37:53 2001 +++ ipc/ipc/util.c Mon May 6 15:11:21 2002 @@ -74,9 +74,11 @@ printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n"); ids->size = 0; } - ids->ary = SPIN_LOCK_UNLOCKED; - for(i=0;i<ids->size;i++) + ids->ary =RW_LOCK_UNLOCKED; + for(i=0;i<ids->size;i++) { ids->entries[i].p = NULL; + ids->entries[i].lock = SPIN_LOCK_UNLOCKED; + } } /** @@ -119,14 +121,15 @@ memcpy(new, ids->entries, sizeof(struct ipc_id)*ids->size); for(i=ids->size;i<newsize;i++) { new[i].p = NULL; + new[i].lock = SPIN_LOCK_UNLOCKED; } - spin_lock(&ids->ary); + write_lock(&ids->ary); old = ids->entries; ids->entries = new; i = ids->size; ids->size = newsize; - spin_unlock(&ids->ary); + write_unlock(&ids->ary); ipc_free(old, sizeof(struct ipc_id)*i); return ids->size; } @@ -165,7 +168,8 @@ if(ids->seq > ids->seq_max) ids->seq = 0; - spin_lock(&ids->ary); + read_lock(&ids->ary); + spin_lock(&ids->entries[id].lock); ids->entries[id].p = new; return id; } diff -urN -X ../dontdiff ../base/linux-2.4.17/ipc/util.h ipc/ipc/util.h --- ../base/linux-2.4.17/ipc/util.h Mon Feb 19 10:18:18 2001 +++ ipc/ipc/util.h Mon May 6 15:14:39 
2002 @@ -19,12 +19,13 @@ unsigned short seq; unsigned short seq_max; struct semaphore sem; - spinlock_t ary; + rwlock_t ary; struct ipc_id* entries; }; struct ipc_id { struct kern_ipc_perm* p; + spinlock_t lock; }; @@ -47,7 +48,7 @@ extern inline void ipc_lockall(struct ipc_ids* ids) { - spin_lock(&ids->ary); + read_lock(&ids->ary); } extern inline struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id) @@ -63,7 +64,7 @@ extern inline void ipc_unlockall(struct ipc_ids* ids) { - spin_unlock(&ids->ary); + read_unlock(&ids->ary); } extern inline struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id) { @@ -72,16 +73,22 @@ if(lid >= ids->size) return NULL; - spin_lock(&ids->ary); + read_lock(&ids->ary); + spin_lock(&ids->entries[lid].lock); out = ids->entries[lid].p; - if(out==NULL) - spin_unlock(&ids->ary); + if(out==NULL) { + spin_unlock(&ids->entries[lid].lock); + read_unlock(&ids->ary); + } return out; } extern inline void ipc_unlock(struct ipc_ids* ids, int id) { - spin_unlock(&ids->ary); + int lid = id % SEQ_MULTIPLIER; + if(lid < ids->size) + spin_unlock(&ids->entries[lid].lock); + read_unlock(&ids->ary); } extern inline int ipc_buildid(struct ipc_ids* ids, int id, int seq)