author     Eric Dumazet <dada1@cosmosbay.com>        2006-03-26 01:37:24 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>     2006-03-26 08:56:56 -0800
commit     fa3536cc144c1298f2ed9416c33f3b77fa2cd37a (patch)
tree       5484541319b86ae7dac0def4db7925f7cc7008e7 /fs
parent     878a9f30d7b13015f3aa4534d7877d985f150183 (diff)
download   lwn-fa3536cc144c1298f2ed9416c33f3b77fa2cd37a.tar.gz
           lwn-fa3536cc144c1298f2ed9416c33f3b77fa2cd37a.zip
[PATCH] Use __read_mostly on some hot fs variables
While hunting with oprofile on an SMP platform, I discovered that dentry
lookups were slowed down because d_hash_mask, d_hash_shift and
dentry_hashtable sat in a cache line that also contained inodes_stat.
So each time inodes_stat is changed by one CPU, the other CPUs have to
refill their cache line.

This patch moves some variables to the __read_mostly section in order to
avoid that false sharing. RCU dentry lookups can run at full speed.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
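For background, __read_mostly works by placing the annotated variable in a
dedicated data section, so the linker groups rarely-written variables
together and away from write-hot data such as statistics counters. The
sketch below shows the idea in a stand-alone, user-space form; the macro
name, section name and variables (lookup_mask, lookup_shift, hot_counter)
are illustrative assumptions, not the kernel's actual definitions, which
live in include/linux/cache.h and differ by architecture and version.

/*
 * Illustrative sketch only -- not the kernel header.  A GCC section
 * attribute is the core of the trick: variables carrying it are emitted
 * into their own data section, so the linker keeps them together rather
 * than interleaving them with frequently-written data.
 */
#include <stdio.h>

#define my_read_mostly __attribute__((__section__(".data.read_mostly")))

/* Written once at startup, read on every lookup: good candidates. */
static unsigned int lookup_mask  my_read_mostly = 0xff;
static unsigned int lookup_shift my_read_mostly = 8;

/* Bumped all the time; kept out of the read-mostly section so the cache
 * line it dirties does not also hold lookup_mask/lookup_shift. */
static unsigned long hot_counter;

int main(void)
{
	hot_counter++;
	printf("bucket of 0x1234: %u (counter=%lu)\n",
	       (0x1234u >> lookup_shift) & lookup_mask, hot_counter);
	return 0;
}

Inspecting the resulting object file (for example with readelf -S) should
show a separate .data.read_mostly section holding the two lookup
parameters, which is the same grouping the patch relies on for the fs
variables below.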
Diffstat (limited to 'fs')
-rw-r--r--  fs/bio.c        |  4
-rw-r--r--  fs/block_dev.c  |  4
-rw-r--r--  fs/dcache.c     | 14
-rw-r--r--  fs/dcookies.c   |  6
-rw-r--r--  fs/dnotify.c    |  4
-rw-r--r--  fs/eventpoll.c  |  6
-rw-r--r--  fs/fcntl.c      |  4
-rw-r--r--  fs/inode.c      |  8
-rw-r--r--  fs/inotify.c    | 12
-rw-r--r--  fs/locks.c      |  2
-rw-r--r--  fs/namespace.c  |  4
-rw-r--r--  fs/pipe.c       |  2

12 files changed, 35 insertions, 35 deletions
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -30,7 +30,7 @@
 #define BIO_POOL_SIZE 256
-static kmem_cache_t *bio_slab;
+static kmem_cache_t *bio_slab __read_mostly;
 #define BIOVEC_NR_POOLS 6
@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab;
  * basically we just need to survive
  */
 #define BIO_SPLIT_ENTRIES 8
-mempool_t *bio_split_pool;
+mempool_t *bio_split_pool __read_mostly;
 struct biovec_slab {
 	int nr_vecs;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 573fc8e0b67a..9a451a9ffad4 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -234,7 +234,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
  */
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
-static kmem_cache_t * bdev_cachep;
+static kmem_cache_t * bdev_cachep __read_mostly;
 static struct inode *bdev_alloc_inode(struct super_block *sb)
 {
@@ -308,7 +308,7 @@ static struct file_system_type bd_type = {
 	.kill_sb	= kill_anon_super,
 };
-static struct vfsmount *bd_mnt;
+static struct vfsmount *bd_mnt __read_mostly;
 struct super_block *blockdev_superblock;
 void __init bdev_cache_init(void)
diff --git a/fs/dcache.c b/fs/dcache.c
index 939584648504..aaca5e7970bc 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -36,7 +36,7 @@
 /* #define DCACHE_DEBUG 1 */
-int sysctl_vfs_cache_pressure = 100;
+int sysctl_vfs_cache_pressure __read_mostly = 100;
 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
@@ -44,7 +44,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;
 EXPORT_SYMBOL(dcache_lock);
-static kmem_cache_t *dentry_cache;
+static kmem_cache_t *dentry_cache __read_mostly;
 #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
@@ -59,9 +59,9 @@ static kmem_cache_t *dentry_cache;
 #define D_HASHBITS	d_hash_shift
 #define D_HASHMASK	d_hash_mask
-static unsigned int d_hash_mask;
-static unsigned int d_hash_shift;
-static struct hlist_head *dentry_hashtable;
+static unsigned int d_hash_mask __read_mostly;
+static unsigned int d_hash_shift __read_mostly;
+static struct hlist_head *dentry_hashtable __read_mostly;
 static LIST_HEAD(dentry_unused);
 /* Statistics gathering. */
@@ -1719,10 +1719,10 @@ static void __init dcache_init(unsigned long mempages)
 }
 /* SLAB cache for __getname() consumers */
-kmem_cache_t *names_cachep;
+kmem_cache_t *names_cachep __read_mostly;
 /* SLAB cache for file structures */
-kmem_cache_t *filp_cachep;
+kmem_cache_t *filp_cachep __read_mostly;
 EXPORT_SYMBOL(d_genocide);
diff --git a/fs/dcookies.c b/fs/dcookies.c
index ef758cfa5565..8749339bf4f6 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -38,9 +38,9 @@ struct dcookie_struct {
 static LIST_HEAD(dcookie_users);
 static DEFINE_MUTEX(dcookie_mutex);
-static kmem_cache_t * dcookie_cache;
-static struct list_head * dcookie_hashtable;
-static size_t hash_size;
+static kmem_cache_t *dcookie_cache __read_mostly;
+static struct list_head *dcookie_hashtable __read_mostly;
+static size_t hash_size __read_mostly;
 static inline int is_live(void)
 {
diff --git a/fs/dnotify.c b/fs/dnotify.c
index f3b540dd5d11..f932591df5a4 100644
--- a/fs/dnotify.c
+++ b/fs/dnotify.c
@@ -21,9 +21,9 @@
 #include <linux/spinlock.h>
 #include <linux/slab.h>
-int dir_notify_enable = 1;
+int dir_notify_enable __read_mostly = 1;
-static kmem_cache_t *dn_cache;
+static kmem_cache_t *dn_cache __read_mostly;
 static void redo_inode_mask(struct inode *inode)
 {
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index a0f682cdd03e..e067a06c6464 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -281,13 +281,13 @@ static struct mutex epmutex;
 static struct poll_safewake psw;
 /* Slab cache used to allocate "struct epitem" */
-static kmem_cache_t *epi_cache;
+static kmem_cache_t *epi_cache __read_mostly;
 /* Slab cache used to allocate "struct eppoll_entry" */
-static kmem_cache_t *pwq_cache;
+static kmem_cache_t *pwq_cache __read_mostly;
 /* Virtual fs used to allocate inodes for eventpoll files */
-static struct vfsmount *eventpoll_mnt;
+static struct vfsmount *eventpoll_mnt __read_mostly;
 /* File callbacks that implement the eventpoll file behaviour */
 static struct file_operations eventpoll_fops = {
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 03c789560fb8..2a2479196f96 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -412,7 +412,7 @@ out:
 /* Table to convert sigio signal codes into poll band bitmaps */
-static long band_table[NSIGPOLL] = {
+static const long band_table[NSIGPOLL] = {
 	POLLIN | POLLRDNORM,			/* POLL_IN */
 	POLLOUT | POLLWRNORM | POLLWRBAND,	/* POLL_OUT */
 	POLLIN | POLLRDNORM | POLLMSG,		/* POLL_MSG */
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown)
 }
 static DEFINE_RWLOCK(fasync_lock);
-static kmem_cache_t *fasync_cache;
+static kmem_cache_t *fasync_cache __read_mostly;
 /*
  * fasync_helper() is used by some character device drivers (mainly mice)
diff --git a/fs/inode.c b/fs/inode.c
index 85da11044adc..1fddf2803af8 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -56,8 +56,8 @@
 #define I_HASHBITS	i_hash_shift
 #define I_HASHMASK	i_hash_mask
-static unsigned int i_hash_mask;
-static unsigned int i_hash_shift;
+static unsigned int i_hash_mask __read_mostly;
+static unsigned int i_hash_shift __read_mostly;
 /*
  * Each inode can be on two separate lists. One is
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift;
 LIST_HEAD(inode_in_use);
 LIST_HEAD(inode_unused);
-static struct hlist_head *inode_hashtable;
+static struct hlist_head *inode_hashtable __read_mostly;
 /*
  * A simple spinlock to protect the list manipulations.
@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex);
  */
 struct inodes_stat_t inodes_stat;
-static kmem_cache_t * inode_cachep;
+static kmem_cache_t * inode_cachep __read_mostly;
 static struct inode *alloc_inode(struct super_block *sb)
 {
diff --git a/fs/inotify.c b/fs/inotify.c
index a61e93e17853..f48a3dae0712 100644
--- a/fs/inotify.c
+++ b/fs/inotify.c
@@ -39,15 +39,15 @@ static atomic_t inotify_cookie;
-static kmem_cache_t *watch_cachep;
-static kmem_cache_t *event_cachep;
+static kmem_cache_t *watch_cachep __read_mostly;
+static kmem_cache_t *event_cachep __read_mostly;
-static struct vfsmount *inotify_mnt;
+static struct vfsmount *inotify_mnt __read_mostly;
 /* these are configurable via /proc/sys/fs/inotify/ */
-int inotify_max_user_instances;
-int inotify_max_user_watches;
-int inotify_max_queued_events;
+int inotify_max_user_instances __read_mostly;
+int inotify_max_user_watches __read_mostly;
+int inotify_max_queued_events __read_mostly;
 /*
  * Lock ordering:
diff --git a/fs/locks.c b/fs/locks.c
index 56f996e98bbc..709450a7b89d 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -142,7 +142,7 @@ int lease_break_time = 45;
 static LIST_HEAD(file_lock_list);
 static LIST_HEAD(blocked_list);
-static kmem_cache_t *filelock_cache;
+static kmem_cache_t *filelock_cache __read_mostly;
 /* Allocate an empty lock structure. */
 static struct file_lock *locks_alloc_lock(void)
diff --git a/fs/namespace.c b/fs/namespace.c
index 71e75bcf4d28..e069a4c5e389 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock);
 static int event;
-static struct list_head *mount_hashtable;
+static struct list_head *mount_hashtable __read_mostly;
 static int hash_mask __read_mostly, hash_bits __read_mostly;
-static kmem_cache_t *mnt_cache;
+static kmem_cache_t *mnt_cache __read_mostly;
 static struct rw_semaphore namespace_sem;
 /* /sys/fs */
diff --git a/fs/pipe.c b/fs/pipe.c
index d976866a115b..4384c9290943 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -675,7 +675,7 @@ fail_page:
 	return NULL;
 }
-static struct vfsmount *pipe_mnt;
+static struct vfsmount *pipe_mnt __read_mostly;
 static int pipefs_delete_dentry(struct dentry *dentry)
 {
 	return 1;
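To make the false-sharing claim in the changelog concrete, here is a small
stand-alone demonstration (an illustration added for this writeup, not part
of the patch): one thread keeps incrementing a counter while the main
thread performs read-mostly lookups on neighbouring fields. With the fields
in the same cache line as the counter, the reader loop is typically
noticeably slower than when a pad pushes them onto their own line, which is
the effect the patch removes for the fs variables by grouping them in the
__read_mostly section. The structure, field names and iteration count here
are all hypothetical.

/* Hypothetical false-sharing demo; build with: gcc -O2 -pthread demo.c
 * Compare the reported reader time for -DSHARED_LINE=1 (default) and
 * -DSHARED_LINE=0 (read-mostly fields padded onto their own cache line).
 */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

#ifndef SHARED_LINE
#define SHARED_LINE 1
#endif

struct layout {
	unsigned long counter;	/* written constantly by the writer thread */
#if !SHARED_LINE
	char pad[64];		/* push the read-mostly fields to another line */
#endif
	unsigned int mask;	/* read-mostly */
	unsigned int shift;	/* read-mostly */
};

static struct layout data = { .mask = 0xff, .shift = 8 };
static volatile int stop;

static void *writer(void *arg)
{
	(void)arg;
	while (!stop)
		__atomic_fetch_add(&data.counter, 1, __ATOMIC_RELAXED);
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned long long sum = 0;
	struct timespec t0, t1;

	pthread_create(&t, NULL, writer, NULL);
	clock_gettime(CLOCK_MONOTONIC, &t0);
	for (long i = 0; i < 100000000L; i++) {
		/* volatile reads keep the compiler from hoisting the loads,
		 * so every iteration really touches the shared cache line */
		unsigned int m = *(volatile unsigned int *)&data.mask;
		unsigned int s = *(volatile unsigned int *)&data.shift;
		sum += ((unsigned long)i >> s) & m;
	}
	clock_gettime(CLOCK_MONOTONIC, &t1);
	stop = 1;
	pthread_join(t, NULL);

	printf("reader: %.2f s (sum=%llu, counter=%lu)\n",
	       (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9,
	       sum, data.counter);
	return 0;
}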