author	Jeff Layton <jlayton@primarydata.com>	2015-01-16 15:05:54 -0500
committer	Jeff Layton <jlayton@primarydata.com>	2015-01-16 15:05:54 -0500
commit	4a075e39c86490cc0f0c10ac6abe3592d1689463 (patch)
tree	8da8633f9f717128c02a08ad15b7d9f067091acb /fs/locks.c
parent	dd459bb1974c5e9cff3dfbf4f6fdb3e9363ef32e (diff)
locks: add a new struct file_lock_context pointer to struct inode
The current scheme of using the i_flock list is really difficult to
manage. There is also a legitimate desire for a per-inode spinlock,
separate from the i_lock, to manage these lists.
Start conversion to a new scheme to eventually replace the old i_flock
list with a new "file_lock_context" object.
We start by adding a new i_flctx pointer to struct inode. For now, it
lives in parallel with the i_flock list, but will eventually replace it.
The idea is to allocate a structure to sit behind that pointer and act
as a locus for all things file locking.
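
For reference, the include/linux/fs.h side of the change is not visible
below, since the diffstat is limited to fs/locks.c. It amounts to roughly
the following sketch; the placement of the fields within struct inode is
an assumption, and only the flock list exists at this stage of the
conversion:

/* include/linux/fs.h (sketch; this hunk is outside the fs/locks.c-limited
 * view below). flc_flock is the only list at this point in the series.
 */
struct file_lock_context {
	struct list_head	flc_flock;	/* flock(2)-style locks */
};

struct inode {
	/* ... existing fields ... */
	struct file_lock		*i_flock;	/* legacy list, kept for now */
	struct file_lock_context	*i_flctx;	/* new, allocated lazily */
	/* ... */
};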
We allocate a file_lock_context for an inode when the first lock is
added to it, and it's only freed when the inode is freed. We use the
i_lock to protect the assignment, but afterward it should mostly be
accessed locklessly.
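
The freeing side is likewise outside the fs/locks.c-limited view. A
sketch of how inode teardown would invoke it follows; hooking this into
__destroy_inode() in fs/inode.c is an assumption based on the description
above, and only locks_free_lock_context() itself comes from this patch:

/* fs/inode.c (sketch): the context lives exactly as long as the inode,
 * so it is torn down from inode destruction. Exact call site assumed.
 */
void __destroy_inode(struct inode *inode)
{
	/* ... existing teardown ... */
	locks_free_lock_context(inode->i_flctx);
	/* ... */
}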
Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'fs/locks.c')
-rw-r--r--	fs/locks.c	44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index ae1e7cf721d6..526d5fca67c8 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -202,8 +202,49 @@ static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
  */
 static DEFINE_SPINLOCK(blocked_lock_lock);
 
+static struct kmem_cache *flctx_cache __read_mostly;
 static struct kmem_cache *filelock_cache __read_mostly;
 
+static struct file_lock_context *
+locks_get_lock_context(struct inode *inode)
+{
+	struct file_lock_context *new;
+
+	if (likely(inode->i_flctx))
+		goto out;
+
+	new = kmem_cache_alloc(flctx_cache, GFP_KERNEL);
+	if (!new)
+		goto out;
+
+	INIT_LIST_HEAD(&new->flc_flock);
+
+	/*
+	 * Assign the pointer if it's not already assigned. If it is, then
+	 * free the context we just allocated.
+	 */
+	spin_lock(&inode->i_lock);
+	if (likely(!inode->i_flctx)) {
+		inode->i_flctx = new;
+		new = NULL;
+	}
+	spin_unlock(&inode->i_lock);
+
+	if (new)
+		kmem_cache_free(flctx_cache, new);
+out:
+	return inode->i_flctx;
+}
+
+void
+locks_free_lock_context(struct file_lock_context *ctx)
+{
+	if (ctx) {
+		WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
+		kmem_cache_free(flctx_cache, ctx);
+	}
+}
+
 static void locks_init_lock_heads(struct file_lock *fl)
 {
 	INIT_HLIST_NODE(&fl->fl_link);
@@ -2636,6 +2677,9 @@ static int __init filelock_init(void)
 {
 	int i;
 
+	flctx_cache = kmem_cache_create("file_lock_ctx",
+			sizeof(struct file_lock_context), 0, SLAB_PANIC, NULL);
+
 	filelock_cache = kmem_cache_create("file_lock_cache",
 			sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
 
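
To illustrate how the lazy allocation is meant to be used, here is a
hypothetical caller; nothing below is part of this patch. The helper
add_flock_lock() and the fl_list member are illustrative stand-ins for
what later patches in the series would wire up:

/* Hypothetical usage sketch, not part of this patch. Code adding the
 * first lock to an inode ensures a context exists, then links the lock
 * onto the appropriate list under the i_lock.
 */
static int add_flock_lock(struct inode *inode, struct file_lock *fl)
{
	struct file_lock_context *ctx;

	ctx = locks_get_lock_context(inode);
	if (!ctx)
		return -ENOMEM;		/* allocation failed */

	spin_lock(&inode->i_lock);
	list_add_tail(&fl->fl_list, &ctx->flc_flock);	/* fl_list is illustrative */
	spin_unlock(&inode->i_lock);
	return 0;
}

Note the pattern in locks_get_lock_context(): the fast path reads
i_flctx locklessly, first assignment races are resolved under the
i_lock, and the loser frees its unused allocation.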