author    Arnd Bergmann <arnd@arndb.de>    2010-10-27 15:46:08 +0200
committer Arnd Bergmann <arnd@arndb.de>    2010-10-27 21:41:50 +0200
commit    c5b1f0d92c36851aca09ac6c7c0c4f9690ac14f3 (patch)
tree      c8aa4ad65aea3b97292135a4c23d512e6071dc8d /fs/locks.c
parent    a282a1fa6b23bd21ba0b86e53ed2a316b001836f (diff)
locks/nfsd: allocate file lock outside of spinlock
As suggested by Christoph Hellwig, this moves allocation of new file locks out of generic_setlease into the callers, nfs4_open_delegation and fcntl_setlease, in order to allow GFP_KERNEL allocations when lock_flocks has become a spinlock.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: J. Bruce Fields <bfields@redhat.com>
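As an aside, a minimal user-space sketch of the pattern the patch adopts is shown below: perform the possibly-sleeping allocation before taking the spinlock, so that nothing that can block runs inside the critical section. The names here (struct item, add_item, list_lock) are hypothetical, and malloc() plus a POSIX spinlock merely stand in for the kernel's GFP_KERNEL allocation and lock_flocks():

/*
 * Illustrative only (not kernel code): allocate before taking the
 * spinlock so the critical section never sleeps.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
	int value;
	struct item *next;
};

static pthread_spinlock_t list_lock;
static struct item *list_head;

static int add_item(int value)
{
	/* Possibly-blocking allocation, done with no lock held. */
	struct item *it = malloc(sizeof(*it));
	if (!it)
		return -1;
	it->value = value;

	/* Critical section: only pointer updates, no allocation. */
	pthread_spin_lock(&list_lock);
	it->next = list_head;
	list_head = it;
	pthread_spin_unlock(&list_lock);
	return 0;
}

int main(void)
{
	pthread_spin_init(&list_lock, PTHREAD_PROCESS_PRIVATE);
	if (add_item(42) == 0)
		printf("item added outside the critical section\n");
	pthread_spin_destroy(&list_lock);
	return 0;
}

The only point is the ordering of the allocation relative to the lock, which mirrors moving locks_alloc_lock() out of generic_setlease and into fcntl_setlease and nfs4_open_delegation in the diff below.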
Diffstat (limited to 'fs/locks.c')
-rw-r--r-- | fs/locks.c | 36
1 file changed, 12 insertions(+), 24 deletions(-)
diff --git a/fs/locks.c b/fs/locks.c
index 8b2b6ad56a09..0391d2ff5a4e 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -162,10 +162,11 @@ EXPORT_SYMBOL_GPL(unlock_flocks);
static struct kmem_cache *filelock_cache __read_mostly;
/* Allocate an empty lock structure. */
-static struct file_lock *locks_alloc_lock(void)
+struct file_lock *locks_alloc_lock(void)
{
return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
+EXPORT_SYMBOL_GPL(locks_alloc_lock);
void locks_release_private(struct file_lock *fl)
{
@@ -1365,7 +1366,6 @@ int fcntl_getlease(struct file *filp)
int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
{
struct file_lock *fl, **before, **my_before = NULL, *lease;
- struct file_lock *new_fl = NULL;
struct dentry *dentry = filp->f_path.dentry;
struct inode *inode = dentry->d_inode;
int error, rdlease_count = 0, wrlease_count = 0;
@@ -1385,11 +1385,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
lease = *flp;
if (arg != F_UNLCK) {
- error = -ENOMEM;
- new_fl = locks_alloc_lock();
- if (new_fl == NULL)
- goto out;
-
error = -EAGAIN;
if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
goto out;
@@ -1434,7 +1429,6 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
goto out;
}
- error = 0;
if (arg == F_UNLCK)
goto out;
@@ -1442,15 +1436,11 @@ int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
if (!leases_enable)
goto out;
- locks_copy_lock(new_fl, lease);
- locks_insert_lock(before, new_fl);
-
- *flp = new_fl;
+ locks_insert_lock(before, lease);
return 0;
out:
- if (new_fl != NULL)
- locks_free_lock(new_fl);
+ locks_free_lock(lease);
return error;
}
EXPORT_SYMBOL(generic_setlease);
@@ -1514,26 +1504,24 @@ EXPORT_SYMBOL_GPL(vfs_setlease);
*/
int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
{
- struct file_lock fl, *flp = &fl;
+ struct file_lock *fl;
struct inode *inode = filp->f_path.dentry->d_inode;
int error;
- locks_init_lock(&fl);
- error = lease_init(filp, arg, &fl);
- if (error)
- return error;
+ fl = lease_alloc(filp, arg);
+ if (IS_ERR(fl))
+ return PTR_ERR(fl);
lock_flocks();
-
- error = __vfs_setlease(filp, arg, &flp);
+ error = __vfs_setlease(filp, arg, &fl);
if (error || arg == F_UNLCK)
goto out_unlock;
- error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
+ error = fasync_helper(fd, filp, 1, &fl->fl_fasync);
if (error < 0) {
/* remove lease just inserted by setlease */
- flp->fl_type = F_UNLCK | F_INPROGRESS;
- flp->fl_break_time = jiffies - 10;
+ fl->fl_type = F_UNLCK | F_INPROGRESS;
+ fl->fl_break_time = jiffies - 10;
time_out_leases(inode);
goto out_unlock;
}