author     Roberto Bergantinos Corpas <rbergant@redhat.com>   2020-11-27 19:38:31 +0100
committer  Chuck Lever <chuck.lever@oracle.com>               2020-12-09 09:38:34 -0500
commit     4b5cff7ed8afcdd35bfff39ab503342900ec80c6 (patch)
tree       13ef5d7ed5efeb40b0fe7c61120703f49cb1761b /net/sunrpc
parent     4420440c57892779f265108f46f83832a88ca795 (diff)
sunrpc: clean-up cache downcall
We can simplify the code around cache_downcall by unifying memory
allocations with kvmalloc. This has the benefit of getting rid of
cache_slow_downcall (and queue_io_mutex), and it also matches the
userland allocation size and limits.
Signed-off-by: Roberto Bergantinos Corpas <rbergant@redhat.com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
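For context, a minimal sketch of the kvmalloc()/kvfree() idiom the patch adopts. copy_downcall_buf() is a hypothetical helper invented for illustration, and the parse step is elided; this is a sketch of the pattern, not code from the patch:

#include <linux/mm.h>		/* kvmalloc(), kvfree() */
#include <linux/uaccess.h>	/* copy_from_user() */

/* Hypothetical helper showing the single-allocation pattern: size the
 * buffer to the caller's request, copy the userspace data in, and
 * NUL-terminate it for the parser.
 */
static ssize_t copy_downcall_buf(const char __user *buf, size_t count)
{
	char *kbuf;
	ssize_t ret = count;

	/* kvmalloc() tries kmalloc() first and falls back to vmalloc()
	 * when contiguous memory is unavailable; one path for all sizes.
	 */
	kbuf = kvmalloc(count + 1, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	if (copy_from_user(kbuf, buf, count)) {
		ret = -EFAULT;
		goto out;
	}
	kbuf[count] = '\0';	/* parsers expect a terminated string */

	/* ... hand kbuf to the cache parser here ... */
out:
	kvfree(kbuf);		/* frees kmalloc or vmalloc memory alike */
	return ret;
}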
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/cache.c  41
1 file changed, 11 insertions, 30 deletions
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 20c93b68505e..1a2c1c44bb00 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -778,7 +778,6 @@ void cache_clean_deferred(void *owner)
  */
 
 static DEFINE_SPINLOCK(queue_lock);
-static DEFINE_MUTEX(queue_io_mutex);
 
 struct cache_queue {
 	struct list_head	list;
@@ -906,44 +905,26 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
 	return ret;
 }
 
-static ssize_t cache_slow_downcall(const char __user *buf,
-				   size_t count, struct cache_detail *cd)
-{
-	static char write_buf[32768]; /* protected by queue_io_mutex */
-	ssize_t ret = -EINVAL;
-
-	if (count >= sizeof(write_buf))
-		goto out;
-	mutex_lock(&queue_io_mutex);
-	ret = cache_do_downcall(write_buf, buf, count, cd);
-	mutex_unlock(&queue_io_mutex);
-out:
-	return ret;
-}
-
 static ssize_t cache_downcall(struct address_space *mapping,
 			      const char __user *buf,
 			      size_t count, struct cache_detail *cd)
 {
-	struct page *page;
-	char *kaddr;
+	char *write_buf;
 	ssize_t ret = -ENOMEM;
 
-	if (count >= PAGE_SIZE)
-		goto out_slow;
+	if (count >= 32768) { /* 32k is max userland buffer, lets check anyway */
+		ret = -EINVAL;
+		goto out;
+	}
 
-	page = find_or_create_page(mapping, 0, GFP_KERNEL);
-	if (!page)
-		goto out_slow;
+	write_buf = kvmalloc(count + 1, GFP_KERNEL);
+	if (!write_buf)
+		goto out;
 
-	kaddr = kmap(page);
-	ret = cache_do_downcall(kaddr, buf, count, cd);
-	kunmap(page);
-	unlock_page(page);
-	put_page(page);
+	ret = cache_do_downcall(write_buf, buf, count, cd);
+	kvfree(write_buf);
+out:
 	return ret;
-out_slow:
-	return cache_slow_downcall(buf, count, cd);
 }
 
 static ssize_t cache_write(struct file *filp, const char __user *buf,
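A background note on the design choice (general kernel behavior, not something this commit changes): kvmalloc(..., GFP_KERNEL) first attempts a kmalloc() and falls back to vmalloc() when a physically contiguous allocation cannot be satisfied, and kvfree() releases memory obtained from either allocator. That is what lets one allocation path replace both the old sub-PAGE_SIZE page-cache path and the 32 KB static-buffer slow path, while the count + 1 sizing leaves room for the NUL terminator that cache_do_downcall() writes before handing the buffer to the cache parser.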