 drivers/android/binder_alloc.c |  7
 drivers/android/binder_alloc.h |  2
 fs/dcache.c                    |  4
 fs/gfs2/quota.c                |  2
 fs/inode.c                     |  4
 fs/nfs/nfs42xattr.c            |  4
 fs/nfsd/filecache.c            |  5
 fs/xfs/xfs_buf.c               |  2
 fs/xfs/xfs_qm.c                |  5
 include/linux/list_lru.h       |  2
 mm/list_lru.c                  |  2
 mm/workingset.c                | 15
 mm/zswap.c                     |  4
 13 files changed, 25 insertions(+), 33 deletions(-)
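
In brief: the list_lru walk callback loses its spinlock_t *lock parameter. The per-list lock now lives inside struct list_lru_one, so every callback reaches it as &lru->lock instead of receiving it separately. A minimal sketch of a callback under the new signature, assuming the embedded lock is named ->lock as the converted callers imply; the my_isolate name and the dispose-list argument are illustrative, not part of the patch:

static enum lru_status my_isolate(struct list_head *item,
				  struct list_lru_one *lru,
				  void *cb_arg)
	__must_hold(&lru->lock)
{
	struct list_head *dispose = cb_arg;

	/* Still under &lru->lock: move the entry to the caller's list. */
	list_lru_isolate_move(lru, item, dispose);
	return LRU_REMOVED;
}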
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 86bbe40f4bcd..a738e7745865 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1047,7 +1047,7 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
/**
* binder_alloc_free_page() - shrinker callback to free pages
* @item: item to free
- * @lock: lock protecting the item
+ * @lru: list_lru instance of the item
* @cb_arg: callback argument
*
* Called from list_lru_walk() in binder_shrink_scan() to free
@@ -1055,9 +1055,8 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
*/
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock,
void *cb_arg)
- __must_hold(lock)
+ __must_hold(&lru->lock)
{
struct binder_lru_page *page = container_of(item, typeof(*page), lru);
struct binder_alloc *alloc = page->alloc;
@@ -1092,7 +1091,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
list_lru_isolate(lru, item);
spin_unlock(&alloc->lock);
- spin_unlock(lock);
+ spin_unlock(&lru->lock);
if (vma) {
trace_binder_unmap_user_start(alloc, index);
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 70387234477e..c02c8ebcb466 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -118,7 +118,7 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lock, void *cb_arg);
+ void *cb_arg);
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
diff --git a/fs/dcache.c b/fs/dcache.c
index 0f6b16ba30d0..d7f6866f5f52 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1089,7 +1089,7 @@ void shrink_dentry_list(struct list_head *list)
}
static enum lru_status dentry_lru_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
@@ -1170,7 +1170,7 @@ long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct dentry *dentry = container_of(item, struct dentry, d_lru);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 2e6bc77f4f81..72b48f6f5561 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -149,7 +149,7 @@ static void gfs2_qd_list_dispose(struct list_head *list)
static enum lru_status gfs2_qd_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *dispose = arg;
struct gfs2_quota_data *qd =
diff --git a/fs/inode.c b/fs/inode.c
index 442cb4fc09b2..46fbd5b23482 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -881,7 +881,7 @@ again:
* with this flag set because they are the inodes that are out of order.
*/
static enum lru_status inode_lru_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *freeable = arg;
struct inode *inode = container_of(item, struct inode, i_lru);
@@ -923,7 +923,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
inode_pin_lru_isolating(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(lru_lock);
+ spin_unlock(&lru->lock);
if (remove_inode_buffers(inode)) {
unsigned long reap;
reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c
index b6e3d8f77b91..37d79400e5f4 100644
--- a/fs/nfs/nfs42xattr.c
+++ b/fs/nfs/nfs42xattr.c
@@ -802,7 +802,7 @@ static struct shrinker *nfs4_xattr_large_entry_shrinker;
static enum lru_status
cache_lru_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *dispose = arg;
struct inode *inode;
@@ -867,7 +867,7 @@ nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
static enum lru_status
entry_lru_isolate(struct list_head *item,
- struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+ struct list_lru_one *lru, void *arg)
{
struct list_head *dispose = arg;
struct nfs4_xattr_bucket *bucket;
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index 2e6783f63712..09c444eb944f 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -487,7 +487,6 @@ void nfsd_file_net_dispose(struct nfsd_net *nn)
* nfsd_file_lru_cb - Examine an entry on the LRU list
* @item: LRU entry to examine
* @lru: controlling LRU
- * @lock: LRU list lock (unused)
* @arg: dispose list
*
* Return values:
@@ -497,9 +496,7 @@ void nfsd_file_net_dispose(struct nfsd_net *nn)
*/
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
- spinlock_t *lock, void *arg)
- __releases(lock)
- __acquires(lock)
+ void *arg)
{
struct list_head *head = arg;
struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index aa4dbda7b536..43b914c1f621 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1857,7 +1857,6 @@ static enum lru_status
xfs_buftarg_drain_rele(
struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lru_lock,
void *arg)
{
@@ -1956,7 +1955,6 @@ static enum lru_status
xfs_buftarg_isolate(
struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lru_lock,
void *arg)
{
struct xfs_buf *bp = container_of(item, struct xfs_buf, b_lru);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 665d26990b78..8413ac368042 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -412,9 +412,8 @@ static enum lru_status
xfs_qm_dquot_isolate(
struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lru_lock,
void *arg)
- __releases(lru_lock) __acquires(lru_lock)
+ __releases(&lru->lock) __acquires(&lru->lock)
{
struct xfs_dquot *dqp = container_of(item,
struct xfs_dquot, q_lru);
@@ -460,7 +459,7 @@ xfs_qm_dquot_isolate(
trace_xfs_dqreclaim_dirty(dqp);
/* we have to drop the LRU lock to flush the dquot */
- spin_unlock(lru_lock);
+ spin_unlock(&lru->lock);
error = xfs_qm_dqflush(dqp, &bp);
if (error)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 10ba9a54d42c..05c166811f6b 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -184,7 +184,7 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
struct list_head *head);
typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
- struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+ struct list_lru_one *list, void *cb_arg);
/**
* list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
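
A callback that must block follows the same pattern as the conversions in this patch (binder, XFS quota, zswap): isolate the entry, drop &lru->lock by hand, do the blocking work, and return LRU_REMOVED_RETRY so the walker knows the lock was released. A hedged sketch; my_reclaim_cb and the blocking step are hypothetical:

static enum lru_status my_reclaim_cb(struct list_head *item,
				     struct list_lru_one *lru,
				     void *cb_arg)
{
	list_lru_isolate(lru, item);

	/* Drop the list lock before sleeping, as xfs_qm_dquot_isolate() does. */
	spin_unlock(&lru->lock);

	/* ... blocking reclaim of the isolated entry ... */

	/* LRU_REMOVED_RETRY tells the walker we dropped &lru->lock. */
	return LRU_REMOVED_RETRY;
}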
diff --git a/mm/list_lru.c b/mm/list_lru.c
index c139202e27f7..f93ada6a207b 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -298,7 +298,7 @@ restart:
break;
--*nr_to_walk;
- ret = isolate(item, l, &l->lock, cb_arg);
+ ret = isolate(item, l, cb_arg);
switch (ret) {
/*
* LRU_RETRY, LRU_REMOVED_RETRY and LRU_STOP will drop the lru
diff --git a/mm/workingset.c b/mm/workingset.c
index c187d4a3fbea..a4705e196545 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -702,8 +702,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
static enum lru_status shadow_lru_isolate(struct list_head *item,
struct list_lru_one *lru,
- spinlock_t *lru_lock,
- void *arg) __must_hold(lru_lock)
+ void *arg) __must_hold(lru->lock)
{
struct xa_node *node = container_of(item, struct xa_node, private_list);
struct address_space *mapping;
@@ -712,20 +711,20 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
/*
* Page cache insertions and deletions synchronously maintain
* the shadow node LRU under the i_pages lock and the
- * lru_lock. Because the page cache tree is emptied before
- * the inode can be destroyed, holding the lru_lock pins any
+ * &lru->lock. Because the page cache tree is emptied before
+ * the inode can be destroyed, holding the &lru->lock pins any
* address_space that has nodes on the LRU.
*
* We can then safely transition to the i_pages lock to
* pin only the address_space of the particular node we want
- * to reclaim, take the node off-LRU, and drop the lru_lock.
+ * to reclaim, take the node off-LRU, and drop the &lru->lock.
*/
mapping = container_of(node->array, struct address_space, i_pages);
/* Coming from the list, invert the lock order */
if (!xa_trylock(&mapping->i_pages)) {
- spin_unlock_irq(lru_lock);
+ spin_unlock_irq(&lru->lock);
ret = LRU_RETRY;
goto out;
}
@@ -734,7 +733,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
if (mapping->host != NULL) {
if (!spin_trylock(&mapping->host->i_lock)) {
xa_unlock(&mapping->i_pages);
- spin_unlock_irq(lru_lock);
+ spin_unlock_irq(&lru->lock);
ret = LRU_RETRY;
goto out;
}
@@ -743,7 +742,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
list_lru_isolate(lru, item);
__dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);
- spin_unlock(lru_lock);
+ spin_unlock(&lru->lock);
/*
* The nodes should only contain one or more shadow entries,
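
The comment rewritten above also documents a lock-inversion idiom other users can copy: when the callback needs a lock that ranks above &lru->lock, trylock it, and on failure drop &lru->lock and return LRU_RETRY so the walker re-acquires the list lock and restarts. A condensed sketch (my_outer_lock is hypothetical; workingset's list is IRQ-safe, hence the _irq variant):

	if (!spin_trylock(&my_outer_lock)) {
		spin_unlock_irq(&lru->lock);
		return LRU_RETRY;	/* walker retakes &lru->lock and retries */
	}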
diff --git a/mm/zswap.c b/mm/zswap.c
index ba35e4550941..f6316b66fb23 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1102,7 +1102,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
* for reclaim by this ratio.
*/
static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
- spinlock_t *lock, void *arg)
+ void *arg)
{
struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
bool *encountered_page_in_swapcache = (bool *)arg;
@@ -1158,7 +1158,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
* It's safe to drop the lock here because we return either
* LRU_REMOVED_RETRY or LRU_RETRY.
*/
- spin_unlock(lock);
+ spin_unlock(&l->lock);
writeback_result = zswap_writeback_entry(entry, swpentry);
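
Call sites are untouched by the conversion: the walk helpers still take a list_lru_walk_cb plus an opaque argument. A sketch of a caller, assuming my_lru was set up with list_lru_init() and my_isolate is the callback sketched earlier:

static struct list_lru my_lru;		/* hypothetical instance */

static unsigned long my_shrink(void)
{
	LIST_HEAD(dispose);

	/* Walk up to 128 entries; my_isolate() fills @dispose. */
	return list_lru_walk(&my_lru, my_isolate, &dispose, 128);
}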