Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/dynroot.c           |  6
-rw-r--r--  fs/efivarfs/super.c        | 52
-rw-r--r--  fs/fuse/dev.c              |  2
-rw-r--r--  fs/fuse/dev_uring.c        |  4
-rw-r--r--  fs/libfs.c                 |  2
-rw-r--r--  fs/netfs/direct_read.c     |  6
-rw-r--r--  fs/netfs/read_collect.c    | 18
-rw-r--r--  fs/netfs/rolling_buffer.c  |  4
-rw-r--r--  fs/netfs/write_collect.c   |  3
-rw-r--r--  fs/smb/client/cifssmb.c    | 46
-rw-r--r--  fs/smb/client/smb2pdu.c    | 96
11 files changed, 140 insertions(+), 99 deletions(-)
diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
index 008698d706ca..7d997f7a8028 100644
--- a/fs/afs/dynroot.c
+++ b/fs/afs/dynroot.c
@@ -314,6 +314,9 @@ static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inod
const char *name;
bool dotted = vnode->fid.vnode == 3;
+ if (!rcu_access_pointer(net->ws_cell))
+ return ERR_PTR(-ENOENT);
+
if (!dentry) {
/* We're in RCU-pathwalk. */
cell = rcu_dereference(net->ws_cell);
@@ -325,9 +328,6 @@ static const char *afs_atcell_get_link(struct dentry *dentry, struct inode *inod
return name;
}
- if (!rcu_access_pointer(net->ws_cell))
- return ERR_PTR(-ENOENT);
-
down_read(&net->cells_lock);
cell = rcu_dereference_protected(net->ws_cell, lockdep_is_held(&net->cells_lock));
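The hunk above moves the NULL test ahead of the RCU-pathwalk branch so neither path can dereference a cleared ws_cell. A minimal sketch of that check-then-dereference ordering, using a hypothetical pointer and helper rather than the AFS structures:

#include <linux/err.h>
#include <linux/rcupdate.h>

struct cellish { char name[16]; };
static struct cellish __rcu *ws_ptr;		/* assumed published elsewhere */

/* caller holds rcu_read_lock() on the fast path */
static const char *cellish_name(void)
{
	struct cellish *c;

	if (!rcu_access_pointer(ws_ptr))	/* cheap NULL test, no dereference */
		return ERR_PTR(-ENOENT);

	c = rcu_dereference(ws_ptr);		/* may still have raced to NULL... */
	return c ? c->name : ERR_PTR(-ENOENT);	/* ...so check the result as well */
}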
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 6eae8cf655c1..0486e9b68bc6 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -421,7 +421,7 @@ static bool efivarfs_actor(struct dir_context *ctx, const char *name, int len,
if (err)
size = 0;
- inode_lock(inode);
+ inode_lock_nested(inode, I_MUTEX_CHILD);
i_size_write(inode, size);
inode_unlock(inode);
@@ -474,12 +474,25 @@ static int efivarfs_check_missing(efi_char16_t *name16, efi_guid_t vendor,
return err;
}
+static void efivarfs_deactivate_super_work(struct work_struct *work)
+{
+ struct super_block *s = container_of(work, struct super_block,
+ destroy_work);
+ /*
+ * note: here s->destroy_work is free for reuse (which
+ * will happen in deactivate_super)
+ */
+ deactivate_super(s);
+}
+
+static struct file_system_type efivarfs_type;
+
static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action,
void *ptr)
{
struct efivarfs_fs_info *sfi = container_of(nb, struct efivarfs_fs_info,
pm_nb);
- struct path path = { .mnt = NULL, .dentry = sfi->sb->s_root, };
+ struct path path;
struct efivarfs_ctx ectx = {
.ctx = {
.actor = efivarfs_actor,
@@ -487,6 +500,7 @@ static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action,
.sb = sfi->sb,
};
struct file *file;
+ struct super_block *s = sfi->sb;
static bool rescan_done = true;
if (action == PM_HIBERNATION_PREPARE) {
@@ -499,11 +513,43 @@ static int efivarfs_pm_notify(struct notifier_block *nb, unsigned long action,
if (rescan_done)
return NOTIFY_DONE;
+ /* ensure single superblock is alive and pin it */
+ if (!atomic_inc_not_zero(&s->s_active))
+ return NOTIFY_DONE;
+
pr_info("efivarfs: resyncing variable state\n");
- /* O_NOATIME is required to prevent oops on NULL mnt */
+ path.dentry = sfi->sb->s_root;
+
+ /*
+ * do not add SB_KERNMOUNT which a single superblock could
+ * expose to userspace and which also causes MNT_INTERNAL, see
+ * below
+ */
+ path.mnt = vfs_kern_mount(&efivarfs_type, 0,
+ efivarfs_type.name, NULL);
+ if (IS_ERR(path.mnt)) {
+ pr_err("efivarfs: internal mount failed\n");
+ /*
+ * We may be the last pinner of the superblock but
+ * calling efivarfs_kill_sb from within the notifier
+ * here would deadlock trying to unregister it
+ */
+ INIT_WORK(&s->destroy_work, efivarfs_deactivate_super_work);
+ schedule_work(&s->destroy_work);
+ return PTR_ERR(path.mnt);
+ }
+
+ /* path.mnt now has pin on superblock, so this must be above one */
+ atomic_dec(&s->s_active);
+
file = kernel_file_open(&path, O_RDONLY | O_DIRECTORY | O_NOATIME,
current_cred());
+ /*
+ * safe even if last put because no MNT_INTERNAL means this
+ * will do delayed deactivate_super and not deadlock
+ */
+ mntput(path.mnt);
if (IS_ERR(file))
return NOTIFY_DONE;
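The efivarfs hunk pins the one live superblock with an extra s_active reference while it builds a throwaway internal mount, and pushes any final deactivate_super() onto a work item so it never runs from the PM notifier itself. A condensed, hedged sketch of just that pin-and-defer pattern (the example_* helpers and the -ENODEV choice are hypothetical; super_block's s_active and destroy_work fields are real):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/workqueue.h>

static void example_deactivate_work(struct work_struct *work)
{
	struct super_block *s = container_of(work, struct super_block,
					     destroy_work);
	deactivate_super(s);			/* may drop the last reference */
}

static int example_pin_and_mount(struct super_block *s,
				 struct file_system_type *type)
{
	struct vfsmount *mnt;

	if (!atomic_inc_not_zero(&s->s_active))	/* superblock already dying */
		return -ENODEV;

	mnt = vfs_kern_mount(type, 0, type->name, NULL);
	if (IS_ERR(mnt)) {
		/* drop our pin from a workqueue, not from this context */
		INIT_WORK(&s->destroy_work, example_deactivate_work);
		schedule_work(&s->destroy_work);
		return PTR_ERR(mnt);
	}

	atomic_dec(&s->s_active);		/* the mount now holds its own pin */
	/* ... open and read through the mount ... */
	mntput(mnt);				/* delayed deactivation if last ref */
	return 0;
}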
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 2c3a4d09e500..51e31df4c546 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -77,7 +77,7 @@ void fuse_set_initialized(struct fuse_conn *fc)
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
return !fc->initialized || (for_background && fc->blocked) ||
- (fc->io_uring && !fuse_uring_ready(fc));
+ (fc->io_uring && fc->connected && !fuse_uring_ready(fc));
}
static void fuse_drop_waiting(struct fuse_conn *fc)
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index ebd2931b4f2a..82bf458fa9db 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -208,11 +208,11 @@ static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
init_waitqueue_head(&ring->stop_waitq);
- fc->ring = ring;
ring->nr_queues = nr_queues;
ring->fc = fc;
ring->max_payload_sz = max_payload_size;
atomic_set(&ring->queue_refs, 0);
+ smp_store_release(&fc->ring, ring);
spin_unlock(&fc->lock);
return ring;
@@ -1041,7 +1041,7 @@ static int fuse_uring_register(struct io_uring_cmd *cmd,
unsigned int issue_flags, struct fuse_conn *fc)
{
const struct fuse_uring_cmd_req *cmd_req = io_uring_sqe_cmd(cmd->sqe);
- struct fuse_ring *ring = fc->ring;
+ struct fuse_ring *ring = smp_load_acquire(&fc->ring);
struct fuse_ring_queue *queue;
struct fuse_ring_ent *ent;
int err;
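The two dev_uring.c hunks pair smp_store_release() with smp_load_acquire(): the ring is fully initialised before the pointer is published, and any reader that observes the pointer via the acquire load is guaranteed to also see those initialised fields. A minimal sketch of that publish/consume pairing with hypothetical structures:

#include <linux/errno.h>
#include <asm/barrier.h>

struct toy_ring { int nr_queues; };
struct toy_conn { struct toy_ring *ring; };

static void toy_publish(struct toy_conn *c, struct toy_ring *r)
{
	r->nr_queues = 4;			/* initialise first... */
	smp_store_release(&c->ring, r);		/* ...then publish the pointer */
}

static int toy_nr_queues(struct toy_conn *c)
{
	struct toy_ring *r = smp_load_acquire(&c->ring);

	return r ? r->nr_queues : -ENODEV;	/* init is visible whenever r != NULL */
}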
diff --git a/fs/libfs.c b/fs/libfs.c
index 8444f5cc4064..dc042a975a56 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -496,7 +496,7 @@ offset_dir_lookup(struct dentry *parent, loff_t offset)
found = find_positive_dentry(parent, NULL, false);
else {
rcu_read_lock();
- child = mas_find(&mas, DIR_OFFSET_MAX);
+ child = mas_find_rev(&mas, DIR_OFFSET_MIN);
found = find_positive_dentry(parent, child, false);
rcu_read_unlock();
}
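The libfs.c fix turns a forward maple-tree search into a reverse one: on its first call, mas_find() returns the first entry at or after the starting index (bounded by a maximum), whereas mas_find_rev() returns the nearest entry at or below it (bounded by a minimum). A hedged illustration of the two calls on a caller-populated tree (the helper names are hypothetical):

#include <linux/limits.h>
#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

/* nearest entry at or after @index, or NULL */
static void *entry_at_or_after(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *p;

	rcu_read_lock();
	p = mas_find(&mas, ULONG_MAX);
	rcu_read_unlock();
	return p;
}

/* nearest entry at or below @index, or NULL */
static void *entry_at_or_before(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *p;

	rcu_read_lock();
	p = mas_find_rev(&mas, 0);
	rcu_read_unlock();
	return p;
}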
diff --git a/fs/netfs/direct_read.c b/fs/netfs/direct_read.c
index 0bf3c2f5a710..5e3f0aeb51f3 100644
--- a/fs/netfs/direct_read.c
+++ b/fs/netfs/direct_read.c
@@ -125,9 +125,9 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
* Perform a read to an application buffer, bypassing the pagecache and the
* local disk cache.
*/
-static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
+static ssize_t netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
{
- int ret;
+ ssize_t ret;
_enter("R=%x %llx-%llx",
rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
@@ -155,7 +155,7 @@ static int netfs_unbuffered_read(struct netfs_io_request *rreq, bool sync)
else
ret = -EIOCBQUEUED;
out:
- _leave(" = %d", ret);
+ _leave(" = %zd", ret);
return ret;
}
diff --git a/fs/netfs/read_collect.c b/fs/netfs/read_collect.c
index 636cc5a98ef5..23c75755ad4e 100644
--- a/fs/netfs/read_collect.c
+++ b/fs/netfs/read_collect.c
@@ -682,14 +682,16 @@ void netfs_wait_for_pause(struct netfs_io_request *rreq)
trace_netfs_rreq(rreq, netfs_rreq_trace_wait_queue);
prepare_to_wait(&rreq->waitq, &myself, TASK_UNINTERRUPTIBLE);
- subreq = list_first_entry_or_null(&stream->subrequests,
- struct netfs_io_subrequest, rreq_link);
- if (subreq &&
- (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
- test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
- __set_current_state(TASK_RUNNING);
- netfs_read_collection(rreq);
- continue;
+ if (!test_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &rreq->flags)) {
+ subreq = list_first_entry_or_null(&stream->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ if (subreq &&
+ (!test_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags) ||
+ test_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags))) {
+ __set_current_state(TASK_RUNNING);
+ netfs_read_collection(rreq);
+ continue;
+ }
}
if (!test_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags) ||
diff --git a/fs/netfs/rolling_buffer.c b/fs/netfs/rolling_buffer.c
index 75d97af14b4a..207b6a326651 100644
--- a/fs/netfs/rolling_buffer.c
+++ b/fs/netfs/rolling_buffer.c
@@ -146,10 +146,6 @@ ssize_t rolling_buffer_load_from_ra(struct rolling_buffer *roll,
/* Store the counter after setting the slot. */
smp_store_release(&roll->next_head_slot, to);
-
- for (; ix < folioq_nr_slots(fq); ix++)
- folioq_clear(fq, ix);
-
return size;
}
diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
index 294f67795f79..3fca59e6475d 100644
--- a/fs/netfs/write_collect.c
+++ b/fs/netfs/write_collect.c
@@ -400,7 +400,8 @@ void netfs_write_collection_worker(struct work_struct *work)
trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
if (wreq->io_streams[1].active &&
- wreq->io_streams[1].failed) {
+ wreq->io_streams[1].failed &&
+ ictx->ops->invalidate_cache) {
/* Cache write failure doesn't prevent writeback completion
* unless we're in disconnected mode.
*/
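The write_collect.c hunk only enters the cache-failure handling when the filesystem actually provides an ->invalidate_cache operation. Stripped of context, the added test is the usual guard on an optional op in an ops table (a hedged fragment, not the full branch from the hunk):

if (ictx->ops->invalidate_cache)		/* optional op: check before relying on it */
	ictx->ops->invalidate_cache(wreq);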
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index d07682020c64..4fc9485c5d91 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -114,19 +114,23 @@ again:
mutex_lock(&ses->session_mutex);
/*
- * Recheck after acquire mutex. If another thread is negotiating
- * and the server never sends an answer the socket will be closed
- * and tcpStatus set to reconnect.
+ * Handle the case where a concurrent thread failed to negotiate or
+ * killed a channel.
*/
spin_lock(&server->srv_lock);
- if (server->tcpStatus == CifsNeedReconnect) {
+ switch (server->tcpStatus) {
+ case CifsExiting:
spin_unlock(&server->srv_lock);
mutex_unlock(&ses->session_mutex);
-
- if (tcon->retry)
- goto again;
- rc = -EHOSTDOWN;
- goto out;
+ return -EHOSTDOWN;
+ case CifsNeedReconnect:
+ spin_unlock(&server->srv_lock);
+ mutex_unlock(&ses->session_mutex);
+ if (!tcon->retry)
+ return -EHOSTDOWN;
+ goto again;
+ default:
+ break;
}
spin_unlock(&server->srv_lock);
@@ -152,16 +156,20 @@ again:
spin_unlock(&ses->ses_lock);
rc = cifs_negotiate_protocol(0, ses, server);
- if (!rc) {
- rc = cifs_setup_session(0, ses, server, ses->local_nls);
- if ((rc == -EACCES) || (rc == -EHOSTDOWN) || (rc == -EKEYREVOKED)) {
- /*
- * Try alternate password for next reconnect if an alternate
- * password is available.
- */
- if (ses->password2)
- swap(ses->password2, ses->password);
- }
+ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+ if (!tcon->retry)
+ return -EHOSTDOWN;
+ goto again;
+ }
+ rc = cifs_setup_session(0, ses, server, ses->local_nls);
+ if ((rc == -EACCES) || (rc == -EHOSTDOWN) || (rc == -EKEYREVOKED)) {
+ /*
+ * Try alternate password for next reconnect if an alternate
+ * password is available.
+ */
+ if (ses->password2)
+ swap(ses->password2, ses->password);
}
/* do we need to reconnect tcon? */
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index ed7812247ebc..f9c521b3c65e 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -300,32 +300,23 @@ again:
mutex_lock(&ses->session_mutex);
/*
- * if this is called by delayed work, and the channel has been disabled
- * in parallel, the delayed work can continue to execute in parallel
- * there's a chance that this channel may not exist anymore
+ * Handle the case where a concurrent thread failed to negotiate or
+ * killed a channel.
*/
spin_lock(&server->srv_lock);
- if (server->tcpStatus == CifsExiting) {
+ switch (server->tcpStatus) {
+ case CifsExiting:
spin_unlock(&server->srv_lock);
mutex_unlock(&ses->session_mutex);
- rc = -EHOSTDOWN;
- goto out;
- }
-
- /*
- * Recheck after acquire mutex. If another thread is negotiating
- * and the server never sends an answer the socket will be closed
- * and tcpStatus set to reconnect.
- */
- if (server->tcpStatus == CifsNeedReconnect) {
+ return -EHOSTDOWN;
+ case CifsNeedReconnect:
spin_unlock(&server->srv_lock);
mutex_unlock(&ses->session_mutex);
-
- if (tcon->retry)
- goto again;
-
- rc = -EHOSTDOWN;
- goto out;
+ if (!tcon->retry)
+ return -EHOSTDOWN;
+ goto again;
+ default:
+ break;
}
spin_unlock(&server->srv_lock);
@@ -350,43 +341,41 @@ again:
spin_unlock(&ses->ses_lock);
rc = cifs_negotiate_protocol(0, ses, server);
- if (!rc) {
- /*
- * if server stopped supporting multichannel
- * and the first channel reconnected, disable all the others.
- */
- if (ses->chan_count > 1 &&
- !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
- rc = cifs_chan_skip_or_disable(ses, server,
- from_reconnect);
- if (rc) {
- mutex_unlock(&ses->session_mutex);
- goto out;
- }
- }
-
- rc = cifs_setup_session(0, ses, server, ses->local_nls);
- if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
- /*
- * Try alternate password for next reconnect (key rotation
- * could be enabled on the server e.g.) if an alternate
- * password is available and the current password is expired,
- * but do not swap on non pwd related errors like host down
- */
- if (ses->password2)
- swap(ses->password2, ses->password);
- }
-
- if ((rc == -EACCES) && !tcon->retry) {
- mutex_unlock(&ses->session_mutex);
- rc = -EHOSTDOWN;
- goto failed;
- } else if (rc) {
+ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+ if (!tcon->retry)
+ return -EHOSTDOWN;
+ goto again;
+ }
+ /*
+ * if server stopped supporting multichannel
+ * and the first channel reconnected, disable all the others.
+ */
+ if (ses->chan_count > 1 &&
+ !(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+ rc = cifs_chan_skip_or_disable(ses, server,
+ from_reconnect);
+ if (rc) {
mutex_unlock(&ses->session_mutex);
goto out;
}
- } else {
+ }
+
+ rc = cifs_setup_session(0, ses, server, ses->local_nls);
+ if ((rc == -EACCES) || (rc == -EKEYEXPIRED) || (rc == -EKEYREVOKED)) {
+ /*
+ * Try alternate password for next reconnect (key rotation
+ * could be enabled on the server e.g.) if an alternate
+ * password is available and the current password is expired,
+ * but do not swap on non pwd related errors like host down
+ */
+ if (ses->password2)
+ swap(ses->password2, ses->password);
+ }
+ if (rc) {
mutex_unlock(&ses->session_mutex);
+ if (rc == -EACCES && !tcon->retry)
+ return -EHOSTDOWN;
goto out;
}
@@ -490,7 +479,6 @@ out:
case SMB2_IOCTL:
rc = -EAGAIN;
}
-failed:
return rc;
}
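Both SMB reconnect hunks above converge on the same shape: after sleeping on the session mutex, re-read server->tcpStatus under srv_lock and decide between failing hard, retrying the whole reconnect, or carrying on. A condensed, hedged sketch of that recheck step (the helper and its -EAGAIN "retry" convention are hypothetical; the types, locks, and status values are the real cifs ones):

#include "cifsglob.h"	/* struct TCP_Server_Info, struct cifs_ses, statusEnum */

static int recheck_server_state(struct TCP_Server_Info *server,
				struct cifs_ses *ses, bool retry_allowed)
{
	spin_lock(&server->srv_lock);
	switch (server->tcpStatus) {
	case CifsExiting:			/* server is being torn down */
		spin_unlock(&server->srv_lock);
		mutex_unlock(&ses->session_mutex);
		return -EHOSTDOWN;
	case CifsNeedReconnect:			/* another thread lost the socket */
		spin_unlock(&server->srv_lock);
		mutex_unlock(&ses->session_mutex);
		return retry_allowed ? -EAGAIN : -EHOSTDOWN;
	default:
		break;
	}
	spin_unlock(&server->srv_lock);
	return 0;				/* safe to proceed with reconnect */
}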