Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 2
-rw-r--r--  fs/Kconfig | 1
-rw-r--r--  fs/afs/fs_probe.c | 2
-rw-r--r--  fs/afs/server.c | 2
-rw-r--r--  fs/bcachefs/Kconfig | 8
-rw-r--r--  fs/bcachefs/acl.c | 4
-rw-r--r--  fs/bcachefs/alloc_background.c | 117
-rw-r--r--  fs/bcachefs/alloc_background.h | 6
-rw-r--r--  fs/bcachefs/alloc_foreground.c | 81
-rw-r--r--  fs/bcachefs/backpointers.c | 67
-rw-r--r--  fs/bcachefs/bcachefs.h | 11
-rw-r--r--  fs/bcachefs/bcachefs_format.h | 3
-rw-r--r--  fs/bcachefs/btree_cache.c | 2
-rw-r--r--  fs/bcachefs/btree_gc.c | 27
-rw-r--r--  fs/bcachefs/btree_io.c | 80
-rw-r--r--  fs/bcachefs/btree_iter.c | 202
-rw-r--r--  fs/bcachefs/btree_iter.h | 123
-rw-r--r--  fs/bcachefs/btree_journal_iter.c | 3
-rw-r--r--  fs/bcachefs/btree_key_cache.c | 32
-rw-r--r--  fs/bcachefs/btree_node_scan.c | 28
-rw-r--r--  fs/bcachefs/btree_types.h | 1
-rw-r--r--  fs/bcachefs/btree_update.c | 39
-rw-r--r--  fs/bcachefs/btree_update.h | 2
-rw-r--r--  fs/bcachefs/btree_update_interior.c | 103
-rw-r--r--  fs/bcachefs/btree_write_buffer.c | 18
-rw-r--r--  fs/bcachefs/buckets.c | 177
-rw-r--r--  fs/bcachefs/buckets.h | 21
-rw-r--r--  fs/bcachefs/buckets_types.h | 5
-rw-r--r--  fs/bcachefs/chardev.c | 20
-rw-r--r--  fs/bcachefs/checksum.c | 247
-rw-r--r--  fs/bcachefs/checksum.h | 3
-rw-r--r--  fs/bcachefs/clock.c | 2
-rw-r--r--  fs/bcachefs/compress.c | 5
-rw-r--r--  fs/bcachefs/data_update.c | 32
-rw-r--r--  fs/bcachefs/data_update.h | 12
-rw-r--r--  fs/bcachefs/debug.c | 4
-rw-r--r--  fs/bcachefs/dirent.c | 20
-rw-r--r--  fs/bcachefs/disk_accounting.c | 44
-rw-r--r--  fs/bcachefs/disk_accounting.h | 8
-rw-r--r--  fs/bcachefs/disk_accounting_format.h | 80
-rw-r--r--  fs/bcachefs/disk_groups.c | 4
-rw-r--r--  fs/bcachefs/ec.c | 40
-rw-r--r--  fs/bcachefs/errcode.h | 3
-rw-r--r--  fs/bcachefs/error.c | 229
-rw-r--r--  fs/bcachefs/error.h | 48
-rw-r--r--  fs/bcachefs/extent_update.c | 6
-rw-r--r--  fs/bcachefs/extents.c | 7
-rw-r--r--  fs/bcachefs/fs-io-buffered.c | 21
-rw-r--r--  fs/bcachefs/fs-io.c | 45
-rw-r--r--  fs/bcachefs/fs-ioctl.c | 6
-rw-r--r--  fs/bcachefs/fs.c | 33
-rw-r--r--  fs/bcachefs/fsck.c | 52
-rw-r--r--  fs/bcachefs/inode.c | 18
-rw-r--r--  fs/bcachefs/io_misc.c | 18
-rw-r--r--  fs/bcachefs/io_read.c | 21
-rw-r--r--  fs/bcachefs/io_read.h | 6
-rw-r--r--  fs/bcachefs/io_write.c | 84
-rw-r--r--  fs/bcachefs/journal.c | 33
-rw-r--r--  fs/bcachefs/journal_io.c | 48
-rw-r--r--  fs/bcachefs/lru.c | 7
-rw-r--r--  fs/bcachefs/migrate.c | 4
-rw-r--r--  fs/bcachefs/move.c | 49
-rw-r--r--  fs/bcachefs/movinggc.c | 8
-rw-r--r--  fs/bcachefs/namei.c | 42
-rw-r--r--  fs/bcachefs/opts.c | 49
-rw-r--r--  fs/bcachefs/opts.h | 3
-rw-r--r--  fs/bcachefs/printbuf.c | 19
-rw-r--r--  fs/bcachefs/printbuf.h | 1
-rw-r--r--  fs/bcachefs/progress.c | 6
-rw-r--r--  fs/bcachefs/quota.c | 2
-rw-r--r--  fs/bcachefs/rebalance.c | 17
-rw-r--r--  fs/bcachefs/recovery.c | 12
-rw-r--r--  fs/bcachefs/recovery_passes.c | 12
-rw-r--r--  fs/bcachefs/reflink.c | 35
-rw-r--r--  fs/bcachefs/sb-errors_format.h | 6
-rw-r--r--  fs/bcachefs/sb-members.h | 23
-rw-r--r--  fs/bcachefs/snapshot.c | 29
-rw-r--r--  fs/bcachefs/str_hash.c | 4
-rw-r--r--  fs/bcachefs/str_hash.h | 8
-rw-r--r--  fs/bcachefs/subvolume.c | 5
-rw-r--r--  fs/bcachefs/subvolume.h | 14
-rw-r--r--  fs/bcachefs/super-io.c | 21
-rw-r--r--  fs/bcachefs/super.c | 133
-rw-r--r--  fs/bcachefs/sysfs.c | 9
-rw-r--r--  fs/bcachefs/tests.c | 30
-rw-r--r--  fs/bcachefs/time_stats.c | 20
-rw-r--r--  fs/bcachefs/time_stats.h | 1
-rw-r--r--  fs/bcachefs/util.c | 2
-rw-r--r--  fs/bcachefs/util.h | 3
-rw-r--r--  fs/bcachefs/xattr.c | 2
-rw-r--r--  fs/bpf_fs_kfuncs.c | 225
-rw-r--r--  fs/btrfs/Kconfig | 2
-rw-r--r--  fs/btrfs/disk-io.c | 6
-rw-r--r--  fs/btrfs/zstd.c | 2
-rw-r--r--  fs/buffer.c | 14
-rw-r--r--  fs/cachefiles/namei.c | 7
-rw-r--r--  fs/ceph/Kconfig | 2
-rw-r--r--  fs/dax.c | 369
-rw-r--r--  fs/efivarfs/file.c | 10
-rw-r--r--  fs/erofs/Kconfig | 2
-rw-r--r--  fs/exec.c | 18
-rw-r--r--  fs/exfat/balloc.c | 14
-rw-r--r--  fs/exfat/exfat_fs.h | 2
-rw-r--r--  fs/exfat/fatent.c | 31
-rw-r--r--  fs/exfat/file.c | 29
-rw-r--r--  fs/exfat/inode.c | 142
-rw-r--r--  fs/exfat/super.c | 10
-rw-r--r--  fs/exportfs/expfs.c | 1
-rw-r--r--  fs/ext2/ext2.h | 1
-rw-r--r--  fs/ext2/super.c | 595
-rw-r--r--  fs/ext4/inline.c | 2
-rw-r--r--  fs/ext4/inode.c | 18
-rw-r--r--  fs/ext4/move_extent.c | 2
-rw-r--r--  fs/ext4/super.c | 2
-rw-r--r--  fs/fuse/dax.c | 30
-rw-r--r--  fs/fuse/dev.c | 162
-rw-r--r--  fs/fuse/dev_uring.c | 43
-rw-r--r--  fs/fuse/dev_uring_i.h | 18
-rw-r--r--  fs/fuse/dir.c | 13
-rw-r--r--  fs/fuse/file.c | 4
-rw-r--r--  fs/fuse/fuse_dev_i.h | 4
-rw-r--r--  fs/fuse/fuse_i.h | 47
-rw-r--r--  fs/fuse/inode.c | 51
-rw-r--r--  fs/fuse/sysctl.c | 24
-rw-r--r--  fs/fuse/virtio_fs.c | 3
-rw-r--r--  fs/gfs2/Kconfig | 1
-rw-r--r--  fs/hostfs/hostfs.h | 2
-rw-r--r--  fs/hostfs/hostfs_kern.c | 7
-rw-r--r--  fs/hostfs/hostfs_user.c | 59
-rw-r--r--  fs/hugetlbfs/inode.c | 28
-rw-r--r--  fs/iomap/buffered-io.c | 2
-rw-r--r--  fs/isofs/dir.c | 3
-rw-r--r--  fs/jbd2/journal.c | 4
-rw-r--r--  fs/jffs2/wbuf.c | 2
-rw-r--r--  fs/kernfs/dir.c | 209
-rw-r--r--  fs/kernfs/file.c | 6
-rw-r--r--  fs/kernfs/kernfs-internal.h | 37
-rw-r--r--  fs/kernfs/mount.c | 44
-rw-r--r--  fs/kernfs/symlink.c | 30
-rw-r--r--  fs/lockd/Makefile | 2
-rw-r--r--  fs/lockd/netlink.c | 44
-rw-r--r--  fs/lockd/netlink.h | 19
-rw-r--r--  fs/lockd/netns.h | 3
-rw-r--r--  fs/lockd/svc.c | 123
-rw-r--r--  fs/namespace.c | 7
-rw-r--r--  fs/nfs/client.c | 5
-rw-r--r--  fs/nfs/delegation.c | 66
-rw-r--r--  fs/nfs/dir.c | 2
-rw-r--r--  fs/nfs/export.c | 3
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 24
-rw-r--r--  fs/nfs/fs_context.c | 71
-rw-r--r--  fs/nfs/inode.c | 2
-rw-r--r--  fs/nfs/internal.h | 5
-rw-r--r--  fs/nfs/nfs3client.c | 2
-rw-r--r--  fs/nfs/nfs3proc.c | 2
-rw-r--r--  fs/nfs/nfs42proc.c | 172
-rw-r--r--  fs/nfs/nfs42xdr.c | 86
-rw-r--r--  fs/nfs/nfs4client.c | 7
-rw-r--r--  fs/nfs/nfs4proc.c | 17
-rw-r--r--  fs/nfs/nfs4state.c | 14
-rw-r--r--  fs/nfs/nfs4trace.h | 11
-rw-r--r--  fs/nfs/nfs4xdr.c | 19
-rw-r--r--  fs/nfs/super.c | 4
-rw-r--r--  fs/nfs/sysfs.c | 82
-rw-r--r--  fs/nfs/write.c | 4
-rw-r--r--  fs/nfs_common/nfsacl.c | 8
-rw-r--r--  fs/nfsd/Kconfig | 12
-rw-r--r--  fs/nfsd/filecache.c | 122
-rw-r--r--  fs/nfsd/filecache.h | 7
-rw-r--r--  fs/nfsd/nfs4callback.c | 144
-rw-r--r--  fs/nfsd/nfs4layouts.c | 7
-rw-r--r--  fs/nfsd/nfs4proc.c | 2
-rw-r--r--  fs/nfsd/nfs4state.c | 114
-rw-r--r--  fs/nfsd/nfsctl.c | 53
-rw-r--r--  fs/nfsd/state.h | 20
-rw-r--r--  fs/nfsd/stats.c | 4
-rw-r--r--  fs/nfsd/stats.h | 2
-rw-r--r--  fs/nfsd/trace.h | 24
-rw-r--r--  fs/nfsd/vfs.c | 106
-rw-r--r--  fs/nilfs2/segment.c | 2
-rw-r--r--  fs/ntfs3/attrib.c | 3
-rw-r--r--  fs/ntfs3/file.c | 42
-rw-r--r--  fs/ntfs3/frecord.c | 63
-rw-r--r--  fs/ntfs3/fsntfs.c | 28
-rw-r--r--  fs/ntfs3/index.c | 4
-rw-r--r--  fs/ntfs3/inode.c | 40
-rw-r--r--  fs/ntfs3/ntfs.h | 2
-rw-r--r--  fs/ntfs3/ntfs_fs.h | 6
-rw-r--r--  fs/ntfs3/super.c | 89
-rw-r--r--  fs/ocfs2/alloc.c | 8
-rw-r--r--  fs/ocfs2/aops.c | 21
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 2
-rw-r--r--  fs/ocfs2/file.c | 2
-rw-r--r--  fs/ocfs2/quota_global.c | 2
-rw-r--r--  fs/proc/base.c | 2
-rw-r--r--  fs/proc/internal.h | 43
-rw-r--r--  fs/proc/meminfo.c | 2
-rw-r--r--  fs/proc/page.c | 11
-rw-r--r--  fs/proc/task_mmu.c | 56
-rw-r--r--  fs/pstore/platform.c | 2
-rw-r--r--  fs/smb/client/cifs_fs_sb.h | 1
-rw-r--r--  fs/smb/client/cifsacl.c | 21
-rw-r--r--  fs/smb/client/cifsencrypt.c | 16
-rw-r--r--  fs/smb/client/cifsfs.c | 4
-rw-r--r--  fs/smb/client/cifsfs.h | 5
-rw-r--r--  fs/smb/client/cifsglob.h | 15
-rw-r--r--  fs/smb/client/cifspdu.h | 2
-rw-r--r--  fs/smb/client/cifsproto.h | 4
-rw-r--r--  fs/smb/client/cifssmb.c | 43
-rw-r--r--  fs/smb/client/connect.c | 388
-rw-r--r--  fs/smb/client/file.c | 2
-rw-r--r--  fs/smb/client/fs_context.c | 43
-rw-r--r--  fs/smb/client/fs_context.h | 7
-rw-r--r--  fs/smb/client/inode.c | 44
-rw-r--r--  fs/smb/client/link.c | 11
-rw-r--r--  fs/smb/client/misc.c | 2
-rw-r--r--  fs/smb/client/reparse.c | 63
-rw-r--r--  fs/smb/client/reparse.h | 5
-rw-r--r--  fs/smb/client/sess.c | 70
-rw-r--r--  fs/smb/client/smb1ops.c | 111
-rw-r--r--  fs/smb/client/smb2file.c | 21
-rw-r--r--  fs/smb/client/smb2glob.h | 1
-rw-r--r--  fs/smb/client/smb2inode.c | 75
-rw-r--r--  fs/smb/client/smb2misc.c | 9
-rw-r--r--  fs/smb/client/smb2ops.c | 52
-rw-r--r--  fs/smb/client/smb2pdu.c | 39
-rw-r--r--  fs/smb/client/transport.c | 5
-rw-r--r--  fs/smb/client/xattr.c | 49
-rw-r--r--  fs/smb/common/smb2pdu.h | 9
-rw-r--r--  fs/smb/server/Kconfig | 2
-rw-r--r--  fs/smb/server/auth.c | 6
-rw-r--r--  fs/smb/server/connection.h | 11
-rw-r--r--  fs/smb/server/mgmt/user_session.c | 37
-rw-r--r--  fs/smb/server/mgmt/user_session.h | 2
-rw-r--r--  fs/smb/server/oplock.c | 12
-rw-r--r--  fs/smb/server/smb2pdu.c | 54
-rw-r--r--  fs/smb/server/smb_common.h | 2
-rw-r--r--  fs/smb/server/smbacl.c | 21
-rw-r--r--  fs/smb/server/transport_rdma.c | 41
-rw-r--r--  fs/sysfs/dir.c | 2
-rw-r--r--  fs/sysfs/file.c | 24
-rw-r--r--  fs/ubifs/compress.c | 208
-rw-r--r--  fs/ubifs/file.c | 74
-rw-r--r--  fs/ubifs/journal.c | 11
-rw-r--r--  fs/ubifs/ubifs.h | 26
-rw-r--r--  fs/udf/file.c | 2
-rw-r--r--  fs/udf/inode.c | 1
-rw-r--r--  fs/userfaultfd.c | 51
-rw-r--r--  fs/xfs/Kconfig | 2
-rw-r--r--  fs/xfs/xfs_icache.c | 2
-rw-r--r--  fs/xfs/xfs_inode.c | 31
-rw-r--r--  fs/xfs/xfs_inode.h | 2
-rw-r--r--  fs/xfs/xfs_super.c | 12
-rw-r--r--  fs/xfs/xfs_sysfs.c | 8
254 files changed, 5878 insertions, 3015 deletions
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index cc2007be2173..5b5fda617b80 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -407,8 +407,8 @@ static struct dentry *v9fs_vfs_mkdir_dotl(struct mnt_idmap *idmap,
err);
goto error;
}
- v9fs_fid_add(dentry, &fid);
v9fs_set_create_acl(inode, fid, dacl, pacl);
+ v9fs_fid_add(dentry, &fid);
d_instantiate(dentry, inode);
err = 0;
inc_nlink(dir);
diff --git a/fs/Kconfig b/fs/Kconfig
index afe21866d6b4..c718b2e2de0e 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -286,6 +286,7 @@ config HUGETLB_PAGE_OPTIMIZE_VMEMMAP
def_bool HUGETLB_PAGE
depends on ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
depends on SPARSEMEM_VMEMMAP
+ select SPARSEMEM_VMEMMAP_PREINIT if ARCH_WANT_HUGETLB_VMEMMAP_PREINIT
config HUGETLB_PMD_PAGE_TABLE_SHARING
def_bool HUGETLB_PAGE
diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
index 07a8bfbdd9b9..e0030ac74ea0 100644
--- a/fs/afs/fs_probe.c
+++ b/fs/afs/fs_probe.c
@@ -534,6 +534,6 @@ dont_wait:
*/
void afs_fs_probe_cleanup(struct afs_net *net)
{
- if (del_timer_sync(&net->fs_probe_timer))
+ if (timer_delete_sync(&net->fs_probe_timer))
afs_dec_servers_outstanding(net);
}
diff --git a/fs/afs/server.c b/fs/afs/server.c
index c530d1ca15df..8755f2703815 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -318,7 +318,7 @@ struct afs_server *afs_use_server(struct afs_server *server, bool activate,
a = atomic_inc_return(&server->active);
if (a == 1 && activate &&
!test_bit(AFS_SERVER_FL_EXPIRED, &server->flags))
- del_timer(&server->timer);
+ timer_delete(&server->timer);
trace_afs_server(server->debug_id, r + 1, a, reason);
return server;
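
Note: the two afs hunks above are part of the tree-wide timer API rename: del_timer() is now timer_delete() and del_timer_sync() is now timer_delete_sync(), with unchanged semantics. A minimal sketch of the pattern, outside this patch (afs_example_stop is a made-up name):

    #include <linux/timer.h>

    static void afs_example_stop(struct timer_list *t)
    {
            /*
             * timer_delete_sync() additionally waits for a concurrently
             * running handler to finish; both variants return nonzero iff
             * the timer was still queued. That is why afs_fs_probe_cleanup()
             * above only drops its refcount when the call returns true.
             */
            if (timer_delete_sync(t))
                    pr_debug("cancelled a pending timer\n");
    }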
diff --git a/fs/bcachefs/Kconfig b/fs/bcachefs/Kconfig
index c9798750202d..07709b0d7688 100644
--- a/fs/bcachefs/Kconfig
+++ b/fs/bcachefs/Kconfig
@@ -4,7 +4,7 @@ config BCACHEFS_FS
depends on BLOCK
select EXPORTFS
select CLOSURES
- select LIBCRC32C
+ select CRC32
select CRC64
select FS_POSIX_ACL
select LZ4_COMPRESS
@@ -15,10 +15,9 @@ config BCACHEFS_FS
select ZLIB_INFLATE
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
- select CRYPTO
select CRYPTO_LIB_SHA256
- select CRYPTO_CHACHA20
- select CRYPTO_POLY1305
+ select CRYPTO_LIB_CHACHA
+ select CRYPTO_LIB_POLY1305
select KEYS
select RAID6_PQ
select XOR_BLOCKS
@@ -26,6 +25,7 @@ config BCACHEFS_FS
select SRCU
select SYMBOLIC_ERRNAME
select MIN_HEAP
+ select XARRAY_MULTI
help
The bcachefs filesystem - a modern, copy on write filesystem, with
support for multiple devices, compression, checksumming, etc.
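
Note: this Kconfig hunk tracks bcachefs dropping the crypto_shash/skcipher layer (CRYPTO_CHACHA20/CRYPTO_POLY1305) in favor of the direct library interfaces, presumably matching the large fs/bcachefs/checksum.c rework in the diffstat. A hedged sketch of what the library style looks like for Poly1305 (mac_example is a made-up name, not a bcachefs call site):

    #include <crypto/poly1305.h>

    /* One-shot MAC via the lib API: no tfm allocation, no scatterlists. */
    static void mac_example(const u8 key[POLY1305_KEY_SIZE],
                            const u8 *data, unsigned int len,
                            u8 digest[POLY1305_DIGEST_SIZE])
    {
            struct poly1305_desc_ctx ctx;

            poly1305_init(&ctx, key);
            poly1305_update(&ctx, data, len);
            poly1305_final(&ctx, digest);
    }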
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index 99487727ae64..d03adc36100e 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -273,7 +273,7 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct posix_acl *acl = NULL;
if (rcu)
@@ -344,7 +344,7 @@ int bch2_set_acl(struct mnt_idmap *idmap,
{
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
struct bch_inode_unpacked inode_u;
struct posix_acl *acl;
umode_t mode;
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index 5fb396be9127..94ea9e49aec4 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -589,6 +589,8 @@ iter_err:
int bch2_alloc_read(struct bch_fs *c)
{
+ down_read(&c->state_lock);
+
struct btree_trans *trans = bch2_trans_get(c);
struct bch_dev *ca = NULL;
int ret;
@@ -608,7 +610,7 @@ int bch2_alloc_read(struct bch_fs *c)
* bch2_check_alloc_key() which runs later:
*/
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
@@ -629,17 +631,17 @@ int bch2_alloc_read(struct bch_fs *c)
* bch2_check_alloc_key() which runs later:
*/
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
if (k.k->p.offset < ca->mi.first_bucket) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket));
continue;
}
if (k.k->p.offset >= ca->mi.nbuckets) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
@@ -652,6 +654,7 @@ int bch2_alloc_read(struct bch_fs *c)
bch2_dev_put(ca);
bch2_trans_put(trans);
+ up_read(&c->state_lock);
bch_err_fn(c, ret);
return ret;
}
@@ -673,8 +676,7 @@ static int __need_discard_or_freespace_err(struct btree_trans *trans,
bch2_bkey_val_to_text(&buf, c, alloc_k);
int ret = __bch2_fsck_err(NULL, trans, flags, err_id,
- "bucket incorrectly %sset in %s btree\n"
- " %s",
+ "bucket incorrectly %sset in %s btree\n%s",
set ? "" : "un",
bch2_btree_id_str(btree),
buf.buf);
@@ -1027,7 +1029,7 @@ fsck_err:
bch2_dev_put(ca);
return ret;
invalid_bucket:
- bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
+ bch2_fs_inconsistent(c, "reference to invalid bucket\n%s",
(bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
ret = -BCH_ERR_trigger_alloc;
goto err;
@@ -1037,9 +1039,10 @@ invalid_bucket:
* This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
* extents style btrees, but works on non-extents btrees:
*/
-static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end, struct bkey *hole)
{
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k))
return k;
@@ -1050,9 +1053,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
struct btree_iter iter2;
struct bpos next;
- bch2_trans_copy_iter(&iter2, iter);
+ bch2_trans_copy_iter(trans, &iter2, iter);
- struct btree_path *path = btree_iter_path(iter->trans, iter);
+ struct btree_path *path = btree_iter_path(trans, iter);
if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
@@ -1062,9 +1065,9 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
* btree node min/max is a closed interval, upto takes a half
* open interval:
*/
- k = bch2_btree_iter_peek_max(&iter2, end);
+ k = bch2_btree_iter_peek_max(trans, &iter2, end);
next = iter2.pos;
- bch2_trans_iter_exit(iter->trans, &iter2);
+ bch2_trans_iter_exit(trans, &iter2);
BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
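
Note: per the comment at the top of this hunk, bch2_get_key_or_hole() synthesizes keys for gaps: when no key exists at the iterator position, it returns a zero-value deleted key sized to span the hole up to the next real key (capped at U32_MAX sectors, hence the BUG_ON), so callers can walk a non-extents btree slot by slot. Roughly, the tail of the function does this (simplified sketch, not verbatim from the patch):

    /* no key at iter->pos: synthesize a deleted key covering the gap */
    bkey_init(hole);                 /* type = KEY_TYPE_deleted, size 0 */
    hole->p = iter->pos;
    bch2_key_resize(hole, next.offset - iter->pos.offset);
    return (struct bkey_s_c) { hole, NULL };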
@@ -1105,13 +1108,14 @@ static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *buck
return *ca != NULL;
}
-static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
- struct bch_dev **ca, struct bkey *hole)
+static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bch_dev **ca, struct bkey *hole)
{
- struct bch_fs *c = iter->trans->c;
+ struct bch_fs *c = trans->c;
struct bkey_s_c k;
again:
- k = bch2_get_key_or_hole(iter, POS_MAX, hole);
+ k = bch2_get_key_or_hole(trans, iter, POS_MAX, hole);
if (bkey_err(k))
return k;
@@ -1124,7 +1128,7 @@ again:
if (!next_bucket(c, ca, &hole_start))
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, hole_start);
+ bch2_btree_iter_set_pos(trans, iter, hole_start);
goto again;
}
@@ -1165,8 +1169,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
a = bch2_alloc_to_v4(alloc_k, &a_convert);
- bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
- k = bch2_btree_iter_peek_slot(discard_iter);
+ bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p);
+ k = bch2_btree_iter_peek_slot(trans, discard_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1179,8 +1183,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
goto err;
}
- bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
- k = bch2_btree_iter_peek_slot(freespace_iter);
+ bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
+ k = bch2_btree_iter_peek_slot(trans, freespace_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1193,16 +1197,15 @@ int bch2_check_alloc_key(struct btree_trans *trans,
goto err;
}
- bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
- k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
+ k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
ret = bkey_err(k);
if (ret)
goto err;
if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
trans, bucket_gens_key_wrong,
- "incorrect gen in bucket_gens btree (got %u should be %u)\n"
- " %s",
+ "incorrect gen in bucket_gens btree (got %u should be %u)\n%s",
alloc_gen(k, gens_offset), a->gen,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
@@ -1248,9 +1251,9 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
if (!ca->mi.freespace_initialized)
return 0;
- bch2_btree_iter_set_pos(freespace_iter, start);
+ bch2_btree_iter_set_pos(trans, freespace_iter, start);
- k = bch2_btree_iter_peek_slot(freespace_iter);
+ k = bch2_btree_iter_peek_slot(trans, freespace_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1260,7 +1263,7 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
if (fsck_err_on(k.k->type != KEY_TYPE_set,
trans, freespace_hole_missing,
"hole in alloc btree missing in freespace btree\n"
- " device %llu buckets %llu-%llu",
+ "device %llu buckets %llu-%llu",
freespace_iter->pos.inode,
freespace_iter->pos.offset,
end->offset)) {
@@ -1299,9 +1302,9 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
unsigned i, gens_offset, gens_end_offset;
int ret;
- bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
+ bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
- k = bch2_btree_iter_peek_slot(bucket_gens_iter);
+ k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1419,7 +1422,7 @@ int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_ite
(state == BCH_DATA_free &&
genbits != alloc_freespace_genbits(*a))) {
if (fsck_err(trans, need_discard_freespace_key_bad,
- "%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
+ "%s\nincorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
bch2_btree_id_str(iter->btree_id),
iter->pos.inode,
@@ -1434,7 +1437,7 @@ int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_ite
*gen = a->gen;
out:
fsck_err:
- bch2_set_btree_iter_dontneed(&alloc_iter);
+ bch2_set_btree_iter_dontneed(trans, &alloc_iter);
bch2_trans_iter_exit(trans, &alloc_iter);
printbuf_exit(&buf);
return ret;
@@ -1500,7 +1503,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
if (!ca) {
if (fsck_err(trans, bucket_gens_to_invalid_dev,
- "bucket_gens key for invalid device:\n %s",
+ "bucket_gens key for invalid device:\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = bch2_btree_delete_at(trans, iter, 0);
goto out;
@@ -1509,7 +1512,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
if (fsck_err_on(end <= ca->mi.first_bucket ||
start >= ca->mi.nbuckets,
trans, bucket_gens_to_invalid_buckets,
- "bucket_gens key for invalid buckets:\n %s",
+ "bucket_gens key for invalid buckets:\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter, 0);
goto out;
@@ -1571,7 +1574,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
bch2_trans_begin(trans);
- k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
+ k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -1609,7 +1612,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
if (ret)
goto bkey_err;
- bch2_btree_iter_set_pos(&iter, next);
+ bch2_btree_iter_set_pos(trans, &iter, next);
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -1637,7 +1640,7 @@ bkey_err:
BTREE_ITER_prefetch);
while (1) {
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
if (!k.k)
break;
@@ -1656,7 +1659,7 @@ bkey_err:
break;
}
- bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
+ bch2_btree_iter_set_pos(trans, &iter, bpos_nosnap_successor(iter.pos));
}
bch2_trans_iter_exit(trans, &iter);
if (ret)
@@ -1684,7 +1687,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
struct printbuf buf = PRINTBUF;
int ret;
- alloc_k = bch2_btree_iter_peek(alloc_iter);
+ alloc_k = bch2_btree_iter_peek(trans, alloc_iter);
if (!alloc_k.k)
return 0;
@@ -1712,8 +1715,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
if (fsck_err_on(!a->io_time[READ],
trans, alloc_key_cached_but_read_time_zero,
- "cached bucket with read_time 0\n"
- " %s",
+ "cached bucket with read_time 0\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
struct bkey_i_alloc_v4 *a_mut =
@@ -1826,7 +1828,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct bpos pos = need_discard_iter->pos;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_s_c k;
struct bkey_i_alloc_v4 *a;
struct printbuf buf = PRINTBUF;
@@ -1950,7 +1952,7 @@ static void bch2_do_discards_work(struct work_struct *work)
trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
bch2_err_str(ret));
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
@@ -1967,7 +1969,7 @@ void bch2_dev_do_discards(struct bch_dev *ca)
if (queue_work(c->write_ref_wq, &ca->discard_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_write_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_discard);
}
@@ -2045,7 +2047,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
bch2_trans_put(trans);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
@@ -2065,7 +2067,7 @@ static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}
@@ -2082,6 +2084,9 @@ static int invalidate_one_bp(struct btree_trans *trans,
if (ret)
return ret;
+ if (!extent_k.k)
+ return 0;
+
struct bkey_i *n =
bch2_bkey_make_mut(trans, &extent_iter, &extent_k,
BTREE_UPDATE_internal_snapshot_node);
@@ -2199,9 +2204,9 @@ static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter
{
struct bkey_s_c k;
again:
- k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
+ k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
if (!k.k && !*wrapped) {
- bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
+ bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0));
*wrapped = true;
goto again;
}
@@ -2251,12 +2256,12 @@ restart_err:
if (ret)
break;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
bch2_trans_iter_exit(trans, &iter);
err:
bch2_trans_put(trans);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
bch2_bkey_buf_exit(&last_flushed, c);
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
@@ -2274,7 +2279,7 @@ void bch2_dev_do_invalidates(struct bch_dev *ca)
if (queue_work(c->write_ref_wq, &ca->invalidate_work))
return;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
put_ref:
bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}
@@ -2321,7 +2326,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
break;
}
- k = bch2_get_key_or_hole(&iter, end, &hole);
+ k = bch2_get_key_or_hole(trans, &iter, end, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -2340,7 +2345,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
if (ret)
goto bkey_err;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
} else {
struct bkey_i *freespace;
@@ -2360,7 +2365,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
if (ret)
goto bkey_err;
- bch2_btree_iter_set_pos(&iter, k.k->p);
+ bch2_btree_iter_set_pos(trans, &iter, k.k->p);
}
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -2506,7 +2511,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
bch2_set_ra_pages(c, ra_pages);
- for_each_rw_member(c, ca) {
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) {
u64 dev_reserve = 0;
/*
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index c556ccaffe89..34b3d6ac4fbb 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -321,11 +321,11 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
{
u64 want_free = ca->mi.nbuckets >> 7;
u64 free = max_t(s64, 0,
- u.d[BCH_DATA_free].buckets
- + u.d[BCH_DATA_need_discard].buckets
+ u.buckets[BCH_DATA_free]
+ + u.buckets[BCH_DATA_need_discard]
- bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
- return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
+ return clamp_t(s64, want_free - free, 0, u.buckets[BCH_DATA_cached]);
}
void bch2_dev_do_invalidates(struct bch_dev *);
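
Worked example for should_invalidate_buckets() above, with illustrative numbers: for nbuckets = 1 << 20, want_free = nbuckets >> 7 = 8192 (about 1/128 of the device). If free plus need_discard buckets minus the stripe reserve comes to 5000, the helper asks to invalidate 8192 - 5000 = 3192 cached buckets, clamped to the number actually cached. A standalone restatement of the logic (hypothetical helper, kernel style):

    static inline u64 buckets_to_invalidate(u64 nbuckets, u64 free,
                                            u64 need_discard, u64 reserved,
                                            u64 cached)
    {
            u64 want_free = nbuckets >> 7;  /* target ~1/128 of buckets free */
            s64 have = max_t(s64, 0, (s64)(free + need_discard) - (s64)reserved);

            return clamp_t(s64, (s64)want_free - have, 0, cached);
    }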
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 0cac65347a5d..7c930ef77380 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -327,7 +327,7 @@ again:
bucket = sector_to_bucket(ca,
round_up(bucket_to_sector(ca, bucket) + 1,
1ULL << ca->mi.btree_bitmap_shift));
- bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, bucket));
+ bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, bucket));
s->buckets_seen++;
s->skipped_mi_btree_bitmap++;
continue;
@@ -355,7 +355,7 @@ again:
watermark, s, cl)
: NULL;
next:
- bch2_set_btree_iter_dontneed(&citer);
+ bch2_set_btree_iter_dontneed(trans, &citer);
bch2_trans_iter_exit(trans, &citer);
if (ob)
break;
@@ -417,7 +417,7 @@ again:
1ULL << ca->mi.btree_bitmap_shift));
alloc_cursor = bucket|(iter.pos.offset & (~0ULL << 56));
- bch2_btree_iter_set_pos(&iter, POS(ca->dev_idx, alloc_cursor));
+ bch2_btree_iter_set_pos(trans, &iter, POS(ca->dev_idx, alloc_cursor));
s->skipped_mi_btree_bitmap++;
goto next;
}
@@ -426,7 +426,7 @@ again:
if (ob) {
if (!IS_ERR(ob))
*dev_alloc_cursor = iter.pos.offset;
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
break;
}
@@ -469,7 +469,7 @@ static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
prt_printf(&buf, "blocking\t%u\n", cl != NULL);
- prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
+ prt_printf(&buf, "free\t%llu\n", usage->buckets[BCH_DATA_free]);
prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
bch2_copygc_wait_amount(c),
@@ -524,10 +524,10 @@ again:
bch2_dev_usage_read_fast(ca, usage);
avail = dev_buckets_free(ca, *usage, watermark);
- if (usage->d[BCH_DATA_need_discard].buckets > avail)
+ if (usage->buckets[BCH_DATA_need_discard] > avail)
bch2_dev_do_discards(ca);
- if (usage->d[BCH_DATA_need_gc_gens].buckets > avail)
+ if (usage->buckets[BCH_DATA_need_gc_gens] > avail)
bch2_gc_gens_async(c);
if (should_invalidate_buckets(ca, *usage))
@@ -606,8 +606,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
static int __dev_stripe_cmp(struct dev_stripe_state *stripe,
unsigned l, unsigned r)
{
- return ((stripe->next_alloc[l] > stripe->next_alloc[r]) -
- (stripe->next_alloc[l] < stripe->next_alloc[r]));
+ return cmp_int(stripe->next_alloc[l], stripe->next_alloc[r]);
}
#define dev_stripe_cmp(l, r) __dev_stripe_cmp(stripe, l, r)
@@ -626,25 +625,62 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
return ret;
}
+static const u64 stripe_clock_hand_rescale = 1ULL << 62; /* trigger rescale at */
+static const u64 stripe_clock_hand_max = 1ULL << 56; /* max after rescale */
+static const u64 stripe_clock_hand_inv = 1ULL << 52; /* max increment, if a device is empty */
+
+static noinline void bch2_stripe_state_rescale(struct dev_stripe_state *stripe)
+{
+ /*
+ * Avoid underflowing clock hands if at all possible, if clock hands go
+ * to 0 then we lose information - clock hands can be in a wide range if
+ * we have devices we rarely try to allocate from, if we generally
+ * allocate from a specified target but only sometimes have to fall back
+ * to the whole filesystem.
+ */
+ u64 scale_max = U64_MAX; /* maximum we can subtract without underflow */
+ u64 scale_min = 0; /* minimum we must subtract to avoid overflow */
+
+ for (u64 *v = stripe->next_alloc;
+ v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++) {
+ if (*v)
+ scale_max = min(scale_max, *v);
+ if (*v > stripe_clock_hand_max)
+ scale_min = max(scale_min, *v - stripe_clock_hand_max);
+ }
+
+ u64 scale = max(scale_min, scale_max);
+
+ for (u64 *v = stripe->next_alloc;
+ v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
+ *v = *v < scale ? 0 : *v - scale;
+}
+
static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
struct dev_stripe_state *stripe,
struct bch_dev_usage *usage)
{
+ /*
+ * Stripe state has a per device clock hand: we allocate from the device
+ * with the smallest clock hand.
+ *
+ * When we allocate, we don't do a simple increment; we add the inverse
+ * of the device's free space. This results in round robin behavior that
+ * biases in favor of the device(s) with more free space.
+ */
+
u64 *v = stripe->next_alloc + ca->dev_idx;
u64 free_space = __dev_buckets_available(ca, *usage, BCH_WATERMARK_normal);
u64 free_space_inv = free_space
- ? div64_u64(1ULL << 48, free_space)
- : 1ULL << 48;
- u64 scale = *v / 4;
+ ? div64_u64(stripe_clock_hand_inv, free_space)
+ : stripe_clock_hand_inv;
- if (*v + free_space_inv >= *v)
- *v += free_space_inv;
- else
- *v = U64_MAX;
+ /* Saturating add, avoid overflow: */
+ u64 sum = *v + free_space_inv;
+ *v = sum >= *v ? sum : U64_MAX;
- for (v = stripe->next_alloc;
- v < stripe->next_alloc + ARRAY_SIZE(stripe->next_alloc); v++)
- *v = *v < scale ? 0 : *v - scale;
+ if (unlikely(*v > stripe_clock_hand_rescale))
+ bch2_stripe_state_rescale(stripe);
}
void bch2_dev_stripe_increment(struct bch_dev *ca,
@@ -1560,7 +1596,7 @@ static void bch2_write_point_to_text(struct printbuf *out, struct bch_fs *c,
unsigned i;
prt_printf(out, "%lu: ", wp->write_point);
- prt_human_readable_u64(out, wp->sectors_allocated);
+ prt_human_readable_u64(out, wp->sectors_allocated << 9);
prt_printf(out, " last wrote: ");
bch2_pr_time_units(out, sched_clock() - wp->last_used);
@@ -1633,7 +1669,7 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
- struct bch_dev_usage stats = bch2_dev_usage_read(ca);
+ struct bch_dev_usage_full stats = bch2_dev_usage_full_read(ca);
unsigned nr[BCH_DATA_NR];
memset(nr, 0, sizeof(nr));
@@ -1656,7 +1692,8 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
printbuf_tabstop_push(out, 16);
prt_printf(out, "open buckets\t%i\r\n", ca->nr_open_buckets);
- prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
+ prt_printf(out, "buckets to invalidate\t%llu\r\n",
+ should_invalidate_buckets(ca, bch2_dev_usage_read(ca)));
}
static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
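
Two things changed in alloc_foreground.c above: __dev_stripe_cmp() now uses the cmp_int() helper (in bcachefs' util.h it is ((l > r) - (l < r))), and the clock-hand increment now saturates at U64_MAX, rescaling all hands lazily only once one of them passes 1 << 62, instead of subtracting *v / 4 on every increment. The overflow check relies on unsigned wraparound; a self-contained form of the same idiom:

    /*
     * Unsigned overflow wraps, so the sum is smaller than an operand
     * exactly when the add overflowed (check_add_overflow() would also
     * work here).
     */
    static inline u64 u64_add_sat(u64 a, u64 b)
    {
            u64 sum = a + b;

            return sum >= a ? sum : U64_MAX;
    }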
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 20c497f0c2cb..ff26bb515150 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -96,6 +96,7 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
+ int ret = 0;
if (insert) {
prt_printf(&buf, "existing backpointer found when inserting ");
@@ -125,17 +126,15 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
prt_printf(&buf, "for ");
bch2_bkey_val_to_text(&buf, c, orig_k);
-
- bch_err(c, "%s", buf.buf);
}
- printbuf_exit(&buf);
+ if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers &&
+ __bch2_inconsistent_error(c, &buf))
+ ret = -BCH_ERR_erofs_unfixed_errors;
- if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
- return bch2_inconsistent_error(c) ? BCH_ERR_erofs_unfixed_errors : 0;
- } else {
- return 0;
- }
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return ret;
}
int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
@@ -210,11 +209,11 @@ static int backpointer_target_not_found(struct btree_trans *trans,
if (ret)
return ret;
- prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
+ prt_printf(&buf, "backpointer doesn't match %s it points to:\n",
bp.v->level ? "btree node" : "extent");
bch2_bkey_val_to_text(&buf, c, bp.s_c);
- prt_printf(&buf, "\n ");
+ prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, target_k);
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(target_k);
@@ -222,7 +221,7 @@ static int backpointer_target_not_found(struct btree_trans *trans,
struct extent_ptr_decoded p;
bkey_for_each_ptr_decode(target_k.k, ptrs, p, entry)
if (p.ptr.dev == bp.k->p.inode) {
- prt_printf(&buf, "\n ");
+ prt_newline(&buf);
struct bkey_i_backpointer bp2;
bch2_extent_ptr_to_bp(c, bp.v->btree_id, bp.v->level, target_k, p, entry, &bp2);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp2.k_i));
@@ -253,12 +252,24 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
0,
bp.v->level,
iter_flags);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k)) {
bch2_trans_iter_exit(trans, iter);
return k;
}
+ /*
+ * peek_slot() doesn't normally return NULL - except when we ask for a
+ * key at a btree level that doesn't exist.
+ *
+ * We may want to revisit this and change peek_slot():
+ */
+ if (!k.k) {
+ bkey_init(&iter->k);
+ iter->k.p = bp.v->pos;
+ k.k = &iter->k;
+ }
+
if (k.k &&
extent_matches_bp(c, bp.v->btree_id, bp.v->level, k, bp))
return k;
@@ -294,7 +305,7 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
0,
bp.v->level - 1,
0);
- struct btree *b = bch2_btree_iter_peek_node(iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, iter);
if (IS_ERR_OR_NULL(b))
goto err;
@@ -322,7 +333,7 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st
return 0;
struct bch_fs *c = trans->c;
- struct btree_iter alloc_iter = { NULL };
+ struct btree_iter alloc_iter = {};
struct bkey_s_c alloc_k;
struct printbuf buf = PRINTBUF;
int ret = 0;
@@ -443,12 +454,11 @@ found:
if (ret)
goto err;
- prt_str(&buf, "extents pointing to same space, but first extent checksum bad:");
- prt_printf(&buf, "\n ");
+ prt_printf(&buf, "extents pointing to same space, but first extent checksum bad:\n");
bch2_btree_id_to_text(&buf, btree);
prt_str(&buf, " ");
bch2_bkey_val_to_text(&buf, c, extent);
- prt_printf(&buf, "\n ");
+ prt_newline(&buf);
bch2_btree_id_to_text(&buf, o_btree);
prt_str(&buf, " ");
bch2_bkey_val_to_text(&buf, c, extent2);
@@ -464,7 +474,7 @@ err:
if (bio)
bio_put(bio);
kvfree(data_buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
printbuf_exit(&buf);
return ret;
}
@@ -539,9 +549,9 @@ check_existing_bp:
if (bch2_extents_match(orig_k, other_extent)) {
printbuf_reset(&buf);
- prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n ");
+ prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n");
bch2_bkey_val_to_text(&buf, c, orig_k);
- prt_str(&buf, "\n ");
+ prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, other_extent);
bch_err(c, "%s", buf.buf);
@@ -580,20 +590,20 @@ check_existing_bp:
}
printbuf_reset(&buf);
- prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bp->k.p.inode);
+ prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n", bp->k.p.inode);
bch2_bkey_val_to_text(&buf, c, orig_k);
- prt_str(&buf, "\n ");
+ prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, other_extent);
bch_err(c, "%s", buf.buf);
ret = -BCH_ERR_fsck_repair_unimplemented;
goto err;
missing:
printbuf_reset(&buf);
- prt_str(&buf, "missing backpointer\n for: ");
+ prt_str(&buf, "missing backpointer\nfor: ");
bch2_bkey_val_to_text(&buf, c, orig_k);
- prt_printf(&buf, "\n want: ");
+ prt_printf(&buf, "\nwant: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp->k_i));
- prt_printf(&buf, "\n got: ");
+ prt_printf(&buf, "\ngot: ");
bch2_bkey_val_to_text(&buf, c, bp_k);
if (fsck_err(trans, ptr_to_missing_backpointer, "%s", buf.buf))
@@ -652,7 +662,7 @@ static int check_btree_root_to_backpointers(struct btree_trans *trans,
retry:
bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
- b = bch2_btree_iter_peek_node(&iter);
+ b = bch2_btree_iter_peek_node(trans, &iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
@@ -936,7 +946,7 @@ static int btree_node_get_and_pin(struct btree_trans *trans, struct bkey_i *k,
{
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, k->k.p, 0, level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
@@ -1023,7 +1033,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
* Can't allow devices to come/go/resize while we have bucket bitmaps
* allocated
*/
- lockdep_assert_held(&c->state_lock);
+ down_read(&c->state_lock);
for_each_member_device(c, ca) {
BUG_ON(ca->bucket_backpointer_mismatches);
@@ -1108,6 +1118,7 @@ err_free_bitmaps:
ca->bucket_backpointer_mismatches = NULL;
}
+ up_read(&c->state_lock);
bch_err_fn(c, ret);
return ret;
}
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index f52311017aee..5cb0fc384ac0 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -524,8 +524,8 @@ struct bch_dev {
struct percpu_ref ref;
#endif
struct completion ref_completion;
- struct percpu_ref io_ref;
- struct completion io_ref_completion;
+ struct percpu_ref io_ref[2];
+ struct completion io_ref_completion[2];
struct bch_fs *fs;
@@ -562,7 +562,8 @@ struct bch_dev {
unsigned long *bucket_backpointer_mismatches;
unsigned long *bucket_backpointer_empty;
- struct bch_dev_usage __percpu *usage;
+ struct bch_dev_usage_full __percpu
+ *usage;
/* Allocator: */
u64 alloc_cursor[3];
@@ -980,8 +981,8 @@ struct bch_fs {
mempool_t compress_workspace[BCH_COMPRESSION_OPT_NR];
size_t zstd_workspace_size;
- struct crypto_sync_skcipher *chacha20;
- struct crypto_shash *poly1305;
+ struct bch_key chacha20_key;
+ bool chacha20_key_set;
atomic64_t key_version;
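
Context for the bcachefs.h hunk: the single per-device io_ref becomes a two-element array indexed by READ/WRITE (0 and 1, per linux/kernel.h), apparently so read and write references can be drained independently, e.g. when a device goes read-only. The many percpu_ref_put(&ca->io_ref) -> percpu_ref_put(&ca->io_ref[READ or WRITE]) conversions throughout this diff follow from it. A sketch of the resulting pattern (example_io_done is a made-up name):

    static void example_io_done(struct bch_dev *ca, int rw)
    {
            /* drop the directional ref taken when the IO was issued */
            percpu_ref_put(&ca->io_ref[rw]);        /* rw is READ or WRITE */
    }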
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index e96d87767020..a3db328dee31 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -1143,7 +1143,8 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
x(log, 9) \
x(overwrite, 10) \
x(write_buffer_keys, 11) \
- x(datetime, 12)
+ x(datetime, 12) \
+ x(log_bkey, 13)
enum bch_jset_entry_type {
#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
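
The bcachefs_format.h hunk extends an x-macro list: each x(name, nr) entry is expanded once to build the enum (via the #define x(...) shown above) and can be re-expanded elsewhere to build matching tables. A minimal self-contained illustration of the technique (made-up names, not the real bcachefs lists):

    #define EXAMPLE_TYPES()         \
            x(log,       9)         \
            x(datetime, 12)         \
            x(log_bkey, 13)

    enum example_type {
    #define x(f, nr) EXAMPLE_##f = nr,
            EXAMPLE_TYPES()
    #undef x
    };

    static const char * const example_type_names[] = {
    #define x(f, nr) [nr] = #f,
            EXAMPLE_TYPES()
    #undef x
    };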
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 54666027aa85..9b80201c7982 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -1417,7 +1417,7 @@ void __bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
prt_printf(out, "%u", r->level);
else
prt_printf(out, "(unknown)");
- prt_printf(out, "\n ");
+ prt_newline(out);
bch2_bkey_val_to_text(out, c, k);
}
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index ff681e733598..7b98ba2dec64 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -213,15 +213,15 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
prt_printf(&buf, " at ");
bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
- prt_printf(&buf, ":\n parent: ");
+ prt_printf(&buf, ":\nparent: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
if (prev) {
- prt_printf(&buf, "\n prev: ");
+ prt_printf(&buf, "\nprev: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&prev->key));
}
- prt_str(&buf, "\n next: ");
+ prt_str(&buf, "\nnext: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));
if (bpos_lt(expected_start, cur->data->min_key)) { /* gap */
@@ -280,12 +280,12 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
if (bpos_eq(child->key.k.p, b->key.k.p))
return 0;
- prt_printf(&buf, " at ");
+ prt_printf(&buf, "\nat: ");
bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
- prt_printf(&buf, ":\n parent: ");
+ prt_printf(&buf, "\nparent: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- prt_str(&buf, "\n child: ");
+ prt_str(&buf, "\nchild: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));
if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
@@ -351,8 +351,7 @@ again:
if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
trans, btree_node_read_error,
- "Topology repair: unreadable btree node at\n"
- " %s",
+ "Topology repair: unreadable btree node at\n%s",
buf.buf)) {
bch2_btree_node_evict(trans, cur_k.k);
cur = NULL;
@@ -612,7 +611,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
if (fsck_err_on(btree_id != BTREE_ID_accounting &&
k.k->bversion.lo > atomic64_read(&c->key_version),
trans, bkey_version_in_future,
- "key version number higher than recorded %llu\n %s",
+ "key version number higher than recorded %llu\n%s",
atomic64_read(&c->key_version),
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
atomic64_set(&c->key_version, k.k->bversion.lo);
@@ -620,7 +619,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
if (mustfix_fsck_err_on(level && !bch2_dev_btree_bitmap_marked(c, k),
trans, btree_bitmap_not_marked,
- "btree ptr not marked in member info btree allocated bitmap\n %s",
+ "btree ptr not marked in member info btree allocated bitmap\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k),
buf.buf))) {
@@ -692,7 +691,7 @@ retry_root:
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN,
0, bch2_btree_id_root(c, btree)->b->c.level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err_root;
@@ -1021,8 +1020,7 @@ int bch2_check_allocations(struct bch_fs *c)
{
int ret;
- lockdep_assert_held(&c->state_lock);
-
+ down_read(&c->state_lock);
down_write(&c->gc_lock);
bch2_btree_interior_updates_flush(c);
@@ -1060,6 +1058,7 @@ out:
percpu_up_write(&c->mark_lock);
up_write(&c->gc_lock);
+ up_read(&c->state_lock);
/*
* At startup, allocations can happen directly instead of via the
@@ -1200,7 +1199,7 @@ int bch2_gc_gens(struct bch_fs *c)
BCH_TRANS_COMMIT_no_enospc, ({
ca = bch2_dev_iterate(c, ca, k.k->p.inode);
if (!ca) {
- bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
+ bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
continue;
}
bch2_alloc_write_oldest_gen(trans, ca, &iter, k);
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 2ba33ffc9795..5fd4a58d2ad2 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -525,8 +525,6 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
prt_printf(out, "at btree ");
bch2_btree_pos_to_text(out, c, b);
- printbuf_indent_add(out, 2);
-
prt_printf(out, "\nnode offset %u/%u",
b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
if (i)
@@ -550,32 +548,39 @@ static int __btree_err(int ret,
enum bch_sb_error_id err_type,
const char *fmt, ...)
{
- struct printbuf out = PRINTBUF;
bool silent = c->curr_recovery_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes;
- va_list args;
+
+ if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
+ ret = -BCH_ERR_btree_node_read_err_fixable;
+ if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
+ ret = -BCH_ERR_btree_node_read_err_bad_node;
+
+ if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
+ bch2_sb_error_count(c, err_type);
+
+ struct printbuf out = PRINTBUF;
+ if (write != WRITE && ret != -BCH_ERR_btree_node_read_err_fixable) {
+ printbuf_indent_add_nextline(&out, 2);
+#ifdef BCACHEFS_LOG_PREFIX
+ prt_printf(&out, bch2_log_msg(c, ""));
+#endif
+ }
btree_err_msg(&out, c, ca, b, i, k, b->written, write);
+ va_list args;
va_start(args, fmt);
prt_vprintf(&out, fmt, args);
va_end(args);
if (write == WRITE) {
- bch2_print_string_as_lines(KERN_ERR, out.buf);
- ret = c->opts.errors == BCH_ON_ERROR_continue
- ? 0
- : -BCH_ERR_fsck_errors_not_fixed;
- goto out;
+ prt_str(&out, ", ");
+ ret = __bch2_inconsistent_error(c, &out)
+ ? -BCH_ERR_fsck_errors_not_fixed
+ : 0;
+ silent = false;
}
- if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
- ret = -BCH_ERR_btree_node_read_err_fixable;
- if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
- ret = -BCH_ERR_btree_node_read_err_bad_node;
-
- if (!silent && ret != -BCH_ERR_btree_node_read_err_fixable)
- bch2_sb_error_count(c, err_type);
-
switch (ret) {
case -BCH_ERR_btree_node_read_err_fixable:
ret = !silent
@@ -585,25 +590,21 @@ static int __btree_err(int ret,
ret != -BCH_ERR_fsck_ignore)
goto fsck_err;
ret = -BCH_ERR_fsck_fix;
- break;
- case -BCH_ERR_btree_node_read_err_want_retry:
- case -BCH_ERR_btree_node_read_err_must_retry:
- if (!silent)
- bch2_print_string_as_lines(KERN_ERR, out.buf);
- break;
+ goto out;
case -BCH_ERR_btree_node_read_err_bad_node:
- if (!silent)
- bch2_print_string_as_lines(KERN_ERR, out.buf);
- ret = bch2_topology_error(c);
+ prt_str(&out, ", ");
+ ret = __bch2_topology_error(c, &out);
+ if (ret)
+ silent = false;
break;
case -BCH_ERR_btree_node_read_err_incompatible:
- if (!silent)
- bch2_print_string_as_lines(KERN_ERR, out.buf);
ret = -BCH_ERR_fsck_errors_not_fixed;
+ silent = false;
break;
- default:
- BUG();
}
+
+ if (!silent)
+ bch2_print_string_as_lines(KERN_ERR, out.buf);
out:
fsck_err:
printbuf_exit(&out);
@@ -817,7 +818,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
-BCH_ERR_btree_node_read_err_bad_node,
c, ca, b, i, NULL,
btree_node_bad_format,
- "invalid bkey format: %s\n %s", buf1.buf,
+ "invalid bkey format: %s\n%s", buf1.buf,
(printbuf_reset(&buf2),
bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
printbuf_reset(&buf1);
@@ -1352,7 +1353,7 @@ start:
"btree read error %s for %s",
bch2_blk_status_to_str(bio->bi_status), buf.buf);
if (rb->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
rb->have_ioref = false;
bch2_mark_io_failure(&failed, &rb->pick, false);
@@ -1608,6 +1609,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
bch2_latency_acct(ca, rb->start_time, READ);
+ percpu_ref_put(&ca->io_ref[READ]);
}
ra->err[rb->idx] = bio->bi_status;
@@ -1907,7 +1909,8 @@ static void btree_node_scrub_work(struct work_struct *work)
scrub->key.k->k.p, 0, scrub->level - 1, 0);
struct btree *b;
- int ret = lockrestart_do(trans, PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(&iter)));
+ int ret = lockrestart_do(trans,
+ PTR_ERR_OR_ZERO(b = bch2_btree_iter_peek_node(trans, &iter)));
if (ret)
goto err;
@@ -1926,7 +1929,7 @@ err:
printbuf_exit(&err);
bch2_bkey_buf_exit(&scrub->key, c);
btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
- percpu_ref_put(&scrub->ca->io_ref);
+ percpu_ref_put(&scrub->ca->io_ref[READ]);
kfree(scrub);
bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
}
@@ -1995,7 +1998,7 @@ int bch2_btree_node_scrub(struct btree_trans *trans,
return 0;
err_free:
btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
err:
bch2_write_ref_put(c, BCH_WRITE_REF_btree_node_scrub);
return ret;
@@ -2143,6 +2146,7 @@ static void btree_node_write_endio(struct bio *bio)
if (ca && bio->bi_status) {
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
prt_printf(&buf, "btree write error: %s\n ",
bch2_blk_status_to_str(bio->bi_status));
bch2_btree_pos_to_text(&buf, c, b);
@@ -2157,8 +2161,12 @@ static void btree_node_write_endio(struct bio *bio)
spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
}
+ /*
+ * XXX: we should be using io_ref[WRITE], but we aren't retrying failed
+ * btree writes yet (due to device removal/ro):
+ */
if (wbio->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
if (parent) {
bio_put(bio);
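
From fs/bcachefs/btree_iter.c onward, the dominant change in this diff is mechanical: every iterator entry point gains an explicit struct btree_trans * first argument, and the deleted "struct btree_trans *trans = iter->trans;" lines suggest this prepares for dropping the iter->trans back-pointer entirely. Callers already hold the transaction, so conversions look like:

    /* before */
    k = bch2_btree_iter_peek(&iter);
    bch2_btree_iter_set_pos(&iter, pos);

    /* after */
    k = bch2_btree_iter_peek(trans, &iter);
    bch2_btree_iter_set_pos(trans, &iter, pos);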
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 7542c6f9c88e..e34e9598ef25 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -244,10 +244,8 @@ void bch2_trans_verify_paths(struct btree_trans *trans)
bch2_btree_path_verify(trans, path);
}
-static void bch2_btree_iter_verify(struct btree_iter *iter)
+static void bch2_btree_iter_verify(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
-
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);
BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
@@ -276,9 +274,9 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
bkey_gt(iter->pos, iter->k.p)));
}
-static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
+static int bch2_btree_iter_verify_ret(struct btree_trans *trans,
+ struct btree_iter *iter, struct bkey_s_c k)
{
- struct btree_trans *trans = iter->trans;
struct btree_iter copy;
struct bkey_s_c prev;
int ret = 0;
@@ -299,7 +297,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
BTREE_ITER_nopreserve|
BTREE_ITER_all_snapshots);
- prev = bch2_btree_iter_prev(&copy);
+ prev = bch2_btree_iter_prev(trans, &copy);
if (!prev.k)
goto out;
@@ -365,9 +363,11 @@ static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
struct btree_path *path) {}
-static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
+static inline void bch2_btree_iter_verify(struct btree_trans *trans,
+ struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}
-static inline int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k) { return 0; }
+static inline int bch2_btree_iter_verify_ret(struct btree_trans *trans, struct btree_iter *iter,
+ struct bkey_s_c k) { return 0; }
#endif
@@ -1487,22 +1487,14 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
for (struct jset_entry *e = trans->journal_entries;
e != btree_trans_journal_entries_top(trans);
- e = vstruct_next(e))
+ e = vstruct_next(e)) {
bch2_journal_entry_to_text(buf, trans->c, e);
+ prt_newline(buf);
+ }
printbuf_indent_sub(buf, 2);
}
-noinline __cold
-void bch2_dump_trans_updates(struct btree_trans *trans)
-{
- struct printbuf buf = PRINTBUF;
-
- bch2_trans_updates_to_text(&buf, trans);
- bch2_print_str(trans->c, buf.buf);
- printbuf_exit(&buf);
-}
-
static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
struct btree_path *path = trans->paths + path_idx;
@@ -1863,10 +1855,8 @@ hole:
return (struct bkey_s_c) { u, NULL };
}
-void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
+void bch2_set_btree_iter_dontneed(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
-
if (!iter->path || trans->restarted)
return;
@@ -1878,17 +1868,14 @@ void bch2_set_btree_iter_dontneed(struct btree_iter *iter)
/* Btree iterators: */
int __must_check
-__bch2_btree_iter_traverse(struct btree_iter *iter)
+__bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
{
- return bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+ return bch2_btree_path_traverse(trans, iter->path, iter->flags);
}
int __must_check
-bch2_btree_iter_traverse(struct btree_iter *iter)
+bch2_btree_iter_traverse(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
- int ret;
-
bch2_trans_verify_not_unlocked_or_in_restart(trans);
iter->path = bch2_btree_path_set_pos(trans, iter->path,
@@ -1896,7 +1883,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
- ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+ int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
return ret;
@@ -1908,14 +1895,14 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
/* Iterate across nodes (leaf and interior nodes) */
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
@@ -1937,7 +1924,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return b;
err:
@@ -1946,26 +1933,26 @@ err:
}
/* Only kept for -tools */
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *trans,
+ struct btree_iter *iter)
{
struct btree *b;
- while (b = bch2_btree_iter_peek_node(iter),
+ while (b = bch2_btree_iter_peek_node(trans, iter),
bch2_err_matches(PTR_ERR_OR_ZERO(b), BCH_ERR_transaction_restart))
- bch2_trans_begin(iter->trans);
+ bch2_trans_begin(trans);
return b;
}
-struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
+struct btree *bch2_btree_iter_next_node(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct btree *b = NULL;
int ret;
EBUG_ON(trans->paths[iter->path].cached);
bch2_trans_verify_not_unlocked_or_in_restart(trans);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (ret)
@@ -2032,7 +2019,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
out:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return b;
err:
@@ -2042,7 +2029,7 @@ err:
/* Iterate across keys (in leaf nodes only) */
-inline bool bch2_btree_iter_advance(struct btree_iter *iter)
+inline bool bch2_btree_iter_advance(struct btree_trans *trans, struct btree_iter *iter)
{
struct bpos pos = iter->k.p;
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
@@ -2051,11 +2038,11 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_successor(iter, pos);
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
return ret;
}
-inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
+inline bool bch2_btree_iter_rewind(struct btree_trans *trans, struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
@@ -2064,7 +2051,7 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_predecessor(iter, pos);
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
return ret;
}
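
advance() and rewind() step the iterator one key (or one extent boundary) forward or back; paired with a peek they form the basic iteration loop. A minimal forward-iteration sketch, assuming an already-initialized iter, with restart handling omitted:

	struct bkey_s_c k;
	int ret = 0;

	for (k = bch2_btree_iter_peek(trans, &iter);
	     k.k && !(ret = bkey_err(k));
	     bch2_btree_iter_advance(trans, &iter),
	     k = bch2_btree_iter_peek(trans, &iter))
		;	/* consume k here */

This is the same shape the for_each_btree_key_*_norestart() macros in btree_iter.h expand to.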
@@ -2191,9 +2178,9 @@ void btree_trans_peek_prev_journal(struct btree_trans *trans,
* bkey_s_c_null:
*/
static noinline
-struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos pos)
+struct bkey_s_c btree_trans_peek_key_cache(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos pos)
{
- struct btree_trans *trans = iter->trans;
struct bch_fs *c = trans->c;
struct bkey u;
struct bkey_s_c k;
@@ -2239,14 +2226,14 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
return k;
}
-static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
+static struct bkey_s_c __bch2_btree_iter_peek(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos search_key)
{
- struct btree_trans *trans = iter->trans;
struct bkey_s_c k, k2;
int ret;
EBUG_ON(btree_iter_path(trans, iter)->cached);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
@@ -2256,7 +2243,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
break;
}
@@ -2266,7 +2253,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
@@ -2277,10 +2264,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
- (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
k = k2;
if (bkey_err(k)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
break;
}
}
@@ -2313,27 +2300,28 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
search_key = bpos_successor(l->b->key.k.p);
} else {
/* End of btree: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
}
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
}
/**
* bch2_btree_iter_peek_max() - returns first key greater than or equal to
* iterator's current position
+ * @trans: btree transaction object
* @iter: iterator to peek from
* @end: search limit: returns keys less than or equal to @end
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos end)
+struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end)
{
- struct btree_trans *trans = iter->trans;
struct bpos search_key = btree_iter_search_key(iter);
struct bkey_s_c k;
struct bpos iter_pos = iter->pos;
@@ -2356,7 +2344,7 @@ struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *iter, struct bpos en
}
while (1) {
- k = __bch2_btree_iter_peek(iter, search_key);
+ k = __bch2_btree_iter_peek(trans, iter, search_key);
if (unlikely(!k.k))
goto end;
if (unlikely(bkey_err(k)))
@@ -2470,9 +2458,9 @@ out_no_locked:
if (!(iter->flags & BTREE_ITER_all_snapshots))
iter->pos.snapshot = iter->snapshot;
- ret = bch2_btree_iter_verify_ret(iter, k);
+ ret = bch2_btree_iter_verify_ret(trans, iter, k);
if (unlikely(ret)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
}
@@ -2480,7 +2468,7 @@ out_no_locked:
return k;
end:
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
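
Callers that want transaction restarts handled for them typically wrap the peek in lockrestart_do(); a sketch, where process_key() is a hypothetical consumer returning an int:

	int ret = lockrestart_do(trans, ({
		struct bkey_s_c k = bch2_btree_iter_peek_max(trans, &iter, end);
		bkey_err(k) ?: process_key(trans, k);	/* hypothetical */
	}));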
@@ -2488,24 +2476,25 @@ end:
/**
* bch2_btree_iter_next() - returns first key greater than iterator's current
* position
+ * @trans: btree transaction object
* @iter: iterator to peek from
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_advance(iter))
+ if (!bch2_btree_iter_advance(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek(iter);
+ return bch2_btree_iter_peek(trans, iter);
}
-static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, struct bpos search_key)
+static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos search_key)
{
- struct btree_trans *trans = iter->trans;
struct bkey_s_c k, k2;
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
@@ -2515,7 +2504,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
int ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
/* ensure that iter->k is consistent with iter->pos: */
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
k = bkey_s_c_err(ret);
break;
}
@@ -2525,7 +2514,7 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
if (unlikely(!l->b)) {
/* No btree nodes at requested level: */
- bch2_btree_iter_set_pos(iter, SPOS_MAX);
+ bch2_btree_iter_set_pos(trans, iter, SPOS_MAX);
k = bkey_s_c_null;
break;
}
@@ -2541,10 +2530,10 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
- (k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
+ (k2 = btree_trans_peek_key_cache(trans, iter, k.k->p)).k) {
k = k2;
if (bkey_err(k2)) {
- bch2_btree_iter_set_pos(iter, iter->pos);
+ bch2_btree_iter_set_pos(trans, iter, iter->pos);
break;
}
}
@@ -2565,25 +2554,27 @@ static struct bkey_s_c __bch2_btree_iter_peek_prev(struct btree_iter *iter, stru
search_key = bpos_predecessor(path->l[0].b->data->min_key);
} else {
/* Start of btree: */
- bch2_btree_iter_set_pos(iter, POS_MIN);
+ bch2_btree_iter_set_pos(trans, iter, POS_MIN);
k = bkey_s_c_null;
break;
}
}
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
}
/**
* bch2_btree_iter_peek_prev_min() - returns first key less than or equal to
* iterator's current position
+ * @trans: btree transaction object
* @iter: iterator to peek from
* @end: search limit: returns keys greater than or equal to @end
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bpos end)
+struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end)
{
if ((iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots)) &&
!bkey_eq(iter->pos, POS_MAX)) {
@@ -2595,7 +2586,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
* real visible extents - easiest to just use peek_slot() (which
* internally uses peek() for extents)
*/
- struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
if (bkey_err(k))
return k;
@@ -2605,7 +2596,6 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
return k;
}
- struct btree_trans *trans = iter->trans;
struct bpos search_key = iter->pos;
struct bkey_s_c k;
btree_path_idx_t saved_path = 0;
@@ -2621,7 +2611,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *iter, struct bp
}
while (1) {
- k = __bch2_btree_iter_peek_prev(iter, search_key);
+ k = __bch2_btree_iter_peek_prev(trans, iter, search_key);
if (unlikely(!k.k))
goto end;
if (unlikely(bkey_err(k)))
@@ -2712,10 +2702,10 @@ out_no_locked:
bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
return k;
end:
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
k = bkey_s_c_null;
goto out_no_locked;
}
@@ -2723,27 +2713,27 @@ end:
/**
* bch2_btree_iter_prev() - returns first key less than iterator's current
* position
+ * @trans: btree transaction object
* @iter: iterator to peek from
*
* Returns: key if found, or an error extractable with bkey_err().
*/
-struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_rewind(iter))
+ if (!bch2_btree_iter_rewind(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_prev(iter);
+ return bch2_btree_iter_peek_prev(trans, iter);
}
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- struct btree_trans *trans = iter->trans;
struct bpos search_key;
struct bkey_s_c k;
int ret;
bch2_trans_verify_not_unlocked_or_in_restart(trans);
- bch2_btree_iter_verify(iter);
+ bch2_btree_iter_verify(trans, iter);
bch2_btree_iter_verify_entry_exit(iter);
EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));
@@ -2759,7 +2749,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
if (iter->pos.inode == KEY_INODE_MAX)
return bkey_s_c_null;
- bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+ bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
}
search_key = btree_iter_search_key(iter);
@@ -2793,7 +2783,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
goto out;
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
- (k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
+ (k = btree_trans_peek_key_cache(trans, iter, iter->pos)).k) {
if (!bkey_err(k))
iter->k = *k.k;
/* We're not returning a key from iter->path: */
@@ -2820,8 +2810,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
if (iter->flags & BTREE_ITER_intent) {
struct btree_iter iter2;
- bch2_trans_copy_iter(&iter2, iter);
- k = bch2_btree_iter_peek_max(&iter2, end);
+ bch2_trans_copy_iter(trans, &iter2, iter);
+ k = bch2_btree_iter_peek_max(trans, &iter2, end);
if (k.k && !bkey_err(k)) {
swap(iter->key_cache_path, iter2.key_cache_path);
@@ -2832,9 +2822,9 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
} else {
struct bpos pos = iter->pos;
- k = bch2_btree_iter_peek_max(iter, end);
+ k = bch2_btree_iter_peek_max(trans, iter, end);
if (unlikely(bkey_err(k)))
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
else
iter->pos = pos;
}
@@ -2865,39 +2855,39 @@ out:
btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
out_no_locked:
bch2_btree_iter_verify_entry_exit(iter);
- bch2_btree_iter_verify(iter);
- ret = bch2_btree_iter_verify_ret(iter, k);
+ bch2_btree_iter_verify(trans, iter);
+ ret = bch2_btree_iter_verify_ret(trans, iter, k);
if (unlikely(ret))
return bkey_s_c_err(ret);
return k;
}
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_advance(iter))
+ if (!bch2_btree_iter_advance(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *trans, struct btree_iter *iter)
{
- if (!bch2_btree_iter_rewind(iter))
+ if (!bch2_btree_iter_rewind(trans, iter))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
/* Obsolete, but still used by rust wrapper in -tools */
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *trans, struct btree_iter *iter)
{
struct bkey_s_c k;
- while (btree_trans_too_many_iters(iter->trans) ||
- (k = bch2_btree_iter_peek_type(iter, iter->flags),
+ while (btree_trans_too_many_iters(trans) ||
+ (k = bch2_btree_iter_peek_type(trans, iter, iter->flags),
bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
- bch2_trans_begin(iter->trans);
+ bch2_trans_begin(trans);
return k;
}
@@ -3043,7 +3033,6 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
iter->path = 0;
iter->update_path = 0;
iter->key_cache_path = 0;
- iter->trans = NULL;
}
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
@@ -3083,10 +3072,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
BUG_ON(iter->min_depth != depth);
}
-void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
+void bch2_trans_copy_iter(struct btree_trans *trans,
+ struct btree_iter *dst, struct btree_iter *src)
{
- struct btree_trans *trans = src->trans;
-
*dst = *src;
#ifdef TRACK_PATH_ALLOCATED
dst->ip_allocated = _RET_IP_;
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index 8823eec6b284..9d2cccf5d21a 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -9,7 +9,6 @@
void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
-void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);
static inline int __bkey_err(const struct bkey *k)
@@ -394,36 +393,37 @@ void bch2_trans_node_add(struct btree_trans *trans, struct btree_path *, struct
void bch2_trans_node_drop(struct btree_trans *trans, struct btree *);
void bch2_trans_node_reinit_iter(struct btree_trans *, struct btree *);
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter);
-int __must_check bch2_btree_iter_traverse(struct btree_iter *);
+int __must_check __bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
+int __must_check bch2_btree_iter_traverse(struct btree_trans *, struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node(struct btree_iter *);
-struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *);
-struct btree *bch2_btree_iter_next_node(struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node(struct btree_trans *, struct btree_iter *);
+struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_trans *, struct btree_iter *);
+struct btree *bch2_btree_iter_next_node(struct btree_trans *, struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_max(struct btree_iter *, struct bpos);
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_max(struct btree_trans *, struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_next(struct btree_trans *, struct btree_iter *);
-static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+static inline struct bkey_s_c bch2_btree_iter_peek(struct btree_trans *trans,
+ struct btree_iter *iter)
{
- return bch2_btree_iter_peek_max(iter, SPOS_MAX);
+ return bch2_btree_iter_peek_max(trans, iter, SPOS_MAX);
}
-struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_iter *, struct bpos);
+struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *, struct btree_iter *, struct bpos);
-static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
+static inline struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_trans *trans, struct btree_iter *iter)
{
- return bch2_btree_iter_peek_prev_min(iter, POS_MIN);
+ return bch2_btree_iter_peek_prev_min(trans, iter, POS_MIN);
}
-struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev(struct btree_trans *, struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *);
-struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_next_slot(struct btree_trans *, struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_trans *, struct btree_iter *);
-bool bch2_btree_iter_advance(struct btree_iter *);
-bool bch2_btree_iter_rewind(struct btree_iter *);
+bool bch2_btree_iter_advance(struct btree_trans *, struct btree_iter *);
+bool bch2_btree_iter_rewind(struct btree_trans *, struct btree_iter *);
static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
@@ -434,10 +434,9 @@ static inline void __bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpo
iter->k.size = 0;
}
-static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+static inline void bch2_btree_iter_set_pos(struct btree_trans *trans,
+ struct btree_iter *iter, struct bpos new_pos)
{
- struct btree_trans *trans = iter->trans;
-
if (unlikely(iter->update_path))
bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_intent);
@@ -455,13 +454,14 @@ static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *it
iter->pos = bkey_start_pos(&iter->k);
}
-static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 snapshot)
+static inline void bch2_btree_iter_set_snapshot(struct btree_trans *trans,
+ struct btree_iter *iter, u32 snapshot)
{
struct bpos pos = iter->pos;
iter->snapshot = snapshot;
pos.snapshot = snapshot;
- bch2_btree_iter_set_pos(iter, pos);
+ bch2_btree_iter_set_pos(trans, iter, pos);
}
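
Because the snapshot id is part of the key's position, switching snapshots is just a position update followed by a re-peek. Sketch (snapshot_id is illustrative):

	bch2_btree_iter_set_snapshot(trans, &iter, snapshot_id);
	struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, &iter);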
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
@@ -503,7 +503,6 @@ static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
unsigned flags,
unsigned long ip)
{
- iter->trans = trans;
iter->update_path = 0;
iter->key_cache_path = 0;
iter->btree_id = btree_id;
@@ -540,9 +539,9 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans,
void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos,
unsigned, unsigned, unsigned);
-void bch2_trans_copy_iter(struct btree_iter *, struct btree_iter *);
+void bch2_trans_copy_iter(struct btree_trans *, struct btree_iter *, struct btree_iter *);
-void bch2_set_btree_iter_dontneed(struct btree_iter *);
+void bch2_set_btree_iter_dontneed(struct btree_trans *, struct btree_iter *);
void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
@@ -589,7 +588,7 @@ static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
struct bkey_s_c k;
bch2_trans_iter_init(trans, iter, btree_id, pos, flags);
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
if (!bkey_err(k) && type && k.k->type != type)
k = bkey_s_c_err(-BCH_ERR_ENOENT_bkey_type_mismatch);
@@ -659,14 +658,14 @@ u32 bch2_trans_begin(struct btree_trans *);
int _ret3 = 0; \
do { \
_ret3 = lockrestart_do((_trans), ({ \
- struct btree *_b = bch2_btree_iter_peek_node(&_iter); \
+ struct btree *_b = bch2_btree_iter_peek_node(_trans, &_iter);\
if (!_b) \
break; \
\
PTR_ERR_OR_ZERO(_b) ?: (_do); \
})) ?: \
lockrestart_do((_trans), \
- PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \
+ PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(_trans, &_iter)));\
} while (!_ret3); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
@@ -678,31 +677,34 @@ u32 bch2_trans_begin(struct btree_trans *);
__for_each_btree_node(_trans, _iter, _btree_id, _start, \
0, 0, _flags, _b, _do)
-static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
+static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
- bch2_btree_iter_peek_prev(iter);
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) :
+ bch2_btree_iter_peek_prev(trans, iter);
}
-static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
+static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_trans *trans,
+ struct btree_iter *iter,
unsigned flags)
{
- return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
- bch2_btree_iter_peek(iter);
+ return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(trans, iter) :
+ bch2_btree_iter_peek(trans, iter);
}
-static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_iter *iter,
- struct bpos end,
- unsigned flags)
+static inline struct bkey_s_c bch2_btree_iter_peek_max_type(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bpos end,
+ unsigned flags)
{
if (!(flags & BTREE_ITER_slots))
- return bch2_btree_iter_peek_max(iter, end);
+ return bch2_btree_iter_peek_max(trans, iter, end);
if (bkey_gt(iter->pos, end))
return bkey_s_c_null;
- return bch2_btree_iter_peek_slot(iter);
+ return bch2_btree_iter_peek_slot(trans, iter);
}
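
With BTREE_ITER_slots every position is returned — holes come back as deleted keys — so this helper only needs the bkey_gt() end check before delegating to peek_slot(). A hedged caller sketch:

	struct bkey_s_c k = bch2_btree_iter_peek_max_type(trans, &iter, end,
							  BTREE_ITER_slots);
	if (!bkey_err(k) && k.k && !bkey_deleted(k.k))
		;	/* the slot at iter.pos holds a real key */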
int __bch2_btree_trans_too_many_iters(struct btree_trans *);
@@ -769,14 +771,14 @@ transaction_restart: \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), \
_end, (_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
@@ -814,14 +816,14 @@ transaction_restart: \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_prev_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), \
(_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_rewind(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_rewind(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
@@ -851,37 +853,38 @@ transaction_restart: \
(_do) ?: bch2_trans_commit(_trans, (_disk_res),\
(_journal_seq), (_commit_flags)))
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *,
+ struct btree_iter *);
#define for_each_btree_key_max_norestart(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags),\
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags),\
!((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
+ bch2_btree_iter_advance(_trans, &(_iter)))
-#define for_each_btree_key_max_continue_norestart(_iter, _end, _flags, _k, _ret)\
+#define for_each_btree_key_max_continue_norestart(_trans, _iter, _end, _flags, _k, _ret)\
for (; \
- (_k) = bch2_btree_iter_peek_max_type(&(_iter), _end, _flags), \
+ (_k) = bch2_btree_iter_peek_max_type(_trans, &(_iter), _end, _flags), \
!((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_advance(&(_iter)))
+ bch2_btree_iter_advance(_trans, &(_iter)))
#define for_each_btree_key_norestart(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
for_each_btree_key_max_norestart(_trans, _iter, _btree_id, _start,\
SPOS_MAX, _flags, _k, _ret)
-#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
- _start, _flags, _k, _ret) \
- for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
- (_start), (_flags)); \
- (_k) = bch2_btree_iter_peek_prev_type(&(_iter), _flags), \
- !((_ret) = bkey_err(_k)) && (_k).k; \
- bch2_btree_iter_rewind(&(_iter)))
+#define for_each_btree_key_reverse_norestart(_trans, _iter, _btree_id, \
+ _start, _flags, _k, _ret) \
+ for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
+ (_start), (_flags)); \
+ (_k) = bch2_btree_iter_peek_prev_type(_trans, &(_iter), _flags), \
+ !((_ret) = bkey_err(_k)) && (_k).k; \
+ bch2_btree_iter_rewind(_trans, &(_iter)))
-#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret) \
- for_each_btree_key_max_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
+#define for_each_btree_key_continue_norestart(_trans, _iter, _flags, _k, _ret) \
+ for_each_btree_key_max_continue_norestart(_trans, _iter, SPOS_MAX, _flags, _k, _ret)
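
The _norestart variants leave transaction restarts to the caller. A usage sketch, where process_extent() is a hypothetical per-key callback and inum is illustrative:

	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
				     POS(inum, 0), 0, k, ret) {
		ret = process_extent(trans, k);	/* hypothetical */
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);
	/* caller must handle bch2_err_matches(ret, BCH_ERR_transaction_restart) */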
/*
* This should not be used in a fastpath, without first trying _do in
diff --git a/fs/bcachefs/btree_journal_iter.c b/fs/bcachefs/btree_journal_iter.c
index 6d25e3f85ce8..7d6c971db23c 100644
--- a/fs/bcachefs/btree_journal_iter.c
+++ b/fs/bcachefs/btree_journal_iter.c
@@ -687,7 +687,8 @@ void bch2_journal_keys_put(struct bch_fs *c)
static void __journal_keys_sort(struct journal_keys *keys)
{
- sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);
+ sort_nonatomic(keys->data, keys->nr, sizeof(keys->data[0]),
+ journal_sort_key_cmp, NULL);
cond_resched();
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index edce59433375..2b186584a291 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -287,6 +287,19 @@ err:
return ret;
}
+static noinline_for_stack void do_trace_key_cache_fill(struct btree_trans *trans,
+ struct btree_path *ck_path,
+ struct bkey_s_c k)
+{
+ struct printbuf buf = PRINTBUF;
+
+ bch2_bpos_to_text(&buf, ck_path->pos);
+ prt_char(&buf, ' ');
+ bch2_bkey_val_to_text(&buf, trans->c, k);
+ trace_key_cache_fill(trans, buf.buf);
+ printbuf_exit(&buf);
+}
+
static noinline int btree_key_cache_fill(struct btree_trans *trans,
struct btree_path *ck_path,
unsigned flags)
@@ -306,7 +319,7 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
BTREE_ITER_key_cache_fill|
BTREE_ITER_cached_nofill);
iter.flags &= ~BTREE_ITER_with_journal;
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -320,18 +333,11 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
if (ret)
goto err;
- if (trace_key_cache_fill_enabled()) {
- struct printbuf buf = PRINTBUF;
-
- bch2_bpos_to_text(&buf, ck_path->pos);
- prt_char(&buf, ' ');
- bch2_bkey_val_to_text(&buf, trans->c, k);
- trace_key_cache_fill(trans, buf.buf);
- printbuf_exit(&buf);
- }
+ if (trace_key_cache_fill_enabled())
+ do_trace_key_cache_fill(trans, ck_path, k);
out:
/* We're not likely to need this iterator again: */
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -412,7 +418,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
BTREE_ITER_intent);
b_iter.flags &= ~BTREE_ITER_with_key_cache;
- ret = bch2_btree_iter_traverse(&c_iter);
+ ret = bch2_btree_iter_traverse(trans, &c_iter);
if (ret)
goto out;
@@ -444,7 +450,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
!test_bit(JOURNAL_space_low, &c->journal.flags))
commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
- struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(&b_iter);
+ struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(trans, &b_iter);
ret = bkey_err(btree_k);
if (ret)
goto err;
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 678161321e42..86acf037590c 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -13,6 +13,7 @@
#include <linux/kthread.h>
#include <linux/min_heap.h>
+#include <linux/sched/sysctl.h>
#include <linux/sort.h>
struct find_btree_nodes_worker {
@@ -182,7 +183,7 @@ static void try_read_btree_node(struct find_btree_nodes *f, struct bch_dev *ca,
return;
if (bch2_csum_type_is_encryption(BSET_CSUM_TYPE(&bn->keys))) {
- if (!c->chacha20)
+ if (!c->chacha20_key_set)
return;
struct nonce nonce = btree_nonce(&bn->keys, 0);
@@ -270,7 +271,7 @@ static int read_btree_nodes_worker(void *p)
err:
bio_put(bio);
free_page((unsigned long) buf);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
closure_put(w->cl);
kfree(w);
return 0;
@@ -290,7 +291,7 @@ static int read_btree_nodes(struct find_btree_nodes *f)
struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL);
if (!w) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
ret = -ENOMEM;
goto err;
}
@@ -302,18 +303,19 @@ static int read_btree_nodes(struct find_btree_nodes *f)
struct task_struct *t = kthread_create(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
ret = PTR_ERR_OR_ZERO(t);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
kfree(w);
bch_err_msg(c, ret, "starting kthread");
break;
}
closure_get(&cl);
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
wake_up_process(t);
}
err:
- closure_sync(&cl);
+ while (closure_sync_timeout(&cl, sysctl_hung_task_timeout_secs * HZ / 2))
+ ;
return f->ret ?: ret;
}
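
A plain closure_sync() here could sleep uninterruptibly for the whole device scan, long enough to trip the hung-task watchdog; polling at half the watchdog period keeps each wait in a bounded chunk. The pattern in isolation (assuming the usual sysctl_hung_task_timeout_secs semantics from linux/sched/sysctl.h):

	/* Wait for outstanding work, never sleeping longer than half the
	 * hung-task timeout in any single stretch: */
	while (closure_sync_timeout(&cl, sysctl_hung_task_timeout_secs * HZ / 2))
		;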
@@ -396,7 +398,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
bch2_print_string_as_lines(KERN_INFO, buf.buf);
}
- sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
+ sort_nonatomic(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_cookie, NULL);
dst = 0;
darray_for_each(f->nodes, i) {
@@ -416,7 +418,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
}
f->nodes.nr = dst;
- sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
+ sort_nonatomic(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
if (0 && c->opts.verbose) {
printbuf_reset(&buf);
@@ -577,10 +579,12 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
found_btree_node_to_key(&tmp.k, &n);
- struct printbuf buf = PRINTBUF;
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&tmp.k));
- bch_verbose(c, "%s(): recovering %s", __func__, buf.buf);
- printbuf_exit(&buf);
+ if (c->opts.verbose) {
+ struct printbuf buf = PRINTBUF;
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&tmp.k));
+ bch_verbose(c, "%s(): recovering %s", __func__, buf.buf);
+ printbuf_exit(&buf);
+ }
BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k),
(struct bkey_validate_context) {
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index 77578da2d23f..023c472dc9ee 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -367,7 +367,6 @@ static inline unsigned long btree_path_ip_allocated(struct btree_path *path)
* @nodes_intent_locked - bitmask indicating which locks are intent locks
*/
struct btree_iter {
- struct btree_trans *trans;
btree_path_idx_t path;
btree_path_idx_t update_path;
btree_path_idx_t key_cache_path;
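
Removing the back-pointer shrinks struct btree_iter by one pointer and, more importantly, makes ownership explicit: the transaction is now threaded through every iterator call instead of being fished out of the iterator. In caller terms:

	/* before this patch: */
	k = bch2_btree_iter_peek_slot(&iter);		/* used iter.trans internally */
	/* after: */
	k = bch2_btree_iter_peek_slot(trans, &iter);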
diff --git a/fs/bcachefs/btree_update.c b/fs/bcachefs/btree_update.c
index bd2eb42edb24..1e6b7836cc01 100644
--- a/fs/bcachefs/btree_update.c
+++ b/fs/bcachefs/btree_update.c
@@ -126,7 +126,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
struct bpos new_pos)
{
struct bch_fs *c = trans->c;
- struct btree_iter old_iter, new_iter = { NULL };
+ struct btree_iter old_iter, new_iter = {};
struct bkey_s_c old_k, new_k;
snapshot_id_list s;
struct bkey_i *update;
@@ -140,7 +140,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
bch2_trans_iter_init(trans, &old_iter, id, old_pos,
BTREE_ITER_not_extents|
BTREE_ITER_all_snapshots);
- while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
+ while ((old_k = bch2_btree_iter_prev(trans, &old_iter)).k &&
!(ret = bkey_err(old_k)) &&
bkey_eq(old_pos, old_k.k->p)) {
struct bpos whiteout_pos =
@@ -296,7 +296,7 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
BTREE_ITER_intent|
BTREE_ITER_with_updates|
BTREE_ITER_not_extents);
- k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
goto err;
if (!k.k)
@@ -322,8 +322,8 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
if (done)
goto out;
next:
- bch2_btree_iter_advance(&iter);
- k = bch2_btree_iter_peek_max(&iter, POS(insert->k.p.inode, U64_MAX));
+ bch2_btree_iter_advance(trans, &iter);
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
goto err;
if (!k.k)
@@ -592,13 +592,13 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
enum btree_id btree, struct bpos end)
{
bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent);
- struct bkey_s_c k = bch2_btree_iter_peek_prev(iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_prev(trans, iter);
int ret = bkey_err(k);
if (ret)
goto err;
- bch2_btree_iter_advance(iter);
- k = bch2_btree_iter_peek_slot(iter);
+ bch2_btree_iter_advance(trans, iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -634,7 +634,7 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans,
BTREE_ITER_cached|
BTREE_ITER_not_extents|
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -646,7 +646,7 @@ int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
BTREE_ITER_intent|flags);
- int ret = bch2_btree_iter_traverse(&iter) ?:
+ int ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -695,7 +695,7 @@ int bch2_btree_delete(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, btree, pos,
BTREE_ITER_cached|
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, update_flags);
bch2_trans_iter_exit(trans, &iter);
@@ -713,7 +713,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
int ret = 0;
bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
- while ((k = bch2_btree_iter_peek_max(&iter, end)).k) {
+ while ((k = bch2_btree_iter_peek_max(trans, &iter, end)).k) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(trans->c, 0);
struct bkey_i delete;
@@ -808,7 +808,7 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(&iter) ?:
+ int ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_bit_mod_iter(trans, &iter, set);
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -846,6 +846,19 @@ int bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf)
return 0;
}
+int bch2_trans_log_bkey(struct btree_trans *trans, enum btree_id btree,
+ unsigned level, struct bkey_i *k)
+{
+ struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(k->k.u64s));
+ int ret = PTR_ERR_OR_ZERO(e);
+ if (ret)
+ return ret;
+
+ journal_entry_init(e, BCH_JSET_ENTRY_log_bkey, btree, level, k->k.u64s);
+ bkey_copy(e->start, k);
+ return 0;
+}
+
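A hedged sketch of how the new helper can be combined with an update — log the key to the journal, queue the update, and commit, all in one transaction (the btree id, iterator, and k are illustrative, not from this patch):

	int ret = bch2_trans_log_bkey(trans, BTREE_ID_extents, 0, k) ?:
		  bch2_trans_update(trans, &iter, k, 0) ?:
		  bch2_trans_commit(trans, NULL, NULL, 0);
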
__printf(3, 0)
static int
__bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
diff --git a/fs/bcachefs/btree_update.h b/fs/bcachefs/btree_update.h
index d2e1c04353f6..568e56c91190 100644
--- a/fs/bcachefs/btree_update.h
+++ b/fs/bcachefs/btree_update.h
@@ -170,6 +170,8 @@ void bch2_trans_commit_hook(struct btree_trans *,
int __bch2_trans_commit(struct btree_trans *, unsigned);
int bch2_trans_log_msg(struct btree_trans *, struct printbuf *);
+int bch2_trans_log_bkey(struct btree_trans *, enum btree_id, unsigned, struct bkey_i *);
+
__printf(2, 3) int bch2_fs_log_msg(struct bch_fs *, const char *, ...);
__printf(2, 3) int bch2_journal_log_msg(struct bch_fs *, const char *, ...);
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 67f1e3202835..55fbeeb8eaaa 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -35,6 +35,8 @@ static const char * const bch2_btree_update_modes[] = {
NULL
};
+static void bch2_btree_update_to_text(struct printbuf *, struct btree_update *);
+
static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
btree_path_idx_t, struct btree *, struct keylist *);
static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);
@@ -54,6 +56,8 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
struct bkey_buf prev;
int ret = 0;
+ printbuf_indent_add_nextline(&buf, 2);
+
BUG_ON(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
!bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
b->data->min_key));
@@ -64,19 +68,20 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
if (b == btree_node_root(c, b)) {
if (!bpos_eq(b->data->min_key, POS_MIN)) {
- printbuf_reset(&buf);
+ ret = __bch2_topology_error(c, &buf);
+
bch2_bpos_to_text(&buf, b->data->min_key);
log_fsck_err(trans, btree_root_bad_min_key,
"btree root with incorrect min_key: %s", buf.buf);
- goto topology_repair;
+ goto out;
}
if (!bpos_eq(b->data->max_key, SPOS_MAX)) {
- printbuf_reset(&buf);
+ ret = __bch2_topology_error(c, &buf);
bch2_bpos_to_text(&buf, b->data->max_key);
log_fsck_err(trans, btree_root_bad_max_key,
"btree root with incorrect max_key: %s", buf.buf);
- goto topology_repair;
+ goto out;
}
}
@@ -94,20 +99,19 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
: bpos_successor(prev.k->k.p);
if (!bpos_eq(expected_min, bp.v->min_key)) {
- bch2_topology_error(c);
+ ret = __bch2_topology_error(c, &buf);
- printbuf_reset(&buf);
- prt_str(&buf, "end of prev node doesn't match start of next node\n in ");
+ prt_str(&buf, "end of prev node doesn't match start of next node\nin ");
bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
prt_str(&buf, " node ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- prt_str(&buf, "\n prev ");
+ prt_str(&buf, "\nprev ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
- prt_str(&buf, "\n next ");
+ prt_str(&buf, "\nnext ");
bch2_bkey_val_to_text(&buf, c, k);
log_fsck_err(trans, btree_node_topology_bad_min_key, "%s", buf.buf);
- goto topology_repair;
+ goto out;
}
bch2_bkey_buf_reassemble(&prev, c, k);
@@ -115,29 +119,25 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
}
if (bkey_deleted(&prev.k->k)) {
- bch2_topology_error(c);
+ ret = __bch2_topology_error(c, &buf);
- printbuf_reset(&buf);
- prt_str(&buf, "empty interior node\n in ");
+ prt_str(&buf, "empty interior node\nin ");
bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
prt_str(&buf, " node ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
log_fsck_err(trans, btree_node_topology_empty_interior_node, "%s", buf.buf);
- goto topology_repair;
} else if (!bpos_eq(prev.k->k.p, b->key.k.p)) {
- bch2_topology_error(c);
+ ret = __bch2_topology_error(c, &buf);
- printbuf_reset(&buf);
- prt_str(&buf, "last child node doesn't end at end of parent node\n in ");
+ prt_str(&buf, "last child node doesn't end at end of parent node\nin ");
bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
prt_str(&buf, " node ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- prt_str(&buf, "\n last key ");
+ prt_str(&buf, "\nlast key ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
log_fsck_err(trans, btree_node_topology_bad_max_key, "%s", buf.buf);
- goto topology_repair;
}
out:
fsck_err:
@@ -145,9 +145,6 @@ fsck_err:
bch2_bkey_buf_exit(&prev, c);
printbuf_exit(&buf);
return ret;
-topology_repair:
- ret = bch2_topology_error(c);
- goto out;
}
/* Calculate ideal packed bkey format for new btree nodes: */
@@ -1271,7 +1268,8 @@ err:
bch2_btree_update_free(as, trans);
if (!bch2_err_matches(ret, ENOSPC) &&
!bch2_err_matches(ret, EROFS) &&
- ret != -BCH_ERR_journal_reclaim_would_deadlock)
+ ret != -BCH_ERR_journal_reclaim_would_deadlock &&
+ ret != -BCH_ERR_journal_shutdown)
bch_err_fn_ratelimited(c, ret);
return ERR_PTR(ret);
}
@@ -1782,11 +1780,24 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
int ret;
lockdep_assert_held(&c->gc_lock);
- BUG_ON(!btree_node_intent_locked(path, b->c.level));
BUG_ON(!b->c.level);
BUG_ON(!as || as->b);
bch2_verify_keylist_sorted(keys);
+ if (!btree_node_intent_locked(path, b->c.level)) {
+ struct printbuf buf = PRINTBUF;
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf, "%s(): node not locked at level %u\n",
+ __func__, b->c.level);
+ bch2_btree_update_to_text(&buf, as);
+ bch2_btree_path_to_text(&buf, trans, path_idx);
+
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ printbuf_exit(&buf);
+ bch2_fs_emergency_read_only(c);
+ return -EIO;
+ }
+
ret = bch2_btree_node_lock_write(trans, path, &b->c);
if (ret)
return ret;
@@ -2007,18 +2018,22 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
}
if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
- struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
-
- bch2_bpos_to_text(&buf1, prev->data->max_key);
- bch2_bpos_to_text(&buf2, next->data->min_key);
- bch_err(c,
- "%s(): btree topology error:\n"
- " prev ends at %s\n"
- " next starts at %s",
- __func__, buf1.buf, buf2.buf);
- printbuf_exit(&buf1);
- printbuf_exit(&buf2);
- ret = bch2_topology_error(c);
+ struct printbuf buf = PRINTBUF;
+
+ printbuf_indent_add_nextline(&buf, 2);
+ prt_printf(&buf, "%s(): ", __func__);
+ ret = __bch2_topology_error(c, &buf);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "prev ends at ");
+ bch2_bpos_to_text(&buf, prev->data->max_key);
+ prt_newline(&buf);
+
+ prt_printf(&buf, "next starts at ");
+ bch2_bpos_to_text(&buf, next->data->min_key);
+
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
goto err;
}
@@ -2132,7 +2147,7 @@ static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter,
bch2_trans_node_iter_init(trans, iter, b->c.btree_id, b->key.k.p,
BTREE_MAX_DEPTH, b->c.level,
BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(iter);
+ int ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
goto err;
@@ -2224,7 +2239,7 @@ static int bch2_btree_node_rewrite_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter,
btree, k->k.p,
BTREE_MAX_DEPTH, level, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto out;
@@ -2247,7 +2262,7 @@ int bch2_btree_node_rewrite_pos(struct btree_trans *trans,
/* Traverse one depth lower to get a pointer to the node itself: */
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, pos, 0, level - 1, 0);
- struct btree *b = bch2_btree_iter_peek_node(&iter);
+ struct btree *b = bch2_btree_iter_peek_node(trans, &iter);
int ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
@@ -2288,7 +2303,9 @@ static void async_btree_node_rewrite_work(struct work_struct *work)
int ret = bch2_trans_do(c, bch2_btree_node_rewrite_key(trans,
a->btree_id, a->level, a->key.k, 0));
- if (ret != -ENOENT)
+ if (ret != -ENOENT &&
+ !bch2_err_matches(ret, EROFS) &&
+ ret != -BCH_ERR_journal_shutdown)
bch_err_fn_ratelimited(c, ret);
spin_lock(&c->btree_node_rewrites_lock);
@@ -2389,7 +2406,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
bool skip_triggers)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter2 = { NULL };
+ struct btree_iter iter2 = {};
struct btree *parent;
int ret;
@@ -2413,7 +2430,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
parent = btree_node_parent(btree_iter_path(trans, iter), b);
if (parent) {
- bch2_trans_copy_iter(&iter2, iter);
+ bch2_trans_copy_iter(trans, &iter2, iter);
iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
iter2.flags & BTREE_ITER_intent,
@@ -2427,7 +2444,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
trans->paths_sorted = false;
- ret = bch2_btree_iter_traverse(&iter2) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter2) ?:
bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
if (ret)
goto err;
diff --git a/fs/bcachefs/btree_write_buffer.c b/fs/bcachefs/btree_write_buffer.c
index 2c09d19dd621..0941fb2c026d 100644
--- a/fs/bcachefs/btree_write_buffer.c
+++ b/fs/bcachefs/btree_write_buffer.c
@@ -144,7 +144,7 @@ static inline int wb_flush_one(struct btree_trans *trans, struct btree_iter *ite
EBUG_ON(!trans->c->btree_write_buffer.flushing.pin.seq);
EBUG_ON(trans->c->btree_write_buffer.flushing.pin.seq > wb->journal_seq);
- ret = bch2_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -208,7 +208,7 @@ btree_write_buffered_insert(struct btree_trans *trans,
trans->journal_res.seq = wb->journal_seq;
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &wb->k,
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter);
@@ -285,7 +285,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
struct bch_fs *c = trans->c;
struct journal *j = &c->journal;
struct btree_write_buffer *wb = &c->btree_write_buffer;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
size_t overwritten = 0, fast = 0, slowpath = 0, could_not_insert = 0;
bool write_locked = false;
bool accounting_replay_done = test_bit(BCH_FS_accounting_replay_done, &c->flags);
@@ -368,7 +368,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
write_locked = false;
ret = lockrestart_do(trans,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_foreground_maybe_merge(trans, iter.path, 0,
BCH_WATERMARK_reclaim|
BCH_TRANS_COMMIT_journal_reclaim|
@@ -385,7 +385,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
}
- bch2_btree_iter_set_pos(&iter, k->k.k.p);
+ bch2_btree_iter_set_pos(trans, &iter, k->k.k.p);
btree_iter_path(trans, &iter)->preserve = false;
bool accounting_accumulated = false;
@@ -428,10 +428,10 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
*/
trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
- sort(wb->flushing.keys.data,
- wb->flushing.keys.nr,
- sizeof(wb->flushing.keys.data[0]),
- wb_key_seq_cmp, NULL);
+ sort_nonatomic(wb->flushing.keys.data,
+ wb->flushing.keys.nr,
+ sizeof(wb->flushing.keys.data[0]),
+ wb_key_seq_cmp, NULL);
darray_for_each(wb->flushing.keys, i) {
if (!i->journal_seq)
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index e56ef623ebc1..fea61e60a9ee 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -30,6 +30,12 @@
void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
{
+ for (unsigned i = 0; i < BCH_DATA_NR; i++)
+ usage->buckets[i] = percpu_u64_get(&ca->usage->d[i].buckets);
+}
+
+void bch2_dev_usage_full_read_fast(struct bch_dev *ca, struct bch_dev_usage_full *usage)
+{
memset(usage, 0, sizeof(*usage));
acc_u64s_percpu((u64 *) usage, (u64 __percpu *) ca->usage, dev_usage_u64s());
}
@@ -75,7 +81,7 @@ bch2_fs_usage_read_short(struct bch_fs *c)
void bch2_dev_usage_to_text(struct printbuf *out,
struct bch_dev *ca,
- struct bch_dev_usage *usage)
+ struct bch_dev_usage_full *usage)
{
if (out->nr_tabstops < 5) {
printbuf_tabstops_reset(out);
@@ -365,7 +371,7 @@ found:
struct btree_iter iter;
bch2_trans_node_iter_init(trans, &iter, btree, new->k.p, 0, level,
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, new,
BTREE_UPDATE_internal_snapshot_node|
BTREE_TRIGGER_norun);
@@ -381,6 +387,36 @@ err:
return ret;
}
+static int bucket_ref_update_err(struct btree_trans *trans, struct printbuf *buf,
+ struct bkey_s_c k, bool insert, enum bch_sb_error_id id)
+{
+ struct bch_fs *c = trans->c;
+ bool repeat = false, print = true, suppress = false;
+
+ prt_printf(buf, "\nwhile marking ");
+ bch2_bkey_val_to_text(buf, c, k);
+ prt_newline(buf);
+
+ __bch2_count_fsck_err(c, id, buf->buf, &repeat, &print, &suppress);
+
+ int ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
+
+ if (insert) {
+ print = true;
+ suppress = false;
+
+ bch2_trans_updates_to_text(buf, trans);
+ __bch2_inconsistent_error(c, buf);
+ ret = -BCH_ERR_bucket_ref_update;
+ }
+
+ if (suppress)
+ prt_printf(buf, "Ratelimiting new instances of previous error\n");
+ if (print)
+ bch2_print_string_as_lines(KERN_ERR, buf->buf);
+ return ret;
+}
+
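The helper factors out the common tail of each check below: it appends the key being marked, counts the fsck error (with ratelimiting), schedules the check_allocations pass, and escalates to a hard inconsistent error — failing the update — only when inserting. Call shape, mirroring the hunks that follow:

	struct printbuf buf = PRINTBUF;
	bch2_log_msg_start(c, &buf);
	prt_printf(&buf, "bucket %u:%zu gen %u: ptr gen %u too stale",
		   ptr->dev, bucket_nr, b_gen, ptr->gen);
	ret = bucket_ref_update_err(trans, &buf, k, inserting,
				    BCH_FSCK_ERR_ptr_too_stale);
	goto out;
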
int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
@@ -396,32 +432,29 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
BUG_ON(!sectors);
- if (gen_after(ptr->gen, b_gen)) {
- bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
- log_fsck_err(trans, ptr_gen_newer_than_bucket_gen,
- "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
- "while marking %s",
+ if (unlikely(gen_after(ptr->gen, b_gen))) {
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen",
ptr->dev, bucket_nr, b_gen,
bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- ptr->gen,
- (bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
+ ptr->gen);
+
+ ret = bucket_ref_update_err(trans, &buf, k, inserting,
+ BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen);
goto out;
}
- if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
- bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
- log_fsck_err(trans, ptr_too_stale,
- "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
- "while marking %s",
+ if (unlikely(gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX)) {
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf,
+ "bucket %u:%zu gen %u data type %s: ptr gen %u too stale",
ptr->dev, bucket_nr, b_gen,
bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- ptr->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
+ ptr->gen);
+
+ ret = bucket_ref_update_err(trans, &buf, k, inserting,
+ BCH_FSCK_ERR_ptr_too_stale);
goto out;
}
@@ -430,62 +463,50 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
goto out;
}
- if (b_gen != ptr->gen) {
- bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
- log_fsck_err(trans, stale_dirty_ptr,
- "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
- "while marking %s",
+ if (unlikely(b_gen != ptr->gen)) {
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf,
+ "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)",
ptr->dev, bucket_nr, b_gen,
bucket_gen_get(ca, bucket_nr),
bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- ptr->gen,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
+ ptr->gen);
+
+ ret = bucket_ref_update_err(trans, &buf, k, inserting,
+ BCH_FSCK_ERR_stale_dirty_ptr);
goto out;
}
- if (bucket_data_type_mismatch(bucket_data_type, ptr_data_type)) {
- bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
- log_fsck_err(trans, ptr_bucket_data_type_mismatch,
- "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
- "while marking %s",
- ptr->dev, bucket_nr, b_gen,
- bch2_data_type_str(bucket_data_type),
- bch2_data_type_str(ptr_data_type),
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
+ if (unlikely(bucket_data_type_mismatch(bucket_data_type, ptr_data_type))) {
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf, "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
+ ptr->dev, bucket_nr, b_gen,
+ bch2_data_type_str(bucket_data_type),
+ bch2_data_type_str(ptr_data_type));
+
+ ret = bucket_ref_update_err(trans, &buf, k, inserting,
+ BCH_FSCK_ERR_ptr_bucket_data_type_mismatch);
goto out;
}
- if ((u64) *bucket_sectors + sectors > U32_MAX) {
- bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_allocations);
- log_fsck_err(trans, bucket_sector_count_overflow,
- "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
- "while marking %s",
+ if (unlikely((u64) *bucket_sectors + sectors > U32_MAX)) {
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf,
+ "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX",
ptr->dev, bucket_nr, b_gen,
bch2_data_type_str(bucket_data_type ?: ptr_data_type),
- *bucket_sectors, sectors,
- (printbuf_reset(&buf),
- bch2_bkey_val_to_text(&buf, c, k), buf.buf));
- if (inserting)
- goto err;
+ *bucket_sectors, sectors);
+
+ ret = bucket_ref_update_err(trans, &buf, k, inserting,
+ BCH_FSCK_ERR_bucket_sector_count_overflow);
sectors = -*bucket_sectors;
+ goto out;
}
*bucket_sectors += sectors;
out:
printbuf_exit(&buf);
return ret;
-err:
-fsck_err:
- bch2_dump_trans_updates(trans);
- bch2_inconsistent_error(c);
- ret = -BCH_ERR_bucket_ref_update;
- goto out;
}
void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
@@ -651,9 +672,9 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
stripe_blockcount_get(&s->v, p.ec.block) +
sectors);
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
+ struct disk_accounting_pos acc;
+ memset(&acc, 0, sizeof(acc));
+ acc.type = BCH_DISK_ACCOUNTING_replicas;
bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
acc.replicas.data_type = data_type;
ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
@@ -677,20 +698,22 @@ err:
if (!m || !m->alive) {
gc_stripe_unlock(m);
struct printbuf buf = PRINTBUF;
+ bch2_log_msg_start(c, &buf);
+ prt_printf(&buf, "pointer to nonexistent stripe %llu\n while marking ",
+ (u64) p.ec.idx);
bch2_bkey_val_to_text(&buf, c, k);
- bch_err_ratelimited(c, "pointer to nonexistent stripe %llu\n while marking %s",
- (u64) p.ec.idx, buf.buf);
+ __bch2_inconsistent_error(c, &buf);
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_exit(&buf);
- bch2_inconsistent_error(c);
return -BCH_ERR_trigger_stripe_pointer;
}
m->block_sectors[p.ec.block] += sectors;
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
- memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e));
+ struct disk_accounting_pos acc;
+ memset(&acc, 0, sizeof(acc));
+ acc.type = BCH_DISK_ACCOUNTING_replicas;
+ unsafe_memcpy(&acc.replicas, &m->r.e, replicas_entry_bytes(&m->r.e), "VLA");
gc_stripe_unlock(m);
acc.replicas.data_type = data_type;
@@ -717,12 +740,12 @@ static int __trigger_extent(struct btree_trans *trans,
: BCH_DATA_user;
int ret = 0;
- struct disk_accounting_pos acc_replicas_key = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- .replicas.data_type = data_type,
- .replicas.nr_devs = 0,
- .replicas.nr_required = 1,
- };
+ struct disk_accounting_pos acc_replicas_key;
+ memset(&acc_replicas_key, 0, sizeof(acc_replicas_key));
+ acc_replicas_key.type = BCH_DISK_ACCOUNTING_replicas;
+ acc_replicas_key.replicas.data_type = data_type;
+ acc_replicas_key.replicas.nr_devs = 0;
+ acc_replicas_key.replicas.nr_required = 1;
unsigned cur_compression_type = 0;
u64 compression_acct[3] = { 1, 0, 0 };
@@ -1115,7 +1138,7 @@ int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
for_each_online_member(c, ca) {
int ret = bch2_trans_mark_dev_sb(c, ca, flags);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ret;
}
}
@@ -1314,7 +1337,7 @@ void bch2_dev_buckets_free(struct bch_dev *ca)
int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
- ca->usage = alloc_percpu(struct bch_dev_usage);
+ ca->usage = alloc_percpu(struct bch_dev_usage_full);
if (!ca->usage)
return -BCH_ERR_ENOMEM_usage_init;
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index c5363256e363..1c38b165f48b 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -172,7 +172,16 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
return ret;
}
-void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);
+void bch2_dev_usage_full_read_fast(struct bch_dev *, struct bch_dev_usage_full *);
+static inline struct bch_dev_usage_full bch2_dev_usage_full_read(struct bch_dev *ca)
+{
+ struct bch_dev_usage_full ret;
+
+ bch2_dev_usage_full_read_fast(ca, &ret);
+ return ret;
+}
+
+void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage_full *);
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
@@ -207,7 +216,7 @@ static inline u64 dev_buckets_free(struct bch_dev *ca,
enum bch_watermark watermark)
{
return max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets -
+ usage.buckets[BCH_DATA_free] -
ca->nr_open_buckets -
bch2_dev_buckets_reserved(ca, watermark));
}
@@ -217,10 +226,10 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
enum bch_watermark watermark)
{
return max_t(s64, 0,
- usage.d[BCH_DATA_free].buckets
- + usage.d[BCH_DATA_cached].buckets
- + usage.d[BCH_DATA_need_gc_gens].buckets
- + usage.d[BCH_DATA_need_discard].buckets
+ usage.buckets[BCH_DATA_free]
+ + usage.buckets[BCH_DATA_cached]
+ + usage.buckets[BCH_DATA_need_gc_gens]
+ + usage.buckets[BCH_DATA_need_discard]
- ca->nr_open_buckets
- bch2_dev_buckets_reserved(ca, watermark));
}
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 900b8680c8b5..0aed2500ade3 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -54,7 +54,12 @@ struct bucket_gens {
u8 b[] __counted_by(nbuckets);
};
+/* Only info on bucket counts: */
struct bch_dev_usage {
+ u64 buckets[BCH_DATA_NR];
+};
+
+struct bch_dev_usage_full {
struct bch_dev_usage_type {
u64 buckets;
u64 sectors; /* _compressed_ sectors: */
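
The split leaves the allocator hot path with just per-data-type bucket counts, while the full per-type sector and fragmentation counters move behind bch2_dev_usage_full_read(). A consumer sketch mirroring the scrub ioctl in chardev.c below (function name is illustrative):

    static u64 scrub_sectors_total(struct bch_dev *ca, unsigned data_types)
    {
    	struct bch_dev_usage_full u = bch2_dev_usage_full_read(ca);
    	u64 sectors = 0;

    	for (unsigned i = BCH_DATA_btree; i < ARRAY_SIZE(u.d); i++)
    		if (data_types & BIT(i))
    			sectors += u.d[i].sectors;	/* compressed sectors */
    	return sectors;
    }
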
diff --git a/fs/bcachefs/chardev.c b/fs/bcachefs/chardev.c
index 57d55b3ddc71..5891b3a1e61c 100644
--- a/fs/bcachefs/chardev.c
+++ b/fs/bcachefs/chardev.c
@@ -350,8 +350,8 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
if (ctx->arg.op == BCH_DATA_OP_scrub) {
struct bch_dev *ca = bch2_dev_tryget(c, ctx->arg.scrub.dev);
if (ca) {
- struct bch_dev_usage u;
- bch2_dev_usage_read_fast(ca, &u);
+ struct bch_dev_usage_full u;
+ bch2_dev_usage_full_read_fast(ca, &u);
for (unsigned i = BCH_DATA_btree; i < ARRAY_SIZE(u.d); i++)
if (ctx->arg.scrub.data_types & BIT(i))
e.p.sectors_total += u.d[i].sectors;
@@ -426,10 +426,8 @@ static long bch2_ioctl_fs_usage(struct bch_fs *c,
arg.replica_entries_bytes = replicas.nr;
for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++) {
- struct disk_accounting_pos k = {
- .type = BCH_DISK_ACCOUNTING_persistent_reserved,
- .persistent_reserved.nr_replicas = i,
- };
+ struct disk_accounting_pos k;
+ disk_accounting_key_init(k, persistent_reserved, .nr_replicas = i);
bch2_accounting_mem_read(c,
disk_accounting_pos_to_bpos(&k),
@@ -475,7 +473,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
struct bch_ioctl_dev_usage __user *user_arg)
{
struct bch_ioctl_dev_usage arg;
- struct bch_dev_usage src;
+ struct bch_dev_usage_full src;
struct bch_dev *ca;
unsigned i;
@@ -495,7 +493,7 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
if (IS_ERR(ca))
return PTR_ERR(ca);
- src = bch2_dev_usage_read(ca);
+ src = bch2_dev_usage_full_read(ca);
arg.state = ca->mi.state;
arg.bucket_size = ca->mi.bucket_size;
@@ -516,7 +514,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
struct bch_ioctl_dev_usage_v2 __user *user_arg)
{
struct bch_ioctl_dev_usage_v2 arg;
- struct bch_dev_usage src;
+ struct bch_dev_usage_full src;
struct bch_dev *ca;
int ret = 0;
@@ -536,7 +534,7 @@ static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
if (IS_ERR(ca))
return PTR_ERR(ca);
- src = bch2_dev_usage_read(ca);
+ src = bch2_dev_usage_full_read(ca);
arg.state = ca->mi.state;
arg.bucket_size = ca->mi.bucket_size;
@@ -617,7 +615,7 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
for_each_online_member(c, ca)
if (ca->dev == dev) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ca->dev_idx;
}
diff --git a/fs/bcachefs/checksum.c b/fs/bcachefs/checksum.c
index 3726689093e3..d0a34a097b80 100644
--- a/fs/bcachefs/checksum.c
+++ b/fs/bcachefs/checksum.c
@@ -7,17 +7,12 @@
#include "super-io.h"
#include <linux/crc32c.h>
-#include <linux/crypto.h>
#include <linux/xxhash.h>
#include <linux/key.h>
#include <linux/random.h>
#include <linux/ratelimit.h>
-#include <linux/scatterlist.h>
-#include <crypto/algapi.h>
#include <crypto/chacha.h>
-#include <crypto/hash.h>
#include <crypto/poly1305.h>
-#include <crypto/skcipher.h>
#include <keys/user-type.h>
/*
@@ -96,116 +91,40 @@ static void bch2_checksum_update(struct bch2_checksum_state *state, const void *
}
}
-static inline int do_encrypt_sg(struct crypto_sync_skcipher *tfm,
- struct nonce nonce,
- struct scatterlist *sg, size_t len)
+static void bch2_chacha20_init(u32 state[CHACHA_STATE_WORDS],
+ const struct bch_key *key, struct nonce nonce)
{
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ u32 key_words[CHACHA_KEY_SIZE / sizeof(u32)];
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, 0, NULL, NULL);
- skcipher_request_set_crypt(req, sg, sg, len, nonce.d);
+ BUILD_BUG_ON(sizeof(key_words) != sizeof(*key));
+ memcpy(key_words, key, sizeof(key_words));
+ le32_to_cpu_array(key_words, ARRAY_SIZE(key_words));
- int ret = crypto_skcipher_encrypt(req);
- if (ret)
- pr_err("got error %i from crypto_skcipher_encrypt()", ret);
-
- return ret;
-}
-
-static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
- struct nonce nonce,
- void *buf, size_t len)
-{
- if (!is_vmalloc_addr(buf)) {
- struct scatterlist sg = {};
-
- sg_mark_end(&sg);
- sg_set_page(&sg, virt_to_page(buf), len, offset_in_page(buf));
- return do_encrypt_sg(tfm, nonce, &sg, len);
- } else {
- DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
- size_t sgl_len = 0;
- int ret;
-
- darray_init(&sgl);
-
- while (len) {
- unsigned offset = offset_in_page(buf);
- struct scatterlist sg = {
- .page_link = (unsigned long) vmalloc_to_page(buf),
- .offset = offset,
- .length = min(len, PAGE_SIZE - offset),
- };
+ BUILD_BUG_ON(sizeof(nonce) != CHACHA_IV_SIZE);
+ chacha_init(state, key_words, (const u8 *)nonce.d);
- if (darray_push(&sgl, sg)) {
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
- if (ret)
- goto err;
-
- nonce = nonce_add(nonce, sgl_len);
- sgl_len = 0;
- sgl.nr = 0;
- BUG_ON(darray_push(&sgl, sg));
- }
-
- buf += sg.length;
- len -= sg.length;
- sgl_len += sg.length;
- }
-
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(tfm, nonce, sgl.data, sgl_len);
-err:
- darray_exit(&sgl);
- return ret;
- }
+ memzero_explicit(key_words, sizeof(key_words));
}
-int bch2_chacha_encrypt_key(struct bch_key *key, struct nonce nonce,
- void *buf, size_t len)
+static void bch2_chacha20(const struct bch_key *key, struct nonce nonce,
+ void *data, size_t len)
{
- struct crypto_sync_skcipher *chacha20 =
- crypto_alloc_sync_skcipher("chacha20", 0, 0);
- int ret;
-
- ret = PTR_ERR_OR_ZERO(chacha20);
- if (ret) {
- pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret));
- return ret;
- }
-
- ret = crypto_skcipher_setkey(&chacha20->base,
- (void *) key, sizeof(*key));
- if (ret) {
- pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret));
- goto err;
- }
+ u32 state[CHACHA_STATE_WORDS];
- ret = do_encrypt(chacha20, nonce, buf, len);
-err:
- crypto_free_sync_skcipher(chacha20);
- return ret;
+ bch2_chacha20_init(state, key, nonce);
+ chacha20_crypt(state, data, data, len);
+ memzero_explicit(state, sizeof(state));
}
-static int gen_poly_key(struct bch_fs *c, struct shash_desc *desc,
- struct nonce nonce)
+static void bch2_poly1305_init(struct poly1305_desc_ctx *desc,
+ struct bch_fs *c, struct nonce nonce)
{
- u8 key[POLY1305_KEY_SIZE];
- int ret;
+ u8 key[POLY1305_KEY_SIZE] = { 0 };
nonce.d[3] ^= BCH_NONCE_POLY;
- memset(key, 0, sizeof(key));
- ret = do_encrypt(c->chacha20, nonce, key, sizeof(key));
- if (ret)
- return ret;
-
- desc->tfm = c->poly1305;
- crypto_shash_init(desc);
- crypto_shash_update(desc, key, sizeof(key));
- return 0;
+ bch2_chacha20(&c->chacha20_key, nonce, key, sizeof(key));
+ poly1305_init(desc, key);
}
struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
@@ -230,14 +149,13 @@ struct bch_csum bch2_checksum(struct bch_fs *c, unsigned type,
case BCH_CSUM_chacha20_poly1305_80:
case BCH_CSUM_chacha20_poly1305_128: {
- SHASH_DESC_ON_STACK(desc, c->poly1305);
+ struct poly1305_desc_ctx dctx;
u8 digest[POLY1305_DIGEST_SIZE];
struct bch_csum ret = { 0 };
- gen_poly_key(c, desc, nonce);
-
- crypto_shash_update(desc, data, len);
- crypto_shash_final(desc, digest);
+ bch2_poly1305_init(&dctx, c, nonce);
+ poly1305_update(&dctx, data, len);
+ poly1305_final(&dctx, digest);
memcpy(&ret, digest, bch_crc_bytes[type]);
return ret;
@@ -253,11 +171,12 @@ int bch2_encrypt(struct bch_fs *c, unsigned type,
if (!bch2_csum_type_is_encryption(type))
return 0;
- if (bch2_fs_inconsistent_on(!c->chacha20,
+ if (bch2_fs_inconsistent_on(!c->chacha20_key_set,
c, "attempting to encrypt without encryption key"))
return -BCH_ERR_no_encryption_key;
- return do_encrypt(c->chacha20, nonce, data, len);
+ bch2_chacha20(&c->chacha20_key, nonce, data, len);
+ return 0;
}
static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
@@ -296,26 +215,26 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
case BCH_CSUM_chacha20_poly1305_80:
case BCH_CSUM_chacha20_poly1305_128: {
- SHASH_DESC_ON_STACK(desc, c->poly1305);
+ struct poly1305_desc_ctx dctx;
u8 digest[POLY1305_DIGEST_SIZE];
struct bch_csum ret = { 0 };
- gen_poly_key(c, desc, nonce);
+ bch2_poly1305_init(&dctx, c, nonce);
#ifdef CONFIG_HIGHMEM
__bio_for_each_segment(bv, bio, *iter, *iter) {
void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
- crypto_shash_update(desc, p, bv.bv_len);
+ poly1305_update(&dctx, p, bv.bv_len);
kunmap_local(p);
}
#else
__bio_for_each_bvec(bv, bio, *iter, *iter)
- crypto_shash_update(desc,
+ poly1305_update(&dctx,
page_address(bv.bv_page) + bv.bv_offset,
bv.bv_len);
#endif
- crypto_shash_final(desc, digest);
+ poly1305_final(&dctx, digest);
memcpy(&ret, digest, bch_crc_bytes[type]);
return ret;
@@ -338,43 +257,33 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
{
struct bio_vec bv;
struct bvec_iter iter;
- DARRAY_PREALLOCATED(struct scatterlist, 4) sgl;
- size_t sgl_len = 0;
+ u32 chacha_state[CHACHA_STATE_WORDS];
int ret = 0;
- if (bch2_fs_inconsistent_on(!c->chacha20,
+ if (bch2_fs_inconsistent_on(!c->chacha20_key_set,
c, "attempting to encrypt without encryption key"))
return -BCH_ERR_no_encryption_key;
- darray_init(&sgl);
+ bch2_chacha20_init(chacha_state, &c->chacha20_key, nonce);
bio_for_each_segment(bv, bio, iter) {
- struct scatterlist sg = {
- .page_link = (unsigned long) bv.bv_page,
- .offset = bv.bv_offset,
- .length = bv.bv_len,
- };
-
- if (darray_push(&sgl, sg)) {
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
- if (ret)
- goto err;
-
- nonce = nonce_add(nonce, sgl_len);
- sgl_len = 0;
- sgl.nr = 0;
-
- BUG_ON(darray_push(&sgl, sg));
+ void *p;
+
+ /*
+ * chacha_crypt() assumes that the length is a multiple of
+ * CHACHA_BLOCK_SIZE on any non-final call.
+ */
+ if (!IS_ALIGNED(bv.bv_len, CHACHA_BLOCK_SIZE)) {
+ bch_err_ratelimited(c, "bio not aligned for encryption");
+ ret = -EIO;
+ break;
}
- sgl_len += sg.length;
+ p = bvec_kmap_local(&bv);
+ chacha20_crypt(chacha_state, p, p, bv.bv_len);
+ kunmap_local(p);
}
-
- sg_mark_end(&darray_last(sgl));
- ret = do_encrypt_sg(c->chacha20, nonce, sgl.data, sgl_len);
-err:
- darray_exit(&sgl);
+ memzero_explicit(chacha_state, sizeof(chacha_state));
return ret;
}
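
The ChaCha state words carry the block counter between calls, so encrypting a bio segment by segment is equivalent to one contiguous chacha20_crypt() provided every chunk except the last is block-aligned; the loop above guarantees this by rejecting any unaligned segment outright. A minimal sketch (buf and tail_len are illustrative):

    u32 state[CHACHA_STATE_WORDS];

    bch2_chacha20_init(state, &c->chacha20_key, nonce);
    /* non-final chunks must be multiples of CHACHA_BLOCK_SIZE (64 bytes): */
    chacha20_crypt(state, buf, buf, 2 * CHACHA_BLOCK_SIZE);
    /* the final chunk may be any length: */
    chacha20_crypt(state, buf + 2 * CHACHA_BLOCK_SIZE,
    	       buf + 2 * CHACHA_BLOCK_SIZE, tail_len);
    memzero_explicit(state, sizeof(state));
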
@@ -650,10 +559,7 @@ int bch2_decrypt_sb_key(struct bch_fs *c,
}
/* decrypt real key: */
- ret = bch2_chacha_encrypt_key(&user_key, bch2_sb_key_nonce(c),
- &sb_key, sizeof(sb_key));
- if (ret)
- goto err;
+ bch2_chacha20(&user_key, bch2_sb_key_nonce(c), &sb_key, sizeof(sb_key));
if (bch2_key_is_encrypted(&sb_key)) {
bch_err(c, "incorrect encryption key");
@@ -668,31 +574,6 @@ err:
return ret;
}
-static int bch2_alloc_ciphers(struct bch_fs *c)
-{
- if (c->chacha20)
- return 0;
-
- struct crypto_sync_skcipher *chacha20 = crypto_alloc_sync_skcipher("chacha20", 0, 0);
- int ret = PTR_ERR_OR_ZERO(chacha20);
- if (ret) {
- bch_err(c, "error requesting chacha20 module: %s", bch2_err_str(ret));
- return ret;
- }
-
- struct crypto_shash *poly1305 = crypto_alloc_shash("poly1305", 0, 0);
- ret = PTR_ERR_OR_ZERO(poly1305);
- if (ret) {
- bch_err(c, "error requesting poly1305 module: %s", bch2_err_str(ret));
- crypto_free_sync_skcipher(chacha20);
- return ret;
- }
-
- c->chacha20 = chacha20;
- c->poly1305 = poly1305;
- return 0;
-}
-
#if 0
/*
@@ -797,35 +678,21 @@ err:
void bch2_fs_encryption_exit(struct bch_fs *c)
{
- if (c->poly1305)
- crypto_free_shash(c->poly1305);
- if (c->chacha20)
- crypto_free_sync_skcipher(c->chacha20);
+ memzero_explicit(&c->chacha20_key, sizeof(c->chacha20_key));
}
int bch2_fs_encryption_init(struct bch_fs *c)
{
struct bch_sb_field_crypt *crypt;
- struct bch_key key;
- int ret = 0;
+ int ret;
crypt = bch2_sb_field_get(c->disk_sb.sb, crypt);
if (!crypt)
- goto out;
+ return 0;
- ret = bch2_alloc_ciphers(c);
+ ret = bch2_decrypt_sb_key(c, crypt, &c->chacha20_key);
if (ret)
- goto out;
-
- ret = bch2_decrypt_sb_key(c, crypt, &key);
- if (ret)
- goto out;
-
- ret = crypto_skcipher_setkey(&c->chacha20->base,
- (void *) &key.key, sizeof(key.key));
- if (ret)
- goto out;
-out:
- memzero_explicit(&key, sizeof(key));
- return ret;
+ return ret;
+ c->chacha20_key_set = true;
+ return 0;
}
diff --git a/fs/bcachefs/checksum.h b/fs/bcachefs/checksum.h
index 4ac251c8fcd8..1310782d3ae9 100644
--- a/fs/bcachefs/checksum.h
+++ b/fs/bcachefs/checksum.h
@@ -69,7 +69,6 @@ static inline void bch2_csum_err_msg(struct printbuf *out,
bch2_csum_to_text(out, type, expected);
}
-int bch2_chacha_encrypt_key(struct bch_key *, struct nonce, void *, size_t);
int bch2_request_key(struct bch_sb *, struct bch_key *);
#ifndef __KERNEL__
int bch2_revoke_key(struct bch_sb *);
@@ -156,7 +155,7 @@ static inline bool bch2_checksum_type_valid(const struct bch_fs *c,
if (type >= BCH_CSUM_NR)
return false;
- if (bch2_csum_type_is_encryption(type) && !c->chacha20)
+ if (bch2_csum_type_is_encryption(type) && !c->chacha20_key_set)
return false;
return true;
diff --git a/fs/bcachefs/clock.c b/fs/bcachefs/clock.c
index 1f8e035d7119..d6dd12d74d4f 100644
--- a/fs/bcachefs/clock.c
+++ b/fs/bcachefs/clock.c
@@ -121,7 +121,7 @@ void bch2_kthread_io_clock_wait(struct io_clock *clock,
} while (0);
__set_current_state(TASK_RUNNING);
- del_timer_sync(&wait.cpu_timer);
+ timer_delete_sync(&wait.cpu_timer);
destroy_timer_on_stack(&wait.cpu_timer);
bch2_io_timer_del(clock, &wait.io_timer);
}
diff --git a/fs/bcachefs/compress.c b/fs/bcachefs/compress.c
index 85fc90342492..28ed32449913 100644
--- a/fs/bcachefs/compress.c
+++ b/fs/bcachefs/compress.c
@@ -371,13 +371,14 @@ static int attempt_compress(struct bch_fs *c,
};
zlib_set_workspace(&strm, workspace);
- zlib_deflateInit2(&strm,
+ if (zlib_deflateInit2(&strm,
compression.level
? clamp_t(unsigned, compression.level,
Z_BEST_SPEED, Z_BEST_COMPRESSION)
: Z_DEFAULT_COMPRESSION,
Z_DEFLATED, -MAX_WBITS, DEF_MEM_LEVEL,
- Z_DEFAULT_STRATEGY);
+ Z_DEFAULT_STRATEGY) != Z_OK)
+ return 0;
if (zlib_deflate(&strm, Z_FINISH) != Z_STREAM_END)
return 0;
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 0ec273daccb7..b211c97238ab 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -22,6 +22,13 @@
#include <linux/ioprio.h>
+static const char * const bch2_data_update_type_strs[] = {
+#define x(t, n, ...) [n] = #t,
+ BCH_DATA_UPDATE_TYPES()
+#undef x
+ NULL
+};
+
static void bkey_put_dev_refs(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
@@ -181,6 +188,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
container_of(op, struct data_update, op);
struct keylist *keys = &op->insert_keys;
struct bkey_buf _new, _insert;
+ struct printbuf journal_msg = PRINTBUF;
int ret = 0;
bch2_bkey_buf_init(&_new);
@@ -208,7 +216,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -372,7 +380,12 @@ restart_drop_extra_replicas:
printbuf_exit(&buf);
}
- ret = bch2_insert_snapshot_whiteouts(trans, m->btree_id,
+ printbuf_reset(&journal_msg);
+ prt_str(&journal_msg, bch2_data_update_type_strs[m->type]);
+
+ ret = bch2_trans_log_msg(trans, &journal_msg) ?:
+ bch2_trans_log_bkey(trans, m->btree_id, 0, m->k.k) ?:
+ bch2_insert_snapshot_whiteouts(trans, m->btree_id,
k.k->p, bkey_start_pos(&insert->k)) ?:
bch2_insert_snapshot_whiteouts(trans, m->btree_id,
k.k->p, insert->k.p) ?:
@@ -385,7 +398,7 @@ restart_drop_extra_replicas:
BCH_TRANS_COMMIT_no_enospc|
m->data_opts.btree_insert_flags);
if (!ret) {
- bch2_btree_iter_set_pos(&iter, next_pos);
+ bch2_btree_iter_set_pos(trans, &iter, next_pos);
this_cpu_add(c->counters[BCH_COUNTER_io_move_finish], new->k.size);
if (trace_io_move_finish_enabled())
@@ -413,10 +426,11 @@ nowork:
count_event(c, io_move_fail);
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
goto next;
}
out:
+ printbuf_exit(&journal_msg);
bch2_trans_iter_exit(trans, &iter);
bch2_bkey_buf_exit(&_insert, c);
bch2_bkey_buf_exit(&_new, c);
@@ -483,7 +497,7 @@ static int bch2_update_unwritten_extent(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
BTREE_ITER_slots);
ret = lockrestart_do(trans, ({
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
bkey_err(k);
}));
bch2_trans_iter_exit(trans, &iter);
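
The mechanical change threaded through this series: btree iterator entry points now take the btree_trans explicitly instead of recovering it from the iterator. Call-site shape:

    /* before */
    k = bch2_btree_iter_peek_slot(&iter);

    /* after */
    k = bch2_btree_iter_peek_slot(trans, &iter);
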
@@ -577,6 +591,9 @@ void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c,
void bch2_data_update_to_text(struct printbuf *out, struct data_update *m)
{
+ prt_str(out, bch2_data_update_type_strs[m->type]);
+ prt_newline(out);
+
bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
prt_newline(out);
@@ -590,7 +607,7 @@ void bch2_data_update_inflight_to_text(struct printbuf *out, struct data_update
prt_newline(out);
printbuf_indent_add(out, 2);
bch2_data_update_opts_to_text(out, m->op.c, &m->op.opts, &m->data_opts);
- prt_printf(out, "read_done:\t\%u\n", m->read_done);
+ prt_printf(out, "read_done:\t%u\n", m->read_done);
bch2_write_op_to_text(out, &m->op);
printbuf_indent_sub(out, 2);
}
@@ -738,6 +755,9 @@ int bch2_data_update_init(struct btree_trans *trans,
bch2_bkey_buf_init(&m->k);
bch2_bkey_buf_reassemble(&m->k, c, k);
+ m->type = data_opts.btree_insert_flags & BCH_WATERMARK_copygc
+ ? BCH_DATA_UPDATE_copygc
+ : BCH_DATA_UPDATE_rebalance;
m->btree_id = btree_id;
m->data_opts = data_opts;
m->ctxt = ctxt;
diff --git a/fs/bcachefs/data_update.h b/fs/bcachefs/data_update.h
index c194cbbf5b51..ed05125867da 100644
--- a/fs/bcachefs/data_update.h
+++ b/fs/bcachefs/data_update.h
@@ -24,7 +24,19 @@ struct data_update_opts {
void bch2_data_update_opts_to_text(struct printbuf *, struct bch_fs *,
struct bch_io_opts *, struct data_update_opts *);
+#define BCH_DATA_UPDATE_TYPES() \
+ x(copygc, 0) \
+ x(rebalance, 1) \
+ x(promote, 2)
+
+enum bch_data_update_types {
+#define x(n, id) BCH_DATA_UPDATE_##n = id,
+ BCH_DATA_UPDATE_TYPES()
+#undef x
+};
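
For reference, this x-macro pair expands to the enum here plus the string table added in data_update.c above:

    enum bch_data_update_types {
    	BCH_DATA_UPDATE_copygc		= 0,
    	BCH_DATA_UPDATE_rebalance	= 1,
    	BCH_DATA_UPDATE_promote		= 2,
    };

    static const char * const bch2_data_update_type_strs[] = {
    	[0] = "copygc",
    	[1] = "rebalance",
    	[2] = "promote",
    	NULL
    };
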
+
struct data_update {
+ enum bch_data_update_types type;
/* extent being updated: */
bool read_done;
enum btree_id btree_id;
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 788af88f6979..5a8bc7013512 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -57,7 +57,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
submit_bio_wait(bio);
bio_put(bio);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));
@@ -297,7 +297,7 @@ out:
if (bio)
bio_put(bio);
kvfree(n_ondisk);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
#ifdef CONFIG_DEBUG_FS
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index d7f9f79318a2..8488a7578115 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -287,8 +287,8 @@ static void dirent_init_casefolded_name(struct bkey_i_dirent *dirent,
EBUG_ON(!dirent->v.d_casefold);
EBUG_ON(!cf_name->len);
- dirent->v.d_cf_name_block.d_name_len = name->len;
- dirent->v.d_cf_name_block.d_cf_name_len = cf_name->len;
+ dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len);
+ dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_name->len);
memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);
memcpy(&dirent->v.d_cf_name_block.d_names[name->len], cf_name->name, cf_name->len);
memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_name->len], 0,
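
The fix above only changes behavior on big-endian hosts: d_name_len and d_cf_name_len are __le16 on disk, and on little-endian builds cpu_to_le16() compiles to nothing, which is how the missing conversions went unnoticed. The invariant, with an illustrative name_len:

    __le16 on_disk = cpu_to_le16(name_len);	/* writer converts to LE */
    u16    native  = le16_to_cpu(on_disk);	/* reader converts back  */
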
@@ -417,8 +417,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct qstr src_name_lookup, dst_name_lookup;
- struct btree_iter src_iter = { NULL };
- struct btree_iter dst_iter = { NULL };
+ struct btree_iter src_iter = {};
+ struct btree_iter dst_iter = {};
struct bkey_s_c old_src, old_dst = bkey_s_c_null;
struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
struct bpos dst_pos =
@@ -586,16 +586,16 @@ out_set_src:
}
if (delete_src) {
- bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&src_iter) ?:
+ bch2_btree_iter_set_snapshot(trans, &src_iter, old_src.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &src_iter) ?:
bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
}
if (delete_dst) {
- bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&dst_iter) ?:
+ bch2_btree_iter_set_snapshot(trans, &dst_iter, old_dst.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dst_iter) ?:
bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
@@ -642,7 +642,7 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
const struct qstr *name, subvol_inum *inum)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
int ret = lockrestart_do(trans,
bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
@@ -771,7 +771,7 @@ int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash_info, &iter,
BTREE_UPDATE_internal_snapshot_node);
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index b32e91ba8be8..b007319b72e9 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -114,10 +114,9 @@ int bch2_mod_dev_cached_sectors(struct btree_trans *trans,
unsigned dev, s64 sectors,
bool gc)
{
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
-
+ struct disk_accounting_pos acc;
+ memset(&acc, 0, sizeof(acc));
+ acc.type = BCH_DISK_ACCOUNTING_replicas;
bch2_replicas_entry_cached(&acc.replicas, dev);
return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
@@ -135,6 +134,12 @@ static inline bool is_zero(char *start, char *end)
#define field_end(p, member) (((void *) (&p.member)) + sizeof(p.member))
+static const unsigned bch2_accounting_type_nr_counters[] = {
+#define x(f, id, nr) [BCH_DISK_ACCOUNTING_##f] = nr,
+ BCH_DISK_ACCOUNTING_TYPES()
+#undef x
+};
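
With the counter counts added to BCH_DISK_ACCOUNTING_TYPES() in disk_accounting_format.h below, this table expands to:

    static const unsigned bch2_accounting_type_nr_counters[] = {
    	[BCH_DISK_ACCOUNTING_nr_inodes]			= 1,
    	[BCH_DISK_ACCOUNTING_persistent_reserved]	= 1,
    	[BCH_DISK_ACCOUNTING_replicas]			= 1,
    	[BCH_DISK_ACCOUNTING_dev_data_type]		= 3,
    	[BCH_DISK_ACCOUNTING_compression]		= 3,
    	[BCH_DISK_ACCOUNTING_snapshot]			= 1,
    	[BCH_DISK_ACCOUNTING_btree]			= 1,
    	[BCH_DISK_ACCOUNTING_rebalance_work]		= 1,
    	[BCH_DISK_ACCOUNTING_inum]			= 3,
    };

bch2_accounting_validate() then rejects any accounting key whose u64 count disagrees with its type's entry here.
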
+
int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
struct bkey_validate_context from)
{
@@ -193,6 +198,11 @@ int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
bkey_fsck_err_on(!is_zero(end, (void *) (&acc_k + 1)),
c, accounting_key_junk_at_end,
"junk at end of accounting key");
+
+ bkey_fsck_err_on(bch2_accounting_counters(k.k) != bch2_accounting_type_nr_counters[acc_k.type],
+ c, accounting_key_nr_counters_wrong,
+ "accounting key with %u counters, should be %u",
+ bch2_accounting_counters(k.k), bch2_accounting_type_nr_counters[acc_k.type]);
fsck_err:
return ret;
}
@@ -635,7 +645,7 @@ static int bch2_disk_accounting_validate_late(struct btree_trans *trans,
if (fsck_err_on(!bch2_replicas_marked_locked(c, &r.e),
trans, accounting_replicas_not_marked,
- "accounting not marked in superblock replicas\n %s",
+ "accounting not marked in superblock replicas\n%s",
(printbuf_reset(&buf),
bch2_accounting_key_to_text(&buf, &acc),
buf.buf))) {
@@ -665,7 +675,7 @@ fsck_err:
return ret;
invalid_device:
if (fsck_err(trans, accounting_to_invalid_device,
- "accounting entry points to invalid device %i\n %s",
+ "accounting entry points to invalid device %i\n%s",
invalid_dev,
(printbuf_reset(&buf),
bch2_accounting_key_to_text(&buf, &acc),
@@ -726,8 +736,10 @@ int bch2_accounting_read(struct bch_fs *c)
break;
if (!bch2_accounting_is_mem(acc_k)) {
- struct disk_accounting_pos next = { .type = acc_k.type + 1 };
- bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next));
+ struct disk_accounting_pos next;
+ memset(&next, 0, sizeof(next));
+ next.type = acc_k.type + 1;
+ bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next));
continue;
}
@@ -882,15 +894,13 @@ int bch2_dev_usage_remove(struct bch_fs *c, unsigned dev)
int bch2_dev_usage_init(struct bch_dev *ca, bool gc)
{
struct bch_fs *c = ca->fs;
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_dev_data_type,
- .dev_data_type.dev = ca->dev_idx,
- .dev_data_type.data_type = BCH_DATA_free,
- };
u64 v[3] = { ca->mi.nbuckets - ca->mi.first_bucket, 0, 0 };
int ret = bch2_trans_do(c, ({
- bch2_disk_accounting_mod(trans, &acc, v, ARRAY_SIZE(v), gc) ?:
+ bch2_disk_accounting_mod2(trans, gc,
+ v, dev_data_type,
+ .dev = ca->dev_idx,
+ .data_type = BCH_DATA_free) ?:
(!gc ? bch2_trans_commit(trans, NULL, NULL, 0) : 0);
}));
bch_err_fn(c, ret);
@@ -917,8 +927,10 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
break;
if (!bch2_accounting_is_mem(acc_k)) {
- struct disk_accounting_pos next = { .type = acc_k.type + 1 };
- bch2_btree_iter_set_pos(&iter, disk_accounting_pos_to_bpos(&next));
+ struct disk_accounting_pos next;
+ memset(&next, 0, sizeof(next));
+ next.type = acc_k.type + 1;
+ bch2_btree_iter_set_pos(trans, &iter, disk_accounting_pos_to_bpos(&next));
continue;
}
diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h
index f9214e2d1346..abb1f6206fe9 100644
--- a/fs/bcachefs/disk_accounting.h
+++ b/fs/bcachefs/disk_accounting.h
@@ -33,10 +33,12 @@ static inline bool bch2_accounting_key_is_zero(struct bkey_s_c_accounting a)
static inline void bch2_accounting_accumulate(struct bkey_i_accounting *dst,
struct bkey_s_c_accounting src)
{
- EBUG_ON(dst->k.u64s != src.k->u64s);
-
- for (unsigned i = 0; i < bch2_accounting_counters(&dst->k); i++)
+ for (unsigned i = 0;
+ i < min(bch2_accounting_counters(&dst->k),
+ bch2_accounting_counters(src.k));
+ i++)
dst->v.d[i] += src.v->d[i];
+
if (bversion_cmp(dst->k.bversion, src.k->bversion) < 0)
dst->k.bversion = src.k->bversion;
}
diff --git a/fs/bcachefs/disk_accounting_format.h b/fs/bcachefs/disk_accounting_format.h
index 15190196485f..8269af1dbe2a 100644
--- a/fs/bcachefs/disk_accounting_format.h
+++ b/fs/bcachefs/disk_accounting_format.h
@@ -95,40 +95,81 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
}
}
+/*
+ * field 1: name
+ * field 2: id
+ * field 3: number of counters (max 3)
+ */
+
#define BCH_DISK_ACCOUNTING_TYPES() \
- x(nr_inodes, 0) \
- x(persistent_reserved, 1) \
- x(replicas, 2) \
- x(dev_data_type, 3) \
- x(compression, 4) \
- x(snapshot, 5) \
- x(btree, 6) \
- x(rebalance_work, 7) \
- x(inum, 8)
+ x(nr_inodes, 0, 1) \
+ x(persistent_reserved, 1, 1) \
+ x(replicas, 2, 1) \
+ x(dev_data_type, 3, 3) \
+ x(compression, 4, 3) \
+ x(snapshot, 5, 1) \
+ x(btree, 6, 1) \
+ x(rebalance_work, 7, 1) \
+ x(inum, 8, 3)
enum disk_accounting_type {
-#define x(f, nr) BCH_DISK_ACCOUNTING_##f = nr,
+#define x(f, nr, ...) BCH_DISK_ACCOUNTING_##f = nr,
BCH_DISK_ACCOUNTING_TYPES()
#undef x
BCH_DISK_ACCOUNTING_TYPE_NR,
};
+/*
+ * No subtypes - number of inodes in the entire filesystem
+ *
+ * XXX: perhaps we could add a per-subvolume counter?
+ */
struct bch_acct_nr_inodes {
};
+/*
+ * Tracks KEY_TYPE_reservation sectors, broken out by number of replicas for the
+ * reservation:
+ */
struct bch_acct_persistent_reserved {
__u8 nr_replicas;
};
+/*
+ * device, data type counter fields:
+ * [
+ * nr_buckets
+ * live sectors (in buckets of that data type)
+ * sectors of internal fragmentation
+ * ]
+ *
+ * XXX: live sectors should've been done differently, you can have multiple data
+ * types in the same bucket (user, stripe, cached) and this collapses them to
+ * the bucket data type, and makes the internal fragmentation counter redundant
+ */
struct bch_acct_dev_data_type {
__u8 dev;
__u8 data_type;
};
+/*
+ * Compression type fields:
+ * [
+ * number of extents
+ * uncompressed size
+ * compressed size
+ * ]
+ *
+ * Compression ratio, average extent size (fragmentation).
+ */
struct bch_acct_compression {
__u8 type;
};
+/*
+ * On disk usage by snapshot id; counts same values as replicas counter, but
+ * aggregated differently
+ */
struct bch_acct_snapshot {
__u32 id;
} __packed;
@@ -137,10 +178,27 @@ struct bch_acct_btree {
__u32 id;
} __packed;
+/*
+ * inum counter fields:
+ * [
+ * number of extents
+ * sum of extent sizes - bkey size
+ * this field is similar to inode.bi_sectors, except here extents in
+ * different snapshots but the same inode number are all collapsed to the
+ * same counter
+ * sum of on disk size - same values tracked by replicas counters
+ * ]
+ *
+ * This tracks on disk fragmentation.
+ */
struct bch_acct_inum {
__u64 inum;
} __packed;
+/*
+ * Simple counter of the amount of data (on disk sectors) rebalance needs to
+ * move, extents counted here are also in the rebalance_work btree.
+ */
struct bch_acct_rebalance_work {
};
@@ -149,7 +207,7 @@ struct disk_accounting_pos {
struct {
__u8 type;
union {
- struct bch_acct_nr_inodes nr_inodes;
+ struct bch_acct_nr_inodes nr_inodes;
struct bch_acct_persistent_reserved persistent_reserved;
struct bch_replicas_entry_v1 replicas;
struct bch_acct_dev_data_type dev_data_type;
diff --git a/fs/bcachefs/disk_groups.c b/fs/bcachefs/disk_groups.c
index 5df8de0b8c02..1186280b29e9 100644
--- a/fs/bcachefs/disk_groups.c
+++ b/fs/bcachefs/disk_groups.c
@@ -555,9 +555,9 @@ void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
? rcu_dereference(c->devs[t.dev])
: NULL;
- if (ca && percpu_ref_tryget(&ca->io_ref)) {
+ if (ca && percpu_ref_tryget(&ca->io_ref[READ])) {
prt_printf(out, "/dev/%s", ca->name);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
} else if (ca) {
prt_printf(out, "offline device %u", t.dev);
} else {
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index f2b9225fe0bc..a396865e8b17 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -105,6 +105,7 @@ struct ec_bio {
struct bch_dev *ca;
struct ec_stripe_buf *buf;
size_t idx;
+ int rw;
u64 submit_time;
struct bio bio;
};
@@ -320,7 +321,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
if (flags & BTREE_TRIGGER_gc) {
struct bucket *g = gc_bucket(ca, bucket.offset);
- if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
+ if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n%s",
ptr->dev,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -BCH_ERR_mark_stripe;
@@ -453,24 +454,25 @@ int bch2_trigger_stripe(struct btree_trans *trans,
if (new_s) {
s64 sectors = (u64) le16_to_cpu(new_s->sectors) * new_s->nr_redundant;
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
+ struct disk_accounting_pos acc;
+ memset(&acc, 0, sizeof(acc));
+ acc.type = BCH_DISK_ACCOUNTING_replicas;
bch2_bkey_to_replicas(&acc.replicas, new);
int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
if (ret)
return ret;
if (gc)
- memcpy(&gc->r.e, &acc.replicas, replicas_entry_bytes(&acc.replicas));
+ unsafe_memcpy(&gc->r.e, &acc.replicas,
+ replicas_entry_bytes(&acc.replicas), "VLA");
}
if (old_s) {
s64 sectors = -((s64) le16_to_cpu(old_s->sectors)) * old_s->nr_redundant;
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
+ struct disk_accounting_pos acc;
+ memset(&acc, 0, sizeof(acc));
+ acc.type = BCH_DISK_ACCOUNTING_replicas;
bch2_bkey_to_replicas(&acc.replicas, old);
int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
if (ret)
@@ -703,6 +705,7 @@ static void ec_block_endio(struct bio *bio)
struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
struct bch_dev *ca = ec_bio->ca;
struct closure *cl = bio->bi_private;
+ int rw = ec_bio->rw;
bch2_account_io_completion(ca, bio_data_dir(bio),
ec_bio->submit_time, !bio->bi_status);
@@ -724,7 +727,7 @@ static void ec_block_endio(struct bio *bio)
}
bio_put(&ec_bio->bio);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
closure_put(cl);
}
@@ -775,6 +778,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
ec_bio->ca = ca;
ec_bio->buf = buf;
ec_bio->idx = idx;
+ ec_bio->rw = rw;
ec_bio->submit_time = local_clock();
ec_bio->bio.bi_iter.bi_sector = ptr->offset + buf->offset + (offset >> 9);
@@ -784,14 +788,14 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
closure_get(cl);
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[rw]);
submit_bio(&ec_bio->bio);
offset += b;
}
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
}
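
Another change that recurs across this patch: ca->io_ref is now a two-element array indexed by READ/WRITE so the two directions can be shut down independently, and every get must be paired with a put on the same index; that is why ec_bio now records rw for its endio handler. Pattern sketch (submit_one is illustrative):

    static int submit_one(struct bch_dev *ca, int rw)
    {
    	if (!percpu_ref_tryget(&ca->io_ref[rw]))
    		return -EROFS;	/* this direction is shutting down */

    	/* ... issue the I/O; the completion path must put the same index ... */

    	percpu_ref_put(&ca->io_ref[rw]);
    	return 0;
    }
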
static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
@@ -1264,7 +1268,7 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
ob->sectors_free,
GFP_KERNEL, 0);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
if (ret)
s->err = ret;
@@ -1836,7 +1840,7 @@ static int __get_existing_stripe(struct btree_trans *trans,
ret = 1;
}
out:
- bch2_set_btree_iter_dontneed(&iter);
+ bch2_set_btree_iter_dontneed(trans, &iter);
err:
bch2_trans_iter_exit(trans, &iter);
return ret;
@@ -1949,7 +1953,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
if (start_pos.offset) {
start_pos = min_pos;
- bch2_btree_iter_set_pos(&iter, start_pos);
+ bch2_btree_iter_set_pos(trans, &iter, start_pos);
continue;
}
@@ -2110,14 +2114,14 @@ static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_
if (ret)
return ret;
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_replicas,
- };
+ struct disk_accounting_pos acc;
s64 sectors = 0;
for (unsigned i = 0; i < s->v.nr_blocks; i++)
sectors -= stripe_blockcount_get(&s->v, i);
+ memset(&acc, 0, sizeof(acc));
+ acc.type = BCH_DISK_ACCOUNTING_replicas;
bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
acc.replicas.data_type = BCH_DATA_user;
ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
@@ -2131,6 +2135,8 @@ static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_
sectors = -sectors;
+ memset(&acc, 0, sizeof(acc));
+ acc.type = BCH_DISK_ACCOUNTING_replicas;
bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
acc.replicas.data_type = BCH_DATA_user;
ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index 101806d7ebe1..c8696f01eb14 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -5,6 +5,8 @@
#define BCH_ERRCODES() \
x(ERANGE, ERANGE_option_too_small) \
x(ERANGE, ERANGE_option_too_big) \
+ x(EINVAL, injected) \
+ x(BCH_ERR_injected, injected_fs_start) \
x(EINVAL, mount_option) \
x(BCH_ERR_mount_option, option_name) \
x(BCH_ERR_mount_option, option_value) \
@@ -308,6 +310,7 @@
x(BCH_ERR_data_write, data_write_misaligned) \
x(BCH_ERR_decompress, data_read) \
x(BCH_ERR_data_read, no_device_to_read_from) \
+ x(BCH_ERR_data_read, no_devices_valid) \
x(BCH_ERR_data_read, data_read_io_err) \
x(BCH_ERR_data_read, data_read_csum_err) \
x(BCH_ERR_data_read, data_read_retry) \
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index 207f35d3cce2..baf5dfb32298 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -11,7 +11,16 @@
#define FSCK_ERR_RATELIMIT_NR 10
-bool bch2_inconsistent_error(struct bch_fs *c)
+void bch2_log_msg_start(struct bch_fs *c, struct printbuf *out)
+{
+ printbuf_indent_add_nextline(out, 2);
+
+#ifdef BCACHEFS_LOG_PREFIX
+ prt_printf(out, bch2_log_msg(c, ""));
+#endif
+}
+
+bool __bch2_inconsistent_error(struct bch_fs *c, struct printbuf *out)
{
set_bit(BCH_FS_error, &c->flags);
@@ -21,10 +30,11 @@ bool bch2_inconsistent_error(struct bch_fs *c)
case BCH_ON_ERROR_fix_safe:
case BCH_ON_ERROR_ro:
if (bch2_fs_emergency_read_only(c))
- bch_err(c, "inconsistency detected - emergency read only at journal seq %llu",
- journal_cur_seq(&c->journal));
+ prt_printf(out, "inconsistency detected - emergency read only at journal seq %llu\n",
+ journal_cur_seq(&c->journal));
return true;
case BCH_ON_ERROR_panic:
+ bch2_print_string_as_lines_nonblocking(KERN_ERR, out->buf);
panic(bch2_fmt(c, "panic after error"));
return true;
default:
@@ -32,11 +42,66 @@ bool bch2_inconsistent_error(struct bch_fs *c)
}
}
-int bch2_topology_error(struct bch_fs *c)
+bool bch2_inconsistent_error(struct bch_fs *c)
+{
+ struct printbuf buf = PRINTBUF;
+ buf.atomic++;
+
+ printbuf_indent_add_nextline(&buf, 2);
+
+ bool ret = __bch2_inconsistent_error(c, &buf);
+ if (ret)
+ bch_err(c, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return ret;
+}
+
+__printf(3, 0)
+static bool bch2_fs_trans_inconsistent(struct bch_fs *c, struct btree_trans *trans,
+ const char *fmt, va_list args)
{
+ struct printbuf buf = PRINTBUF;
+ buf.atomic++;
+
+ bch2_log_msg_start(c, &buf);
+
+ prt_vprintf(&buf, fmt, args);
+ prt_newline(&buf);
+
+ if (trans)
+ bch2_trans_updates_to_text(&buf, trans);
+ bool ret = __bch2_inconsistent_error(c, &buf);
+ bch2_print_string_as_lines_nonblocking(KERN_ERR, buf.buf);
+
+ printbuf_exit(&buf);
+ return ret;
+}
+
+bool bch2_fs_inconsistent(struct bch_fs *c, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ bool ret = bch2_fs_trans_inconsistent(c, NULL, fmt, args);
+ va_end(args);
+ return ret;
+}
+
+bool bch2_trans_inconsistent(struct btree_trans *trans, const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ bool ret = bch2_fs_trans_inconsistent(trans->c, trans, fmt, args);
+ va_end(args);
+ return ret;
+}
+
+int __bch2_topology_error(struct bch_fs *c, struct printbuf *out)
+{
+ prt_printf(out, "btree topology error: ");
+
set_bit(BCH_FS_topology_error, &c->flags);
if (!test_bit(BCH_FS_recovery_running, &c->flags)) {
- bch2_inconsistent_error(c);
+ __bch2_inconsistent_error(c, out);
return -BCH_ERR_btree_need_topology_repair;
} else {
return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology) ?:
@@ -44,6 +109,24 @@ int bch2_topology_error(struct bch_fs *c)
}
}
+int bch2_fs_topology_error(struct bch_fs *c, const char *fmt, ...)
+{
+ struct printbuf buf = PRINTBUF;
+
+ bch2_log_msg_start(c, &buf);
+
+ va_list args;
+ va_start(args, fmt);
+ prt_vprintf(&buf, fmt, args);
+ va_end(args);
+
+ int ret = __bch2_topology_error(c, &buf);
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+
+ printbuf_exit(&buf);
+ return ret;
+}
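
The new printf-style wrappers gather the message, the pending transaction updates, and the errors= policy (read-only or panic) into a single call. A hedged call-site sketch; the condition, message, and errcode are lifted from elsewhere in this patch:

    if (bch2_trans_inconsistent_on(!m || !m->alive, trans,
    		"pointer to nonexistent stripe %llu", (u64) p.ec.idx))
    	return -BCH_ERR_trigger_stripe_pointer;
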
+
void bch2_fatal_error(struct bch_fs *c)
{
if (bch2_fs_emergency_read_only(c))
@@ -184,7 +267,8 @@ static enum ask_yn bch2_fsck_ask_yn(struct bch_fs *c, struct btree_trans *trans)
#endif
-static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
+static struct fsck_err_state *fsck_err_get(struct bch_fs *c,
+ enum bch_sb_error_id id)
{
struct fsck_err_state *s;
@@ -192,7 +276,7 @@ static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
return NULL;
list_for_each_entry(s, &c->fsck_error_msgs, list)
- if (s->fmt == fmt) {
+ if (s->id == id) {
/*
* move it to the head of the list: repeated fsck errors
* are common
@@ -210,7 +294,7 @@ static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
}
INIT_LIST_HEAD(&s->list);
- s->fmt = fmt;
+ s->id = id;
list_add(&s->list, &c->fsck_error_msgs);
return s;
}
@@ -260,15 +344,59 @@ static int do_fsck_ask_yn(struct bch_fs *c,
return ask;
}
+static struct fsck_err_state *count_fsck_err_locked(struct bch_fs *c,
+ enum bch_sb_error_id id, const char *msg,
+ bool *repeat, bool *print, bool *suppress)
+{
+ bch2_sb_error_count(c, id);
+
+ struct fsck_err_state *s = fsck_err_get(c, id);
+ if (s) {
+ /*
+ * We may be called multiple times for the same error on
+ * transaction restart - this memoizes instead of asking the user
+ * multiple times for the same error:
+ */
+ if (s->last_msg && !strcmp(msg, s->last_msg)) {
+ *repeat = true;
+ *print = false;
+ return s;
+ }
+
+ kfree(s->last_msg);
+ s->last_msg = kstrdup(msg, GFP_KERNEL);
+
+ if (c->opts.ratelimit_errors &&
+ s->nr >= FSCK_ERR_RATELIMIT_NR) {
+ if (s->nr == FSCK_ERR_RATELIMIT_NR)
+ *suppress = true;
+ else
+ *print = false;
+ }
+
+ s->nr++;
+ }
+ return s;
+}
+
+void __bch2_count_fsck_err(struct bch_fs *c,
+ enum bch_sb_error_id id, const char *msg,
+ bool *repeat, bool *print, bool *suppress)
+{
+ mutex_lock(&c->fsck_error_msgs_lock);
+ count_fsck_err_locked(c, id, msg, repeat, print, suppress);
+ mutex_unlock(&c->fsck_error_msgs_lock);
+}
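
This gives log-only error paths the same counting, memoization and ratelimiting as interactive fsck errors without the full __bch2_fsck_err() machinery. A hedged caller sketch (the error id is real, the call site is illustrative):

    bool repeat = false, print = true, suppress = false;

    bch2_count_fsck_err(c, bucket_sector_count_overflow, buf.buf,
    		    &repeat, &print, &suppress);
    if (print)
    	bch2_print_string_as_lines(KERN_ERR, buf.buf);
    if (suppress)
    	bch_err(c, "Ratelimiting new instances of previous error");
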
+
int __bch2_fsck_err(struct bch_fs *c,
struct btree_trans *trans,
enum bch_fsck_flags flags,
enum bch_sb_error_id err,
const char *fmt, ...)
{
- struct fsck_err_state *s = NULL;
va_list args;
- bool print = true, suppressing = false, inconsistent = false, exiting = false;
struct printbuf buf = PRINTBUF, *out = &buf;
int ret = -BCH_ERR_fsck_ignore;
const char *action_orig = "fix?", *action = action_orig;
@@ -303,7 +431,12 @@ int __bch2_fsck_err(struct bch_fs *c,
? -BCH_ERR_fsck_fix
: -BCH_ERR_fsck_ignore;
- bch2_sb_error_count(c, err);
+ printbuf_indent_add_nextline(out, 2);
+
+#ifdef BCACHEFS_LOG_PREFIX
+ if (strncmp(fmt, "bcachefs", 8))
+ prt_printf(out, bch2_log_msg(c, ""));
+#endif
va_start(args, fmt);
prt_vprintf(out, fmt, args);
@@ -323,42 +456,15 @@ int __bch2_fsck_err(struct bch_fs *c,
}
mutex_lock(&c->fsck_error_msgs_lock);
- s = fsck_err_get(c, fmt);
- if (s) {
- /*
- * We may be called multiple times for the same error on
- * transaction restart - this memoizes instead of asking the user
- * multiple times for the same error:
- */
- if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
- ret = s->ret;
- goto err_unlock;
- }
-
- kfree(s->last_msg);
- s->last_msg = kstrdup(buf.buf, GFP_KERNEL);
- if (!s->last_msg) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- if (c->opts.ratelimit_errors &&
- !(flags & FSCK_NO_RATELIMIT) &&
- s->nr >= FSCK_ERR_RATELIMIT_NR) {
- if (s->nr == FSCK_ERR_RATELIMIT_NR)
- suppressing = true;
- else
- print = false;
- }
-
- s->nr++;
+ bool repeat = false, print = true, suppress = false;
+ bool inconsistent = false, exiting = false;
+ struct fsck_err_state *s =
+ count_fsck_err_locked(c, err, buf.buf, &repeat, &print, &suppress);
+ if (repeat) {
+ ret = s->ret;
+ goto err_unlock;
}
-#ifdef BCACHEFS_LOG_PREFIX
- if (!strncmp(fmt, "bcachefs:", 9))
- prt_printf(out, bch2_log_msg(c, ""));
-#endif
-
if ((flags & FSCK_AUTOFIX) &&
(c->opts.errors == BCH_ON_ERROR_continue ||
c->opts.errors == BCH_ON_ERROR_fix_safe)) {
@@ -377,6 +483,7 @@ int __bch2_fsck_err(struct bch_fs *c,
!(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE))) {
prt_str(out, ", shutting down");
inconsistent = true;
+ print = true;
ret = -BCH_ERR_fsck_errors_not_fixed;
} else if (flags & FSCK_CAN_FIX) {
prt_str(out, ", ");
@@ -435,24 +542,30 @@ int __bch2_fsck_err(struct bch_fs *c,
print = true;
}
print:
+ prt_newline(out);
+
+ if (inconsistent)
+ __bch2_inconsistent_error(c, out);
+ else if (exiting)
+ prt_printf(out, "Unable to continue, halting\n");
+ else if (suppress)
+ prt_printf(out, "Ratelimiting new instances of previous error\n");
+
if (print) {
+ /* possibly strip an empty line, from printbuf_indent_add */
+ while (out->pos && out->buf[out->pos - 1] == ' ')
+ --out->pos;
+ printbuf_nul_terminate(out);
+
if (bch2_fs_stdio_redirect(c))
- bch2_print(c, "%s\n", out->buf);
+ bch2_print(c, "%s", out->buf);
else
bch2_print_string_as_lines(KERN_ERR, out->buf);
}
- if (exiting)
- bch_err(c, "Unable to continue, halting");
- else if (suppressing)
- bch_err(c, "Ratelimiting new instances of previous error");
-
if (s)
s->ret = ret;
- if (inconsistent)
- bch2_inconsistent_error(c);
-
/*
* We don't yet track whether the filesystem currently has errors, for
* log_fsck_err()s: that would require us to track for every error type
@@ -514,16 +627,14 @@ int __bch2_bkey_fsck_err(struct bch_fs *c,
prt_printf(&buf, " level=%u: ", from.level);
bch2_bkey_val_to_text(&buf, c, k);
- prt_str(&buf, "\n ");
+ prt_newline(&buf);
va_list args;
va_start(args, fmt);
prt_vprintf(&buf, fmt, args);
va_end(args);
- prt_str(&buf, ": delete?");
-
- int ret = __bch2_fsck_err(c, NULL, fsck_flags, err, "%s", buf.buf);
+ int ret = __bch2_fsck_err(c, NULL, fsck_flags, err, "%s, delete?", buf.buf);
printbuf_exit(&buf);
return ret;
}
@@ -536,7 +647,7 @@ void bch2_flush_fsck_errs(struct bch_fs *c)
list_for_each_entry_safe(s, n, &c->fsck_error_msgs, list) {
if (s->ratelimited && s->last_msg)
- bch_err(c, "Saw %llu errors like:\n %s", s->nr, s->last_msg);
+ bch_err(c, "Saw %llu errors like:\n  %s", s->nr, s->last_msg);
list_del(&s->list);
kfree(s->last_msg);
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 7d3f0e2a5fd6..d0d024dc714b 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -18,6 +18,8 @@ struct work_struct;
/* Error messages: */
+void bch2_log_msg_start(struct bch_fs *, struct printbuf *);
+
/*
* Inconsistency errors: The on disk data is inconsistent. If these occur during
* initial recovery, they don't indicate a bug in the running code - we walk all
@@ -29,21 +31,10 @@ struct work_struct;
* BCH_ON_ERROR_CONTINUE mode
*/
+bool __bch2_inconsistent_error(struct bch_fs *, struct printbuf *);
bool bch2_inconsistent_error(struct bch_fs *);
-
-int bch2_topology_error(struct bch_fs *);
-
-#define bch2_fs_topology_error(c, ...) \
-({ \
- bch_err(c, "btree topology error: " __VA_ARGS__); \
- bch2_topology_error(c); \
-})
-
-#define bch2_fs_inconsistent(c, ...) \
-({ \
- bch_err(c, __VA_ARGS__); \
- bch2_inconsistent_error(c); \
-})
+__printf(2, 3)
+bool bch2_fs_inconsistent(struct bch_fs *, const char *, ...);
#define bch2_fs_inconsistent_on(cond, ...) \
({ \
@@ -53,26 +44,21 @@ int bch2_topology_error(struct bch_fs *);
_ret; \
})
-/*
- * When a transaction update discovers or is causing a fs inconsistency, it's
- * helpful to also dump the pending updates:
- */
-#define bch2_trans_inconsistent(trans, ...) \
-({ \
- bch_err(trans->c, __VA_ARGS__); \
- bch2_dump_trans_updates(trans); \
- bch2_inconsistent_error(trans->c); \
-})
+__printf(2, 3)
+bool bch2_trans_inconsistent(struct btree_trans *, const char *, ...);
-#define bch2_trans_inconsistent_on(cond, trans, ...) \
+#define bch2_trans_inconsistent_on(cond, ...) \
({ \
bool _ret = unlikely(!!(cond)); \
- \
if (_ret) \
- bch2_trans_inconsistent(trans, __VA_ARGS__); \
+ bch2_trans_inconsistent(__VA_ARGS__); \
_ret; \
})
+int __bch2_topology_error(struct bch_fs *, struct printbuf *);
+__printf(2, 3)
+int bch2_fs_topology_error(struct bch_fs *, const char *, ...);
+
/*
* Fsck errors: inconsistency errors we detect at mount time, and should ideally
* be able to repair:
@@ -80,7 +66,7 @@ int bch2_topology_error(struct bch_fs *);
struct fsck_err_state {
struct list_head list;
- const char *fmt;
+ enum bch_sb_error_id id;
u64 nr;
bool ratelimited;
int ret;
@@ -90,6 +76,12 @@ struct fsck_err_state {
#define fsck_err_count(_c, _err) bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err)
+void __bch2_count_fsck_err(struct bch_fs *,
+ enum bch_sb_error_id, const char *,
+ bool *, bool *, bool *);
+#define bch2_count_fsck_err(_c, _err, ...) \
+ __bch2_count_fsck_err(_c, BCH_FSCK_ERR_##_err, __VA_ARGS__)
+
__printf(5, 6) __cold
int __bch2_fsck_err(struct bch_fs *, struct btree_trans *,
enum bch_fsck_flags,
diff --git a/fs/bcachefs/extent_update.c b/fs/bcachefs/extent_update.c
index 6aac579a692a..6bb42985306e 100644
--- a/fs/bcachefs/extent_update.c
+++ b/fs/bcachefs/extent_update.c
@@ -112,7 +112,7 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
unsigned nr_iters = 0;
int ret;
- ret = bch2_btree_iter_traverse(iter);
+ ret = bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -126,9 +126,9 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
if (ret < 0)
return ret;
- bch2_trans_copy_iter(&copy, iter);
+ bch2_trans_copy_iter(trans, &copy, iter);
- for_each_btree_key_max_continue_norestart(copy, insert->k.p, 0, k, ret) {
+ for_each_btree_key_max_continue_norestart(trans, copy, insert->k.p, 0, k, ret) {
unsigned offset = 0;
if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index ae1a1d917805..ae7c7a177e10 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -227,8 +227,11 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
if (have_io_errors)
return -BCH_ERR_data_read_io_err;
- WARN_ONCE(1, "unhandled error case in %s\n", __func__);
- return -EINVAL;
+ /*
+ * If we get here, we have pointers (bkey_ptrs_validate() ensures that),
+ * but they don't point to valid devices:
+ */
+ return -BCH_ERR_no_devices_valid;
}
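
BCH_ERR_no_devices_valid slots into the private errcode tree as a child of BCH_ERR_data_read (see the errcode.h hunk above), so callers matching on the class keep working unchanged. Sketch:

    static bool is_data_read_err(int ret)
    {
    	/* true for -BCH_ERR_data_read and all of its children,
    	 * including the new -BCH_ERR_no_devices_valid: */
    	return bch2_err_matches(ret, BCH_ERR_data_read);
    }
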
/* KEY_TYPE_btree_ptr: */
diff --git a/fs/bcachefs/fs-io-buffered.c b/fs/bcachefs/fs-io-buffered.c
index 5ab1c73c8d4c..e3a75dcca60c 100644
--- a/fs/bcachefs/fs-io-buffered.c
+++ b/fs/bcachefs/fs-io-buffered.c
@@ -183,12 +183,12 @@ static void bchfs_read(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(inum.inum, rbio->bio.bi_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -225,6 +225,21 @@ static void bchfs_read(struct btree_trans *trans,
bch2_read_extent(trans, rbio, iter.pos,
data_btree, k, offset_into_extent, flags);
+ /*
+ * Careful there's a landmine here if bch2_read_extent() ever
+ * starts returning transaction restarts here.
+ *
+ * We've changed rbio->bi_iter.bi_size to be "bytes we can read
+ * from this extent" with the swap call, and we restore it
+ * below. That restore needs to come before checking for
+ * errors.
+ *
+ * But unlike __bch2_read(), we use the rbio bvec iter, not one
+ * on the stack, so we can't do the restore right after the
+ * bch2_read_extent() call: we don't own that iterator anymore
+ * if BCH_READ_last_fragment is set, since we may have submitted
+ * that rbio instead of cloning it.
+ */
if (flags & BCH_READ_last_fragment)
break;
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 717e7b94c66f..65c2c33d253d 100644
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -48,7 +48,7 @@ static void nocow_flush_endio(struct bio *_bio)
struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
closure_put(bio->cl);
- percpu_ref_put(&bio->ca->io_ref);
+ percpu_ref_put(&bio->ca->io_ref[WRITE]);
bio_put(&bio->bio);
}
@@ -71,7 +71,7 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
rcu_read_lock();
ca = rcu_dereference(c->devs[dev]);
- if (ca && !percpu_ref_tryget(&ca->io_ref))
+ if (ca && !percpu_ref_tryget(&ca->io_ref[WRITE]))
ca = NULL;
rcu_read_unlock();
@@ -636,9 +636,9 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret)
goto bkey_err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
if ((ret = bkey_err(k)))
goto bkey_err;
@@ -649,13 +649,13 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
/* already reserved */
if (bkey_extent_is_reservation(k) &&
bch2_bkey_nr_ptrs_fully_allocated(k) >= opts.data_replicas) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
if (bkey_extent_is_data(k.k) &&
!(mode & FALLOC_FL_ZERO_RANGE)) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
@@ -676,7 +676,7 @@ static noinline int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
if (ret)
goto bkey_err;
}
- bch2_btree_iter_set_pos(&iter, POS(iter.pos.inode, hole_start));
+ bch2_btree_iter_set_pos(trans, &iter, POS(iter.pos.inode, hole_start));
if (ret)
goto bkey_err;
@@ -999,17 +999,28 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
POS(inode->v.i_ino, offset >> 9),
POS(inode->v.i_ino, U64_MAX),
inum.subvol, BTREE_ITER_slots, k, ({
- if (k.k->p.inode != inode->v.i_ino) {
- next_hole = bch2_seek_pagecache_hole(&inode->v,
- offset, MAX_LFS_FILESIZE, 0, false);
- break;
- } else if (!bkey_extent_is_data(k.k)) {
- next_hole = bch2_seek_pagecache_hole(&inode->v,
- max(offset, bkey_start_offset(k.k) << 9),
- k.k->p.offset << 9, 0, false);
-
- if (next_hole < k.k->p.offset << 9)
+ if (k.k->p.inode != inode->v.i_ino ||
+ !bkey_extent_is_data(k.k)) {
+ loff_t start_offset = k.k->p.inode == inode->v.i_ino
+ ? max(offset, bkey_start_offset(k.k) << 9)
+ : offset;
+ loff_t end_offset = k.k->p.inode == inode->v.i_ino
+	? k.k->p.offset << 9
+	: MAX_LFS_FILESIZE;
+
+ /*
+ * Found a hole in the btree, now make sure it's
+ * a hole in the pagecache. We might have to
+ * keep searching if this hole is entirely dirty
+ * in the page cache:
+ */
+ bch2_trans_unlock(trans);
+ loff_t pagecache_hole = bch2_seek_pagecache_hole(&inode->v,
+ start_offset, end_offset, 0, false);
+ if (pagecache_hole < end_offset) {
+ next_hole = pagecache_hole;
break;
+ }
} else {
offset = max(offset, bkey_start_offset(k.k) << 9);
}
diff --git a/fs/bcachefs/fs-ioctl.c b/fs/bcachefs/fs-ioctl.c
index f45054cee746..c1553e44e049 100644
--- a/fs/bcachefs/fs-ioctl.c
+++ b/fs/bcachefs/fs-ioctl.c
@@ -537,10 +537,12 @@ static long bch2_ioctl_subvolume_destroy(struct bch_fs *c, struct file *filp,
ret = -EXDEV;
goto err;
}
- ret = __bch2_unlink(dir, victim, true);
+
+ ret = inode_permission(file_mnt_idmap(filp), d_inode(victim), MAY_WRITE) ?:
+ __bch2_unlink(dir, victim, true);
if (!ret) {
fsnotify_rmdir(dir, victim);
- d_delete(victim);
+ d_invalidate(victim);
}
err:
inode_unlock(dir);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index c88c149d5de5..5a41b1a8e54f 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -88,7 +88,7 @@ int __must_check bch2_write_inode(struct bch_fs *c,
void *p, unsigned fields)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bch_inode_unpacked inode_u;
int ret;
retry:
@@ -673,7 +673,7 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
* back to this dirent
*/
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT),
- c, "dirent to missing inode:\n %s",
+ c, "dirent to missing inode:\n%s",
(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf));
if (ret)
goto err;
@@ -1075,7 +1075,7 @@ int bch2_setattr_nonsize(struct mnt_idmap *idmap,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_qid qid;
struct btree_trans *trans;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
struct bch_inode_unpacked inode_u;
struct posix_acl *acl = NULL;
kuid_t kuid;
@@ -1330,9 +1330,9 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_max(&iter, end);
+ k = bch2_btree_iter_peek_max(trans, &iter, end);
ret = bkey_err(k);
if (ret)
continue;
@@ -1342,7 +1342,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
if (!bkey_extent_is_data(k.k) &&
k.k->type != KEY_TYPE_reservation) {
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
continue;
}
@@ -1380,7 +1380,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bkey_copy(prev.k, cur.k);
have_extent = true;
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(iter.pos.inode, iter.pos.offset + sectors));
}
bch2_trans_iter_exit(trans, &iter);
@@ -1697,17 +1697,17 @@ retry:
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter1, snapshot);
- bch2_btree_iter_set_snapshot(&iter2, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter1, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter2, snapshot);
ret = bch2_inode_find_by_inum_trans(trans, inode_inum(inode), &inode_u);
if (ret)
goto err;
if (inode_u.bi_dir == dir->ei_inode.bi_inum) {
- bch2_btree_iter_set_pos(&iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
+ bch2_btree_iter_set_pos(trans, &iter1, POS(inode_u.bi_dir, inode_u.bi_dir_offset));
- k = bch2_btree_iter_peek_slot(&iter1);
+ k = bch2_btree_iter_peek_slot(trans, &iter1);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1731,7 +1731,7 @@ retry:
* File with multiple hardlinks and our backref is to the wrong
* directory - linear search:
*/
- for_each_btree_key_continue_norestart(iter2, 0, k, ret) {
+ for_each_btree_key_continue_norestart(trans, iter2, 0, k, ret) {
if (k.k->p.inode > dir->ei_inode.bi_inum)
break;
@@ -2179,7 +2179,7 @@ static int bch2_fs_get_tree(struct fs_context *fc)
/* Some options can't be parsed until after the fs is started: */
opts = bch2_opts_empty();
- ret = bch2_parse_mount_opts(c, &opts, NULL, opts_parse->parse_later.buf);
+ ret = bch2_parse_mount_opts(c, &opts, NULL, opts_parse->parse_later.buf, false);
if (ret)
goto err_stop_fs;
@@ -2237,7 +2237,7 @@ got_sb:
/* XXX: create an anonymous device for multi device filesystems */
sb->s_bdev = bdev;
sb->s_dev = bdev->bd_dev;
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
break;
}
@@ -2290,7 +2290,8 @@ err_stop_fs:
goto err;
err_put_super:
- __bch2_fs_stop(c);
+ if (!sb->s_root)
+ __bch2_fs_stop(c);
deactivate_locked_super(sb);
goto err;
}
@@ -2333,6 +2334,8 @@ static int bch2_fs_parse_param(struct fs_context *fc,
int ret = bch2_parse_one_mount_opt(c, &opts->opts,
&opts->parse_later, param->key,
param->string);
+ if (ret)
+ pr_err("Error parsing option %s: %s", param->key, bch2_err_str(ret));
return bch2_err_class(ret);
}
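
Most hunks in this file (and in the rest of the series) are mechanical fallout from a single interface change: the btree iterator helpers now take the owning btree_trans explicitly instead of deriving it from the iterator. Schematically, assuming the change is purely the added parameter:

    /* before */
    struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *);

    /* after */
    struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_trans *,
                                              struct btree_iter *);

    /* so every call site gains a leading trans argument: */
    k = bch2_btree_iter_peek_slot(trans, &iter);
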
diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 091057023fc5..18308f3d64a1 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -186,7 +186,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
{
struct bch_fs *c = trans->c;
struct qstr lostfound_str = QSTR("lost+found");
- struct btree_iter lostfound_iter = { NULL };
+ struct btree_iter lostfound_iter = {};
u64 inum = 0;
unsigned d_type = 0;
int ret;
@@ -295,8 +295,8 @@ create_lostfound:
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
- ret = bch2_btree_iter_traverse(&lostfound_iter);
+ bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot);
+ ret = bch2_btree_iter_traverse(trans, &lostfound_iter);
if (ret)
goto err;
@@ -544,7 +544,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub
new_inode.bi_subvol = subvolid;
int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
- bch2_btree_iter_traverse(&inode_iter) ?:
+ bch2_btree_iter_traverse(trans, &inode_iter) ?:
bch2_inode_write(trans, &inode_iter, &new_inode);
bch2_trans_iter_exit(trans, &inode_iter);
if (ret)
@@ -609,7 +609,7 @@ static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32
struct btree_iter iter = {};
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
- struct bkey_s_c k = bch2_btree_iter_peek_prev_min(&iter, POS(inum, 0));
+ struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0));
bch2_trans_iter_exit(trans, &iter);
int ret = bkey_err(k);
if (ret)
@@ -1421,14 +1421,14 @@ static int check_key_has_inode(struct btree_trans *trans,
if (fsck_err_on(!i,
trans, key_in_missing_inode,
- "key in missing inode:\n %s",
+ "key in missing inode:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
goto delete;
if (fsck_err_on(i && !btree_matches_i_mode(iter->btree_id, i->inode.bi_mode),
trans, key_in_wrong_inode_type,
- "key for wrong inode mode %o:\n %s",
+ "key for wrong inode mode %o:\n%s",
i->inode.bi_mode,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
@@ -1557,7 +1557,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
- struct btree_iter iter1, iter2 = { NULL };
+ struct btree_iter iter1, iter2 = {};
struct bkey_s_c k1, k2;
int ret;
@@ -1566,18 +1566,18 @@ static int overlapping_extents_found(struct btree_trans *trans,
bch2_trans_iter_init(trans, &iter1, btree, pos1,
BTREE_ITER_all_snapshots|
BTREE_ITER_not_extents);
- k1 = bch2_btree_iter_peek_max(&iter1, POS(pos1.inode, U64_MAX));
+ k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX));
ret = bkey_err(k1);
if (ret)
goto err;
- prt_str(&buf, "\n ");
+ prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k1);
if (!bpos_eq(pos1, k1.k->p)) {
- prt_str(&buf, "\n wanted\n ");
+ prt_str(&buf, "\nwanted\n ");
bch2_bpos_to_text(&buf, pos1);
- prt_str(&buf, "\n ");
+ prt_str(&buf, "\n");
bch2_bkey_to_text(&buf, &pos2);
bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
@@ -1586,12 +1586,12 @@ static int overlapping_extents_found(struct btree_trans *trans,
goto err;
}
- bch2_trans_copy_iter(&iter2, &iter1);
+ bch2_trans_copy_iter(trans, &iter2, &iter1);
while (1) {
- bch2_btree_iter_advance(&iter2);
+ bch2_btree_iter_advance(trans, &iter2);
- k2 = bch2_btree_iter_peek_max(&iter2, POS(pos1.inode, U64_MAX));
+ k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX));
ret = bkey_err(k2);
if (ret)
goto err;
@@ -1600,7 +1600,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
break;
}
- prt_str(&buf, "\n ");
+ prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k2);
if (bpos_gt(k2.k->p, pos2.p) ||
@@ -1611,7 +1611,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
goto err;
}
- prt_printf(&buf, "\n overwriting %s extent",
+ prt_printf(&buf, "\noverwriting %s extent",
pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
if (fsck_err(trans, extent_overlapping,
@@ -1632,6 +1632,8 @@ static int overlapping_extents_found(struct btree_trans *trans,
bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
bch2_disk_reservation_put(c, &res);
+ bch_info(c, "repair ret %s", bch2_err_str(ret));
+
if (ret)
goto err;
@@ -1784,14 +1786,14 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
if (fsck_err_on(k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
!bkey_extent_is_reservation(k),
trans, extent_past_end_of_inode,
- "extent type past end of inode %llu:%u, i_size %llu\n %s",
+ "extent type past end of inode %llu:%u, i_size %llu\n%s",
i->inode.bi_inum, i->snapshot, i->inode.bi_size,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
struct btree_iter iter2;
- bch2_trans_copy_iter(&iter2, iter);
- bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
- ret = bch2_btree_iter_traverse(&iter2) ?:
+ bch2_trans_copy_iter(trans, &iter2, iter);
+ bch2_btree_iter_set_snapshot(trans, &iter2, i->snapshot);
+ ret = bch2_btree_iter_traverse(trans, &iter2) ?:
bch2_btree_delete_at(trans, &iter2,
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter2);
@@ -2183,7 +2185,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
BTREE_ID_dirents,
SPOS(k.k->p.inode, k.k->p.offset, *i),
BTREE_ITER_intent);
- ret = bch2_btree_iter_traverse(&delete_iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &delete_iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
hash_info,
&delete_iter,
@@ -2410,7 +2412,7 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
bch2_trans_iter_exit(trans, &parent_iter);
bch2_trans_iter_init(trans, &parent_iter,
BTREE_ID_subvolumes, POS(0, parent), 0);
- k = bch2_btree_iter_peek_slot(&parent_iter);
+ k = bch2_btree_iter_peek_slot(trans, &parent_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -3021,7 +3023,7 @@ long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
if (arg.opts) {
char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
ret = PTR_ERR_OR_ZERO(optstr) ?:
- bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr);
+ bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr, false);
if (!IS_ERR(optstr))
kfree(optstr);
@@ -3129,7 +3131,7 @@ long bch2_ioctl_fsck_online(struct bch_fs *c, struct bch_ioctl_fsck_online arg)
char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
ret = PTR_ERR_OR_ZERO(optstr) ?:
- bch2_parse_mount_opts(c, &thr->opts, NULL, optstr);
+ bch2_parse_mount_opts(c, &thr->opts, NULL, optstr, false);
if (!IS_ERR(optstr))
kfree(optstr);
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 80051073f613..b51d98cf8a80 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -940,7 +940,7 @@ int bch2_inode_create(struct btree_trans *trans,
BTREE_ITER_intent);
struct bkey_s_c k;
again:
- while ((k = bch2_btree_iter_peek(iter)).k &&
+ while ((k = bch2_btree_iter_peek(trans, iter)).k &&
!(ret = bkey_err(k)) &&
bkey_lt(k.k->p, POS(0, max))) {
if (pos < iter->pos.offset)
@@ -951,7 +951,7 @@ again:
* we've found just one:
*/
pos = iter->pos.offset + 1;
- bch2_btree_iter_set_pos(iter, POS(0, pos));
+ bch2_btree_iter_set_pos(trans, iter, POS(0, pos));
}
if (!ret && pos < max)
@@ -967,12 +967,12 @@ again:
/* Retry from start */
pos = start = min;
- bch2_btree_iter_set_pos(iter, POS(0, pos));
+ bch2_btree_iter_set_pos(trans, iter, POS(0, pos));
le32_add_cpu(&cursor->v.gen, 1);
goto again;
found_slot:
- bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
- k = bch2_btree_iter_peek_slot(iter);
+ bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, snapshot));
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret) {
bch2_trans_iter_exit(trans, iter);
@@ -1009,9 +1009,9 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- k = bch2_btree_iter_peek_max(&iter, end);
+ k = bch2_btree_iter_peek_max(trans, &iter, end);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1042,7 +1042,7 @@ err:
int bch2_inode_rm(struct bch_fs *c, subvol_inum inum)
{
struct btree_trans *trans = bch2_trans_get(c);
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_s_c k;
u32 snapshot;
int ret;
@@ -1207,7 +1207,7 @@ int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_i
static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bkey_i_inode_generation delete;
struct bch_inode_unpacked inode_u;
struct bkey_s_c k;
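
The "{ NULL }" to "{}" conversions in this file and in fs.c, fsck.c and namei.c rely on the empty initializer (a GNU extension, standardized in C23) zero-filling the entire struct, which says what is meant more directly than naming only the first member. A trivial illustration:

    struct iter_like {
        void		*path;
        unsigned	flags;
    };

    struct iter_like a = {};		/* every member zeroed */
    struct iter_like b = { NULL };	/* same effect, less obvious */
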
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 6b842c8d21be..cc07729a4b62 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -43,7 +43,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
bch2_bkey_buf_init(&new);
closure_init_stack(&cl);
- k = bch2_btree_iter_peek_slot(iter);
+ k = bch2_btree_iter_peek_slot(trans, iter);
ret = bkey_err(k);
if (ret)
return ret;
@@ -164,12 +164,12 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, iter, snapshot);
/*
* peek_max() doesn't have ideal semantics for extents:
*/
- k = bch2_btree_iter_peek_max(iter, end_pos);
+ k = bch2_btree_iter_peek_max(trans, iter, end_pos);
if (!k.k)
break;
@@ -230,7 +230,7 @@ static int truncate_set_isize(struct btree_trans *trans,
u64 new_i_size,
bool warn)
{
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct bch_inode_unpacked inode_u;
int ret;
@@ -399,7 +399,7 @@ case LOGGED_OP_FINSERT_start:
if (ret)
goto err;
} else {
- bch2_btree_iter_set_pos(&iter, POS(inum.inum, src_offset));
+ bch2_btree_iter_set_pos(trans, &iter, POS(inum.inum, src_offset));
ret = bch2_fpunch_at(trans, &iter, inum, src_offset + len, i_sectors_delta);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -425,12 +425,12 @@ case LOGGED_OP_FINSERT_shift_extents:
if (ret)
goto btree_err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
- bch2_btree_iter_set_pos(&iter, SPOS(inum.inum, pos, snapshot));
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
+ bch2_btree_iter_set_pos(trans, &iter, SPOS(inum.inum, pos, snapshot));
k = insert
- ? bch2_btree_iter_peek_prev_min(&iter, POS(inum.inum, 0))
- : bch2_btree_iter_peek_max(&iter, POS(inum.inum, U64_MAX));
+ ? bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum.inum, 0))
+ : bch2_btree_iter_peek_max(trans, &iter, POS(inum.inum, U64_MAX));
if ((ret = bkey_err(k)))
goto btree_err;
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index f1503df57dc7..fd627c8d1053 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -259,6 +259,7 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans,
&orig->opts,
update_opts,
btree_id, k);
+ op->write.type = BCH_DATA_UPDATE_promote;
/*
* possible errors: -BCH_ERR_nocow_lock_blocked,
* -BCH_ERR_ENOSPC_disk_reservation:
@@ -393,7 +394,7 @@ static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
if (rbio->have_ioref) {
struct bch_dev *ca = bch2_dev_have_ref(rbio->c, rbio->pick.ptr.dev);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
if (rbio->split) {
@@ -908,7 +909,7 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
prt_printf(&buf, "memory gen: %u", gen);
- ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+ ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(trans, &iter)));
if (!ret) {
prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
@@ -976,7 +977,8 @@ retry_pick:
goto err;
}
- if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) && !c->chacha20) {
+ if (unlikely(bch2_csum_type_is_encryption(pick.crc.csum_type)) &&
+ !c->chacha20_key_set) {
struct printbuf buf = PRINTBUF;
bch2_read_err_msg_trans(trans, &buf, orig, read_pos);
prt_printf(&buf, "attempting to read encrypted data without encryption key\n ");
@@ -1002,7 +1004,7 @@ retry_pick:
unlikely(dev_ptr_stale(ca, &pick.ptr))) {
read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
bch2_mark_io_failure(failed, &pick, false);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
goto retry_pick;
}
@@ -1035,7 +1037,7 @@ retry_pick:
*/
if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) {
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
rbio->ret = -BCH_ERR_data_read_buffer_too_small;
goto out_read_done;
}
@@ -1284,12 +1286,12 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &iter, snapshot);
- bch2_btree_iter_set_pos(&iter,
+ bch2_btree_iter_set_pos(trans, &iter,
POS(inum.inum, bvec_iter.bi_sector));
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -1322,13 +1324,14 @@ int __bch2_read(struct btree_trans *trans, struct bch_read_bio *rbio,
ret = __bch2_read_extent(trans, rbio, bvec_iter, iter.pos,
data_btree, k,
offset_into_extent, failed, flags, -1);
+ swap(bvec_iter.bi_size, bytes);
+
if (ret)
goto err;
if (flags & BCH_READ_last_fragment)
break;
- swap(bvec_iter.bi_size, bytes);
bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
err:
if (ret == -BCH_ERR_data_read_retry_csum_err_maybe_userspace)
diff --git a/fs/bcachefs/io_read.h b/fs/bcachefs/io_read.h
index cd21950417f6..c78025d863e0 100644
--- a/fs/bcachefs/io_read.h
+++ b/fs/bcachefs/io_read.h
@@ -137,8 +137,10 @@ static inline void bch2_read_extent(struct btree_trans *trans,
enum btree_id data_btree, struct bkey_s_c k,
unsigned offset_into_extent, unsigned flags)
{
- __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
- data_btree, k, offset_into_extent, NULL, flags, -1);
+ int ret = __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
+ data_btree, k, offset_into_extent, NULL, flags, -1);
+ /* __bch2_read_extent only returns errors if BCH_READ_in_retry is set */
+ WARN(ret, "unhandled error from __bch2_read_extent()");
}
int __bch2_read(struct btree_trans *, struct bch_read_bio *, struct bvec_iter,
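
The wrapper above follows a common kernel pattern: when an error path is unreachable in a given calling context, capture the return value anyway and WARN() instead of silently discarding it. In isolation, with must_succeed() as a hypothetical callee:

    #include <linux/bug.h>

    static int must_succeed(void);

    static void caller(void)
    {
        int ret = must_succeed();

        /* not expected to fail here: complain loudly, don't crash */
        WARN(ret, "unhandled error from must_succeed(): %d", ret);
    }
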
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index 29671075e3f1..a418fa62f09d 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -168,9 +168,9 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
*i_sectors_delta = 0;
*disk_sectors_delta = 0;
- bch2_trans_copy_iter(&iter, extent_iter);
+ bch2_trans_copy_iter(trans, &iter, extent_iter);
- for_each_btree_key_max_continue_norestart(iter,
+ for_each_btree_key_max_continue_norestart(trans, iter,
new->k.p, BTREE_ITER_slots, old, ret) {
s64 sectors = min(new->k.p.offset, old.k->p.offset) -
max(bkey_start_offset(&new->k),
@@ -292,7 +292,7 @@ int bch2_extent_update(struct btree_trans *trans,
* path already traversed at iter->pos because
* bch2_trans_extent_update() will use it to attempt extent merging
*/
- ret = __bch2_btree_iter_traverse(iter);
+ ret = __bch2_btree_iter_traverse(trans, iter);
if (ret)
return ret;
@@ -337,7 +337,7 @@ int bch2_extent_update(struct btree_trans *trans,
if (i_sectors_delta_total)
*i_sectors_delta_total += i_sectors_delta;
- bch2_btree_iter_set_pos(iter, next_pos);
+ bch2_btree_iter_set_pos(trans, iter, next_pos);
return 0;
}
@@ -434,12 +434,6 @@ void bch2_write_op_error(struct bch_write_op *op, u64 offset, const char *fmt, .
printbuf_exit(&buf);
}
-static void bch2_write_csum_err_msg(struct bch_write_op *op)
-{
- bch2_write_op_error(op, op->pos.offset,
- "error verifying existing checksum while rewriting existing data (memory corruption?)");
-}
-
void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
enum bch_data_type type,
const struct bkey_i *k,
@@ -451,6 +445,11 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
BUG_ON(c->opts.nochanges);
bkey_for_each_ptr(ptrs, ptr) {
+ /*
+ * XXX: btree writes should be using io_ref[WRITE], but we
+ * aren't retrying failed btree writes yet (due to device
+ * removal/ro):
+ */
struct bch_dev *ca = nocow
? bch2_dev_have_ref(c, ptr->dev)
: bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
@@ -703,12 +702,19 @@ static void bch2_write_endio(struct bio *bio)
bch2_account_io_completion(ca, BCH_MEMBER_ERROR_write,
wbio->submit_time, !bio->bi_status);
- if (bio->bi_status) {
- bch_err_inum_offset_ratelimited(ca,
- op->pos.inode,
- wbio->inode_offset << 9,
- "data write error: %s",
- bch2_blk_status_to_str(bio->bi_status));
+ if (unlikely(bio->bi_status)) {
+ if (ca)
+ bch_err_inum_offset_ratelimited(ca,
+ op->pos.inode,
+ wbio->inode_offset << 9,
+ "data write error: %s",
+ bch2_blk_status_to_str(bio->bi_status));
+ else
+ bch_err_inum_offset_ratelimited(c,
+ op->pos.inode,
+ wbio->inode_offset << 9,
+ "data write error: %s",
+ bch2_blk_status_to_str(bio->bi_status));
set_bit(wbio->dev, op->failed.d);
op->flags |= BCH_WRITE_io_error;
}
@@ -721,7 +727,7 @@ static void bch2_write_endio(struct bio *bio)
}
if (wbio->have_ioref)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
if (wbio->bounce)
bch2_bio_free_pages_pool(c, bio);
@@ -839,7 +845,7 @@ static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct
{
struct bch_fs *c = op->c;
struct bio *bio = &op->wbio.bio;
- struct nonce nonce = extent_nonce(op->version, op->crc);
+ struct bch_csum csum;
int ret = 0;
BUG_ON(bio_sectors(bio) != op->crc.compressed_size);
@@ -866,7 +872,8 @@ static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct
*/
if (crc_is_compressed(op->crc)) {
/* Last point we can still verify checksum: */
- struct bch_csum csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
+ struct nonce nonce = extent_nonce(op->version, op->crc);
+ csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
goto csum_err;
@@ -905,7 +912,8 @@ static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct
*/
if (bch2_csum_type_is_encryption(op->crc.csum_type) &&
(op->compression_opt || op->crc.csum_type != op->csum_type)) {
- struct bch_csum csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
+ struct nonce nonce = extent_nonce(op->version, op->crc);
+ csum = bch2_checksum_bio(c, op->crc.csum_type, nonce, bio);
if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
goto csum_err;
@@ -919,7 +927,16 @@ static noinline int bch2_write_prep_encoded_data(struct bch_write_op *op, struct
return 0;
csum_err:
- bch2_write_csum_err_msg(op);
+ bch2_write_op_error(op, op->pos.offset,
+ "error verifying existing checksum while moving existing data (memory corruption?)\n"
+ " expected %0llx:%0llx got %0llx:%0llx type %s",
+ op->crc.csum.hi,
+ op->crc.csum.lo,
+ csum.hi,
+ csum.lo,
+ op->crc.csum_type < BCH_CSUM_NR
+ ? __bch2_csum_types[op->crc.csum_type]
+ : "(unknown)");
return -BCH_ERR_data_write_csum;
}
@@ -935,6 +952,9 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
bool page_alloc_failed = false;
int ret, more = 0;
+ if (op->incompressible)
+ op->compression_opt = 0;
+
BUG_ON(!bio_sectors(src));
ec_buf = bch2_writepoint_ec_buf(c, wp);
@@ -1046,12 +1066,13 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
* data can't be modified (by userspace) while it's in
* flight.
*/
- if (bch2_rechecksum_bio(c, src, version, op->crc,
+ ret = bch2_rechecksum_bio(c, src, version, op->crc,
&crc, &op->crc,
src_len >> 9,
bio_sectors(src) - (src_len >> 9),
- op->csum_type))
- goto csum_err;
+ op->csum_type);
+ if (ret)
+ goto err;
/*
* rchecksum_bio sets compression_type on crc from op->crc,
* this isn't always correct as sometimes we're changing
@@ -1061,12 +1082,12 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
crc.nonce = nonce;
} else {
if ((op->flags & BCH_WRITE_data_encoded) &&
- bch2_rechecksum_bio(c, src, version, op->crc,
+ (ret = bch2_rechecksum_bio(c, src, version, op->crc,
NULL, &op->crc,
src_len >> 9,
bio_sectors(src) - (src_len >> 9),
- op->crc.csum_type))
- goto csum_err;
+ op->crc.csum_type)))
+ goto err;
crc.compressed_size = dst_len >> 9;
crc.uncompressed_size = src_len >> 9;
@@ -1125,9 +1146,6 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
do_write:
*_dst = dst;
return more;
-csum_err:
- bch2_write_csum_err_msg(op);
- ret = -BCH_ERR_data_write_csum;
err:
if (to_wbio(dst)->bounce)
bch2_bio_free_pages_pool(c, dst);
@@ -1287,7 +1305,7 @@ retry:
if (ret)
break;
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
break;
@@ -1371,7 +1389,7 @@ retry:
bch2_keylist_push(&op->insert_keys);
if (op->flags & BCH_WRITE_submitted)
break;
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
out:
bch2_trans_iter_exit(trans, &iter);
@@ -1408,7 +1426,7 @@ err:
return;
err_get_ioref:
darray_for_each(buckets, i)
- percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref);
+ percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE]);
/* Fall back to COW path: */
goto out;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index bfdaea6569ae..d8f74b6d0a75 100644
--- a/fs/bcachefs/journal.c
+++ b/fs/bcachefs/journal.c
@@ -62,8 +62,7 @@ static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u6
prt_newline(out);
}
- prt_printf(out, "expires:\t");
- prt_printf(out, "%li jiffies\n", buf->expires - jiffies);
+ prt_printf(out, "expires:\t%li jiffies\n", buf->expires - jiffies);
prt_printf(out, "flags:\t");
if (buf->noflush)
@@ -142,6 +141,8 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
bool stuck = false;
struct printbuf buf = PRINTBUF;
+ buf.atomic++;
+
if (!(error == -BCH_ERR_journal_full ||
error == -BCH_ERR_journal_pin_full) ||
nr_unwritten_journal_entries(j) ||
@@ -167,12 +168,12 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
return stuck;
}
j->err_seq = journal_cur_seq(j);
- spin_unlock(&j->lock);
- bch_err(c, "Journal stuck! Hava a pre-reservation but journal full (error %s)",
- bch2_err_str(error));
- bch2_journal_debug_to_text(&buf, j);
- bch_err(c, "%s", buf.buf);
+ __bch2_journal_debug_to_text(&buf, j);
+ spin_unlock(&j->lock);
+ prt_printf(&buf, bch2_fmt(c, "Journal stuck! Hava a pre-reservation but journal full (error %s)"),
+ bch2_err_str(error));
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_reset(&buf);
bch2_journal_pins_to_text(&buf, j);
@@ -728,8 +729,8 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
struct printbuf buf = PRINTBUF;
bch2_journal_debug_to_text(&buf, j);
- bch_err(c, "Journal stuck? Waited for 10 seconds...\n%s",
- buf.buf);
+ bch2_print_string_as_lines(KERN_ERR, buf.buf);
+ prt_printf(&buf, bch2_fmt(c, "Journal stuck? Waited for 10 seconds, err %s"), bch2_err_str(ret));
printbuf_exit(&buf);
closure_wait_event(&j->async_wait,
@@ -1314,7 +1315,7 @@ int bch2_fs_journal_alloc(struct bch_fs *c)
int ret = bch2_dev_journal_alloc(ca, true);
if (ret) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return ret;
}
}
@@ -1403,6 +1404,14 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
nr = cur_seq - last_seq;
+ /*
+ * Extra fudge factor, in case we crashed when the journal pin fifo was
+ * nearly or completely full. We'll need to be able to open additional
+ * journal entries (at least a few) in order for journal replay to get
+ * going:
+ */
+ nr += nr / 4;
+
if (nr + 1 > j->pin.size) {
free_fifo(&j->pin);
init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
@@ -1460,11 +1469,9 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
j->reservations.idx = journal_cur_seq(j);
c->last_bucket_seq_cleanup = journal_cur_seq(j);
-
- bch2_journal_space_available(j);
spin_unlock(&j->lock);
- return bch2_journal_reclaim_start(j);
+ return 0;
}
/* init/exit: */
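
To make the new fudge factor concrete, a sketch of the resulting sizing with made-up numbers: replaying 1000 journal entries becomes nr = 1250 after the 25% bump, and the pin fifo is then sized to roundup_pow_of_two(1251) = 2048 entries:

    #include <linux/log2.h>
    #include <linux/types.h>

    static unsigned long pin_fifo_size(u64 nr)
    {
        nr += nr / 4;	/* headroom for entries opened during replay */

        return roundup_pow_of_two(nr + 1);	/* 1000 -> 1250 -> 2048 */
    }
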
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 4ed6137f0439..2a54ac79189b 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -214,12 +214,12 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
fsck_err_on(same_device,
c, journal_entry_dup_same_device,
- "duplicate journal entry on same device\n %s",
+ "duplicate journal entry on same device\n%s",
buf.buf);
fsck_err_on(not_identical,
c, journal_entry_replicas_data_mismatch,
- "found duplicate but non identical journal entries\n %s",
+ "found duplicate but non identical journal entries\n%s",
buf.buf);
if (entry_ptr.csum_good && !identical)
@@ -308,8 +308,8 @@ static void journal_entry_err_msg(struct printbuf *out,
break; \
case WRITE: \
bch2_sb_error_count(c, BCH_FSCK_ERR_##_err); \
- bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
- if (bch2_fs_inconsistent(c)) { \
+ if (bch2_fs_inconsistent(c, \
+ "corrupt metadata before write: %s\n", _buf.buf)) {\
ret = -BCH_ERR_fsck_errors_not_fixed; \
goto fsck_err; \
} \
@@ -764,6 +764,23 @@ static void journal_entry_overwrite_to_text(struct printbuf *out, struct bch_fs
journal_entry_btree_keys_to_text(out, c, entry);
}
+static int journal_entry_log_bkey_validate(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ unsigned version, int big_endian,
+ struct bkey_validate_context from)
+{
+ from.flags = 0;
+ return journal_entry_btree_keys_validate(c, jset, entry,
+ version, big_endian, from);
+}
+
+static void journal_entry_log_bkey_to_text(struct printbuf *out, struct bch_fs *c,
+ struct jset_entry *entry)
+{
+ journal_entry_btree_keys_to_text(out, c, entry);
+}
+
static int journal_entry_write_buffer_keys_validate(struct bch_fs *c,
struct jset *jset,
struct jset_entry *entry,
@@ -1201,7 +1218,7 @@ static CLOSURE_CALLBACK(bch2_journal_read_device)
out:
bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
kvfree(buf.data);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
closure_return(cl);
return;
err:
@@ -1236,7 +1253,7 @@ int bch2_journal_read(struct bch_fs *c,
if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
ca->mi.state == BCH_MEMBER_STATE_ro) &&
- percpu_ref_tryget(&ca->io_ref))
+ percpu_ref_tryget(&ca->io_ref[READ]))
closure_call(&ca->journal.read,
bch2_journal_read_device,
system_unbound_wq,
@@ -1371,8 +1388,8 @@ int bch2_journal_read(struct bch_fs *c,
missing_end = seq - 1;
fsck_err(c, journal_entries_missing,
"journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
- " prev at %s\n"
- " next at %s, continue?",
+ "prev at %s\n"
+ "next at %s, continue?",
missing_start, missing_end,
*last_seq, *blacklist_seq - 1,
buf1.buf, buf2.buf);
@@ -1426,7 +1443,7 @@ int bch2_journal_read(struct bch_fs *c,
!bch2_replicas_marked(c, &replicas.e) &&
(le64_to_cpu(i->j.seq) == *last_seq ||
fsck_err(c, journal_entry_replicas_not_marked,
- "superblock not marked as containing replicas for journal entry %llu\n %s",
+ "superblock not marked as containing replicas for journal entry %llu\n%s",
le64_to_cpu(i->j.seq), buf.buf))) {
ret = bch2_mark_replicas(c, &replicas.e);
if (ret)
@@ -1443,7 +1460,7 @@ fsck_err:
static void journal_advance_devs_to_next_bucket(struct journal *j,
struct dev_alloc_list *devs,
- unsigned sectors, u64 seq)
+ unsigned sectors, __le64 seq)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
@@ -1623,7 +1640,8 @@ static CLOSURE_CALLBACK(journal_write_done)
: j->noflush_write_time, j->write_start_time);
if (!w->devs_written.nr) {
- bch_err(c, "unable to write journal to sufficient devices");
+ if (!bch2_journal_error(j))
+ bch_err(c, "unable to write journal to sufficient devices");
err = -BCH_ERR_journal_write_err;
} else {
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
@@ -1750,7 +1768,7 @@ static void journal_write_endio(struct bio *bio)
}
closure_put(&w->io);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[WRITE]);
}
static CLOSURE_CALLBACK(journal_write_submit)
@@ -1825,7 +1843,7 @@ static CLOSURE_CALLBACK(journal_write_preflush)
if (w->separate_flush) {
for_each_rw_member(c, ca) {
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[WRITE]);
struct journal_device *ja = &ca->journal;
struct bio *bio = &ja->bio[w->idx]->bio;
@@ -2081,12 +2099,12 @@ CLOSURE_CALLBACK(bch2_journal_write)
struct printbuf buf = PRINTBUF;
buf.atomic++;
+ __bch2_journal_debug_to_text(&buf, j);
+ spin_unlock(&j->lock);
prt_printf(&buf, bch2_fmt(c, "Unable to allocate journal write at seq %llu for %zu sectors: %s"),
le64_to_cpu(w->data->seq),
vstruct_sectors(w->data, c->block_bits),
bch2_err_str(ret));
- __bch2_journal_debug_to_text(&buf, j);
- spin_unlock(&j->lock);
bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_exit(&buf);
}
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
index a299d9ec8ee4..2f63fc6d456f 100644
--- a/fs/bcachefs/lru.c
+++ b/fs/bcachefs/lru.c
@@ -101,8 +101,7 @@ int bch2_lru_check_set(struct btree_trans *trans,
goto err;
if (fsck_err(trans, alloc_key_to_missing_lru_entry,
- "missing %s lru entry\n"
- " %s",
+ "missing %s lru entry\n%s",
bch2_lru_types[lru_type(lru_k)],
(bch2_bkey_val_to_text(&buf, c, referring_k), buf.buf))) {
ret = bch2_lru_set(trans, lru_id, dev_bucket, time);
@@ -190,8 +189,8 @@ static int bch2_check_lru_key(struct btree_trans *trans,
if (fsck_err(trans, lru_entry_bad,
"incorrect lru entry: lru %s time %llu\n"
- " %s\n"
- " for %s",
+ "%s\n"
+ "for %s",
bch2_lru_types[type],
lru_pos_time(lru_k.k->p),
(bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),
diff --git a/fs/bcachefs/migrate.c b/fs/bcachefs/migrate.c
index 57ad662871ba..90dcf80bd64a 100644
--- a/fs/bcachefs/migrate.c
+++ b/fs/bcachefs/migrate.c
@@ -130,7 +130,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c,
retry:
ret = 0;
while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(&iter)) &&
+ (b = bch2_btree_iter_peek_node(trans, &iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
bch2_progress_update_iter(trans, progress, &iter, "dropping metadata");
@@ -154,7 +154,7 @@ retry:
if (ret)
break;
next:
- bch2_btree_iter_next_node(&iter);
+ bch2_btree_iter_next_node(trans, &iter);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 8fcdc6984f6e..fc396b9fa754 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -528,6 +528,37 @@ int bch2_move_ratelimit(struct moving_context *ctxt)
return 0;
}
+/*
+ * Move requires non-extents iterators, and there's also no need for it to
+ * signal indirect_extent_missing_error:
+ */
+static struct bkey_s_c bch2_lookup_indirect_extent_for_move(struct btree_trans *trans,
+ struct btree_iter *iter,
+ struct bkey_s_c_reflink_p p)
+{
+ if (unlikely(REFLINK_P_ERROR(p.v)))
+ return bkey_s_c_null;
+
+ struct bpos reflink_pos = POS(0, REFLINK_P_IDX(p.v));
+
+ bch2_trans_iter_init(trans, iter,
+ BTREE_ID_reflink, reflink_pos,
+ BTREE_ITER_not_extents);
+
+ struct bkey_s_c k = bch2_btree_iter_peek(trans, iter);
+ if (!k.k || bkey_err(k)) {
+ bch2_trans_iter_exit(trans, iter);
+ return k;
+ }
+
+ if (bkey_lt(reflink_pos, bkey_start_pos(k.k))) {
+ bch2_trans_iter_exit(trans, iter);
+ return bkey_s_c_null;
+ }
+
+ return k;
+}
+
static int bch2_move_data_btree(struct moving_context *ctxt,
struct bpos start,
struct bpos end,
@@ -572,7 +603,7 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
if (!k.k)
break;
@@ -592,17 +623,16 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
k.k->type == KEY_TYPE_reflink_p &&
REFLINK_P_MAY_UPDATE_OPTIONS(bkey_s_c_to_reflink_p(k).v)) {
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
- s64 offset_into_extent = 0;
bch2_trans_iter_exit(trans, &reflink_iter);
- k = bch2_lookup_indirect_extent(trans, &reflink_iter, &offset_into_extent, p, true, 0);
+ k = bch2_lookup_indirect_extent_for_move(trans, &reflink_iter, p);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
- if (bkey_deleted(k.k))
+ if (!k.k)
goto next_nondata;
/*
@@ -611,7 +641,6 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
* pointer - need to fixup iter->k
*/
extent_iter = &reflink_iter;
- offset_into_extent = 0;
}
if (!bkey_extent_is_direct_data(k.k))
@@ -652,7 +681,7 @@ next:
if (ctxt->stats)
atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
next_nondata:
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
}
bch2_trans_iter_exit(trans, &reflink_iter);
@@ -765,7 +794,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
bch2_trans_begin(trans);
- k = bch2_btree_iter_peek(&bp_iter);
+ k = bch2_btree_iter_peek(trans, &bp_iter);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
@@ -847,7 +876,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
if (ctxt->stats)
atomic64_add(sectors, &ctxt->stats->sectors_seen);
next:
- bch2_btree_iter_advance(&bp_iter);
+ bch2_btree_iter_advance(trans, &bp_iter);
}
err:
bch2_trans_iter_exit(trans, &bp_iter);
@@ -962,7 +991,7 @@ static int bch2_move_btree(struct bch_fs *c,
retry:
ret = 0;
while (bch2_trans_begin(trans),
- (b = bch2_btree_iter_peek_node(&iter)) &&
+ (b = bch2_btree_iter_peek_node(trans, &iter)) &&
!(ret = PTR_ERR_OR_ZERO(b))) {
if (kthread && kthread_should_stop())
break;
@@ -982,7 +1011,7 @@ retry:
if (ret)
break;
next:
- bch2_btree_iter_next_node(&iter);
+ bch2_btree_iter_next_node(trans, &iter);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index 5126c870ce5b..159410c50861 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -280,7 +280,11 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
s64 wait = S64_MAX, fragmented_allowed, fragmented;
for_each_rw_member(c, ca) {
- struct bch_dev_usage usage = bch2_dev_usage_read(ca);
+ struct bch_dev_usage_full usage_full = bch2_dev_usage_full_read(ca);
+ struct bch_dev_usage usage;
+
+ for (unsigned i = 0; i < BCH_DATA_NR; i++)
+ usage.buckets[i] = usage_full.d[i].buckets;
fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
ca->mi.bucket_size) >> 1);
@@ -288,7 +292,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
for (unsigned i = 0; i < BCH_DATA_NR; i++)
if (data_type_movable(i))
- fragmented += usage.d[i].fragmented;
+ fragmented += usage_full.d[i].fragmented;
wait = min(wait, max(0LL, fragmented_allowed - fragmented));
}
diff --git a/fs/bcachefs/namei.c b/fs/bcachefs/namei.c
index 93246ad31541..0d65ea96f7a2 100644
--- a/fs/bcachefs/namei.c
+++ b/fs/bcachefs/namei.c
@@ -28,8 +28,8 @@ int bch2_create_trans(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter inode_iter = {};
subvol_inum new_inum = dir;
u64 now = bch2_current_time(c);
u64 cpu = raw_smp_processor_id();
@@ -127,8 +127,8 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;
- bch2_btree_iter_set_snapshot(&dir_iter, dir_snapshot);
- ret = bch2_btree_iter_traverse(&dir_iter);
+ bch2_btree_iter_set_snapshot(trans, &dir_iter, dir_snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dir_iter);
if (ret)
goto err;
}
@@ -177,9 +177,9 @@ int bch2_create_trans(struct btree_trans *trans,
new_inode->bi_depth = dir_u->bi_depth + 1;
inode_iter.flags &= ~BTREE_ITER_all_snapshots;
- bch2_btree_iter_set_snapshot(&inode_iter, snapshot);
+ bch2_btree_iter_set_snapshot(trans, &inode_iter, snapshot);
- ret = bch2_btree_iter_traverse(&inode_iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &inode_iter) ?:
bch2_inode_write(trans, &inode_iter, new_inode);
err:
bch2_trans_iter_exit(trans, &inode_iter);
@@ -193,8 +193,8 @@ int bch2_link_trans(struct btree_trans *trans,
const struct qstr *name)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter inode_iter = {};
struct bch_hash_info dir_hash;
u64 now = bch2_current_time(c);
u64 dir_offset = 0;
@@ -253,9 +253,9 @@ int bch2_unlink_trans(struct btree_trans *trans,
bool deleting_subvol)
{
struct bch_fs *c = trans->c;
- struct btree_iter dir_iter = { NULL };
- struct btree_iter dirent_iter = { NULL };
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter dir_iter = {};
+ struct btree_iter dirent_iter = {};
+ struct btree_iter inode_iter = {};
struct bch_hash_info dir_hash;
subvol_inum inum;
u64 now = bch2_current_time(c);
@@ -301,7 +301,7 @@ int bch2_unlink_trans(struct btree_trans *trans,
if (ret)
goto err;
- k = bch2_btree_iter_peek_slot(&dirent_iter);
+ k = bch2_btree_iter_peek_slot(trans, &dirent_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -310,8 +310,8 @@ int bch2_unlink_trans(struct btree_trans *trans,
* If we're deleting a subvolume, we need to really delete the
* dirent, not just emit a whiteout in the current snapshot:
*/
- bch2_btree_iter_set_snapshot(&dirent_iter, k.k->p.snapshot);
- ret = bch2_btree_iter_traverse(&dirent_iter);
+ bch2_btree_iter_set_snapshot(trans, &dirent_iter, k.k->p.snapshot);
+ ret = bch2_btree_iter_traverse(trans, &dirent_iter);
if (ret)
goto err;
} else {
@@ -390,10 +390,10 @@ int bch2_rename_trans(struct btree_trans *trans,
enum bch_rename_mode mode)
{
struct bch_fs *c = trans->c;
- struct btree_iter src_dir_iter = { NULL };
- struct btree_iter dst_dir_iter = { NULL };
- struct btree_iter src_inode_iter = { NULL };
- struct btree_iter dst_inode_iter = { NULL };
+ struct btree_iter src_dir_iter = {};
+ struct btree_iter dst_dir_iter = {};
+ struct btree_iter src_inode_iter = {};
+ struct btree_iter dst_inode_iter = {};
struct bch_hash_info src_hash, dst_hash;
subvol_inum src_inum, dst_inum;
u64 src_offset, dst_offset;
@@ -666,7 +666,7 @@ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;
- struct btree_iter bp_iter = { NULL };
+ struct btree_iter bp_iter = {};
int ret = 0;
if (inode_points_to_dirent(target, d))
@@ -700,9 +700,9 @@ static int bch2_check_dirent_inode_dirent(struct btree_trans *trans,
if (bch2_inode_should_have_single_bp(target) &&
!fsck_err(trans, inode_wrong_backpointer,
- "dirent points to inode that does not point back:\n %s",
+ "dirent points to inode that does not point back:\n%s",
(bch2_bkey_val_to_text(&buf, c, d.s_c),
- prt_printf(&buf, "\n "),
+ prt_newline(&buf),
bch2_inode_unpacked_to_text(&buf, target),
buf.buf)))
goto err;
diff --git a/fs/bcachefs/opts.c b/fs/bcachefs/opts.c
index 81fd6b7977d3..af3258814822 100644
--- a/fs/bcachefs/opts.c
+++ b/fs/bcachefs/opts.c
@@ -44,7 +44,7 @@ const char * const __bch2_btree_ids[] = {
NULL
};
-static const char * const __bch2_csum_types[] = {
+const char * const __bch2_csum_types[] = {
BCH_CSUM_TYPES()
NULL
};
@@ -482,14 +482,12 @@ void bch2_opts_to_text(struct printbuf *out,
int bch2_opt_check_may_set(struct bch_fs *c, struct bch_dev *ca, int id, u64 v)
{
- lockdep_assert_held(&c->state_lock);
-
int ret = 0;
switch (id) {
case Opt_state:
if (ca)
- return __bch2_dev_set_state(c, ca, v, BCH_FORCE_IF_DEGRADED);
+ return bch2_dev_set_state(c, ca, v, BCH_FORCE_IF_DEGRADED);
break;
case Opt_compression:
@@ -551,14 +549,15 @@ int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts,
goto bad_opt;
ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v, &err);
- if (ret == -BCH_ERR_option_needs_open_fs && parse_later) {
- prt_printf(parse_later, "%s=%s,", name, val);
- if (parse_later->allocation_failure) {
- ret = -ENOMEM;
- goto out;
+ if (ret == -BCH_ERR_option_needs_open_fs) {
+ ret = 0;
+
+ if (parse_later) {
+ prt_printf(parse_later, "%s=%s,", name, val);
+ if (parse_later->allocation_failure)
+ ret = -ENOMEM;
}
- ret = 0;
goto out;
}
@@ -569,28 +568,24 @@ int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts,
bch2_opt_set_by_id(opts, id, v);
ret = 0;
- goto out;
-
+out:
+ printbuf_exit(&err);
+ return ret;
bad_opt:
- pr_err("Bad mount option %s", name);
ret = -BCH_ERR_option_name;
goto out;
-
bad_val:
- pr_err("Invalid mount option %s", err.buf);
ret = -BCH_ERR_option_value;
-
-out:
- printbuf_exit(&err);
- return ret;
+ goto out;
}
int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
- struct printbuf *parse_later, char *options)
+ struct printbuf *parse_later, char *options,
+ bool ignore_unknown)
{
char *copied_opts, *copied_opts_start;
char *opt, *name, *val;
- int ret;
+ int ret = 0;
if (!options)
return 0;
@@ -615,14 +610,14 @@ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
val = opt;
ret = bch2_parse_one_mount_opt(c, opts, parse_later, name, val);
- if (ret < 0)
- goto out;
+ if (ret == -BCH_ERR_option_name && ignore_unknown)
+ ret = 0;
+ if (ret) {
+ pr_err("Error parsing option %s: %s", name, bch2_err_str(ret));
+ break;
+ }
}
- ret = 0;
- goto out;
-
-out:
kfree(copied_opts_start);
return ret;
}
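
The restructured error handling above sits inside the usual strsep()-based option walk. A reduced sketch of the loop shape, with parse_one() standing in for bch2_parse_one_mount_opt() and the deferral/ignore_unknown details elided:

    #include <linux/string.h>

    static int parse_one(const char *name, const char *val);	/* stand-in */

    static int parse_opts(char *options)	/* "a=b,ro,...", modified in place */
    {
        char *opt, *name, *val;
        int ret = 0;

        while ((opt = strsep(&options, ",")) != NULL) {
            name = strsep(&opt, "=");
            val  = opt;			/* NULL when no '=' was given */

            ret = parse_one(name, val);
            if (ret)
                break;
        }
        return ret;
    }
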
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index bb621804d45a..4d06313076ff 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -16,6 +16,7 @@ extern const char * const bch2_version_upgrade_opts[];
extern const char * const bch2_sb_features[];
extern const char * const bch2_sb_compat[];
extern const char * const __bch2_btree_ids[];
+extern const char * const __bch2_csum_types[];
extern const char * const __bch2_csum_opts[];
extern const char * const __bch2_compression_types[];
extern const char * const bch2_compression_opts[];
@@ -635,7 +636,7 @@ int bch2_opts_check_may_set(struct bch_fs *);
int bch2_parse_one_mount_opt(struct bch_fs *, struct bch_opts *,
struct printbuf *, const char *, const char *);
int bch2_parse_mount_opts(struct bch_fs *, struct bch_opts *, struct printbuf *,
- char *);
+ char *, bool);
/* inode opts: */
diff --git a/fs/bcachefs/printbuf.c b/fs/bcachefs/printbuf.c
index 4cf5a2af1e6f..3302bbc78a09 100644
--- a/fs/bcachefs/printbuf.c
+++ b/fs/bcachefs/printbuf.c
@@ -277,6 +277,25 @@ void bch2_printbuf_indent_add(struct printbuf *buf, unsigned spaces)
}
/**
+ * bch2_printbuf_indent_add_nextline() - add to the current indent level for
+ * subsequent lines
+ *
+ * @buf: printbuf to control
+ * @spaces: number of spaces to add to the current indent level
+ *
+ * Subsequent lines - not the current line - will be indented by @spaces more
+ * spaces.
+ */
+void bch2_printbuf_indent_add_nextline(struct printbuf *buf, unsigned spaces)
+{
+ if (WARN_ON_ONCE(buf->indent + spaces < buf->indent))
+ spaces = 0;
+
+ buf->indent += spaces;
+ buf->has_indent_or_tabstops = true;
+}
+
+/**
* bch2_printbuf_indent_sub() - subtract from the current indent level
*
* @buf: printbuf to control
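
A hypothetical use of the new helper: print a label on the current line, then an indented multi-line body beneath it (prt_printf() and bch2_printbuf_indent_sub() are the existing printbuf API):

    static void print_flags(struct printbuf *out)
    {
        prt_printf(out, "flags:");
        bch2_printbuf_indent_add_nextline(out, 2);
        prt_printf(out, "\nnoflush\nneed_flush_to_write_buffer");
        bch2_printbuf_indent_sub(out, 2);
    }
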
diff --git a/fs/bcachefs/printbuf.h b/fs/bcachefs/printbuf.h
index d0dd398baa2b..1ca476adbf6f 100644
--- a/fs/bcachefs/printbuf.h
+++ b/fs/bcachefs/printbuf.h
@@ -112,6 +112,7 @@ void bch2_printbuf_tabstop_pop(struct printbuf *);
int bch2_printbuf_tabstop_push(struct printbuf *, unsigned);
void bch2_printbuf_indent_add(struct printbuf *, unsigned);
+void bch2_printbuf_indent_add_nextline(struct printbuf *, unsigned);
void bch2_printbuf_indent_sub(struct printbuf *, unsigned);
void bch2_prt_newline(struct printbuf *);
diff --git a/fs/bcachefs/progress.c b/fs/bcachefs/progress.c
index bafd1c91a802..d09898566abe 100644
--- a/fs/bcachefs/progress.c
+++ b/fs/bcachefs/progress.c
@@ -16,10 +16,8 @@ void bch2_progress_init(struct progress_indicator_state *s,
if (!(btree_id_mask & BIT_ULL(i)))
continue;
- struct disk_accounting_pos acc = {
- .type = BCH_DISK_ACCOUNTING_btree,
- .btree.id = i,
- };
+ struct disk_accounting_pos acc;
+ disk_accounting_key_init(acc, btree, .id = i);
u64 v;
bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
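
disk_accounting_key_init() replaces the designated initializer here and in rebalance.c below; the presumed rationale is that disk_accounting_pos is type-punned to a bpos and compared bytewise, so padding bytes must be zeroed too, which a memset-based helper guarantees. A sketch of the macro's assumed shape (names illustrative):

    #define acct_key_init(_k, _type, ...)				\
    do {								\
        memset(&(_k), 0, sizeof(_k));					\
        (_k).type  = BCH_DISK_ACCOUNTING_##_type;			\
        (_k)._type = (struct bch_acct_##_type) { __VA_ARGS__ };	\
    } while (0)
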
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index 8b857fc33244..3d4755d73af7 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -516,7 +516,7 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
bch2_quota_acct(c, bch_qid(&u), Q_INO, 1,
KEY_TYPE_QUOTA_NOCHECK);
advance:
- bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+ bch2_btree_iter_set_pos(trans, iter, bpos_nosnap_successor(iter->pos));
return 0;
}
diff --git a/fs/bcachefs/rebalance.c b/fs/bcachefs/rebalance.c
index 29a569384146..c63fa53f30d2 100644
--- a/fs/bcachefs/rebalance.c
+++ b/fs/bcachefs/rebalance.c
@@ -233,7 +233,7 @@ int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -281,7 +281,7 @@ static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum,
bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_slot(&iter);
+ k = bch2_btree_iter_peek_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -301,7 +301,7 @@ static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
struct btree_iter *work_iter)
{
return !kthread_should_stop()
- ? bch2_btree_iter_peek(work_iter)
+ ? bch2_btree_iter_peek(trans, work_iter)
: bkey_s_c_null;
}
@@ -335,7 +335,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
work_pos,
BTREE_ITER_all_snapshots);
- struct bkey_s_c k = bch2_btree_iter_peek_slot(extent_iter);
+ struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, extent_iter);
if (bkey_err(k))
return k;
@@ -511,7 +511,7 @@ static int do_rebalance(struct moving_context *ctxt)
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct bch_fs_rebalance *r = &c->rebalance;
- struct btree_iter rebalance_work_iter, extent_iter = { NULL };
+ struct btree_iter rebalance_work_iter, extent_iter = {};
struct bkey_s_c k;
int ret = 0;
@@ -552,7 +552,7 @@ static int do_rebalance(struct moving_context *ctxt)
if (ret)
break;
- bch2_btree_iter_advance(&rebalance_work_iter);
+ bch2_btree_iter_advance(trans, &rebalance_work_iter);
}
bch2_trans_iter_exit(trans, &extent_iter);
@@ -600,12 +600,13 @@ void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
struct bch_fs_rebalance *r = &c->rebalance;
/* print pending work */
- struct disk_accounting_pos acc = { .type = BCH_DISK_ACCOUNTING_rebalance_work, };
+ struct disk_accounting_pos acc;
+ disk_accounting_key_init(acc, rebalance_work);
u64 v;
bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
prt_printf(out, "pending work:\t");
- prt_human_readable_u64(out, v);
+ prt_human_readable_u64(out, v << 9);
prt_printf(out, "\n\n");
prt_str(out, bch2_rebalance_state_strs[r->state]);
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index 266c5770c824..d2b07f602da9 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -198,7 +198,7 @@ static int bch2_journal_replay_accounting_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
BTREE_ITER_intent);
- int ret = bch2_btree_iter_traverse(&iter);
+ int ret = bch2_btree_iter_traverse(trans, &iter);
if (ret)
goto out;
@@ -261,7 +261,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,
iter_flags);
- ret = bch2_btree_iter_traverse(&iter);
+ ret = bch2_btree_iter_traverse(trans, &iter);
if (ret)
goto out;
@@ -270,7 +270,7 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
bch2_trans_iter_exit(trans, &iter);
bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, 0, iter_flags);
- ret = bch2_btree_iter_traverse(&iter) ?:
+ ret = bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_increase_depth(trans, iter.path, 0) ?:
-BCH_ERR_transaction_restart_nested;
goto out;
@@ -389,9 +389,9 @@ int bch2_journal_replay(struct bch_fs *c)
* Now, replay any remaining keys in the order in which they appear in
* the journal, unpinning those journal entries as we go:
*/
- sort(keys_sorted.data, keys_sorted.nr,
- sizeof(keys_sorted.data[0]),
- journal_sort_seq_cmp, NULL);
+ sort_nonatomic(keys_sorted.data, keys_sorted.nr,
+ sizeof(keys_sorted.data[0]),
+ journal_sort_seq_cmp, NULL);
darray_for_each(keys_sorted, kp) {
cond_resched();
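
sort_nonatomic() takes the same arguments as sort() but is allowed to reschedule, which makes it the right choice for large arrays sorted in process context, as here. Minimal usage sketch:

    #include <linux/sort.h>
    #include <linux/types.h>

    static int cmp_u64(const void *a, const void *b)
    {
        u64 x = *(const u64 *)a, y = *(const u64 *)b;

        return x < y ? -1 : x > y;
    }

    static void sort_seqs(u64 *seqs, size_t nr)
    {
        sort_nonatomic(seqs, nr, sizeof(*seqs), cmp_u64, NULL);
    }
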
diff --git a/fs/bcachefs/recovery_passes.c b/fs/bcachefs/recovery_passes.c
index 0b3c951c32da..593ff142530d 100644
--- a/fs/bcachefs/recovery_passes.c
+++ b/fs/bcachefs/recovery_passes.c
@@ -234,28 +234,22 @@ static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
int bch2_run_online_recovery_passes(struct bch_fs *c)
{
- int ret = 0;
-
- down_read(&c->state_lock);
-
for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
struct recovery_pass_fn *p = recovery_pass_fns + i;
if (!(p->when & PASS_ONLINE))
continue;
- ret = bch2_run_recovery_pass(c, i);
+ int ret = bch2_run_recovery_pass(c, i);
if (bch2_err_matches(ret, BCH_ERR_restart_recovery)) {
i = c->curr_recovery_pass;
continue;
}
if (ret)
- break;
+ return ret;
}
- up_read(&c->state_lock);
-
- return ret;
+ return 0;
}
int bch2_run_recovery_passes(struct bch_fs *c)
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 68172c6eba21..710178e3da4c 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -193,10 +193,10 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans,
if (ret)
goto err;
- prt_printf(&buf, "-%llu\n ", (missing_pos.offset + (missing_end - missing_start)) << 9);
+ prt_printf(&buf, "-%llu\n", (missing_pos.offset + (missing_end - missing_start)) << 9);
bch2_bkey_val_to_text(&buf, c, p.s_c);
- prt_printf(&buf, "\n missing reflink btree range %llu-%llu",
+ prt_printf(&buf, "\nmissing reflink btree range %llu-%llu",
missing_start, missing_end);
if (fsck_err(trans, reflink_p_to_missing_reflink_v, "%s", buf.buf)) {
@@ -323,10 +323,10 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
__le64 *refcount = bkey_refcount(bkey_i_to_s(new));
if (!*refcount && (flags & BTREE_TRIGGER_overwrite)) {
bch2_bkey_val_to_text(&buf, c, p.s_c);
- prt_printf(&buf, "\n ");
+ prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
log_fsck_err(trans, reflink_refcount_underflow,
- "indirect extent refcount underflow while marking\n %s",
+ "indirect extent refcount underflow while marking\n%s",
buf.buf);
goto next;
}
@@ -495,7 +495,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
bool reflink_p_may_update_opts_field)
{
struct bch_fs *c = trans->c;
- struct btree_iter reflink_iter = { NULL };
+ struct btree_iter reflink_iter = {};
struct bkey_s_c k;
struct bkey_i *r_v;
struct bkey_i_reflink_p *r_p;
@@ -507,7 +507,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_prev(&reflink_iter);
+ k = bch2_btree_iter_peek_prev(trans, &reflink_iter);
ret = bkey_err(k);
if (ret)
goto err;
@@ -569,12 +569,13 @@ err:
return ret;
}
-static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
+static struct bkey_s_c get_next_src(struct btree_trans *trans,
+ struct btree_iter *iter, struct bpos end)
{
struct bkey_s_c k;
int ret;
- for_each_btree_key_max_continue_norestart(*iter, end, 0, k, ret) {
+ for_each_btree_key_max_continue_norestart(trans, *iter, end, 0, k, ret) {
if (bkey_extent_is_unwritten(k))
continue;
@@ -583,7 +584,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
}
if (bkey_ge(iter->pos, end))
- bch2_btree_iter_set_pos(iter, end);
+ bch2_btree_iter_set_pos(trans, iter, end);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
@@ -647,27 +648,27 @@ s64 bch2_remap_range(struct bch_fs *c,
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&src_iter, src_snapshot);
+ bch2_btree_iter_set_snapshot(trans, &src_iter, src_snapshot);
ret = bch2_subvolume_get_snapshot(trans, dst_inum.subvol,
&dst_snapshot);
if (ret)
continue;
- bch2_btree_iter_set_snapshot(&dst_iter, dst_snapshot);
+ bch2_btree_iter_set_snapshot(trans, &dst_iter, dst_snapshot);
if (dst_inum.inum < src_inum.inum) {
/* Avoid some lock cycle transaction restarts */
- ret = bch2_btree_iter_traverse(&dst_iter);
+ ret = bch2_btree_iter_traverse(trans, &dst_iter);
if (ret)
continue;
}
dst_done = dst_iter.pos.offset - dst_start.offset;
src_want = POS(src_start.inode, src_start.offset + dst_done);
- bch2_btree_iter_set_pos(&src_iter, src_want);
+ bch2_btree_iter_set_pos(trans, &src_iter, src_want);
- src_k = get_next_src(&src_iter, src_end);
+ src_k = get_next_src(trans, &src_iter, src_end);
ret = bkey_err(src_k);
if (ret)
continue;
@@ -738,7 +739,7 @@ s64 bch2_remap_range(struct bch_fs *c,
do {
struct bch_inode_unpacked inode_u;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
bch2_trans_begin(trans);
@@ -795,8 +796,8 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
if (fsck_err_on(r->refcount != le64_to_cpu(*refcount),
trans, reflink_v_refcount_wrong,
"reflink key has wrong refcount:\n"
- " %s\n"
- " should be %u",
+ "%s\n"
+ "should be %u",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
r->refcount)) {
struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index 67455beb8358..5d43e3504386 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -5,8 +5,7 @@
enum bch_fsck_flags {
FSCK_CAN_FIX = 1 << 0,
FSCK_CAN_IGNORE = 1 << 1,
- FSCK_NO_RATELIMIT = 1 << 2,
- FSCK_AUTOFIX = 1 << 3,
+ FSCK_AUTOFIX = 1 << 2,
};
#define BCH_SB_ERRS() \
@@ -311,13 +310,14 @@ enum bch_fsck_flags {
x(accounting_key_replicas_nr_required_bad, 279, FSCK_AUTOFIX) \
x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \
x(accounting_key_version_0, 282, FSCK_AUTOFIX) \
+ x(accounting_key_nr_counters_wrong, 307, FSCK_AUTOFIX) \
x(logged_op_but_clean, 283, FSCK_AUTOFIX) \
x(compression_opt_not_marked_in_sb, 295, FSCK_AUTOFIX) \
x(compression_type_not_marked_in_sb, 296, FSCK_AUTOFIX) \
x(directory_size_mismatch, 303, FSCK_AUTOFIX) \
x(dirent_cf_name_too_big, 304, 0) \
x(dirent_stray_data_after_cf_name, 305, 0) \
- x(MAX, 307, 0)
+ x(MAX, 308, 0)
enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index 38261638a611..06bb41a3f360 100644
--- a/fs/bcachefs/sb-members.h
+++ b/fs/bcachefs/sb-members.h
@@ -20,7 +20,7 @@ struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
- return !percpu_ref_is_zero(&ca->io_ref);
+ return !percpu_ref_is_zero(&ca->io_ref[READ]);
}
static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned);
@@ -156,33 +156,34 @@ static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
struct bch_dev *ca,
- unsigned state_mask)
+ unsigned state_mask,
+ int rw)
{
rcu_read_lock();
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
while ((ca = __bch2_next_dev(c, ca, NULL)) &&
(!((1 << ca->mi.state) & state_mask) ||
- !percpu_ref_tryget(&ca->io_ref)))
+ !percpu_ref_tryget(&ca->io_ref[rw])))
;
rcu_read_unlock();
return ca;
}
-#define __for_each_online_member(_c, _ca, state_mask) \
+#define __for_each_online_member(_c, _ca, state_mask, rw) \
for (struct bch_dev *_ca = NULL; \
- (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)
+ (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw));)
#define for_each_online_member(c, ca) \
- __for_each_online_member(c, ca, ~0)
+ __for_each_online_member(c, ca, ~0, READ)
#define for_each_rw_member(c, ca) \
- __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE)
#define for_each_readable_member(c, ca) \
- __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ)
static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
@@ -287,7 +288,7 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
rcu_read_lock();
struct bch_dev *ca = bch2_dev_rcu(c, dev);
- if (ca && !percpu_ref_tryget(&ca->io_ref))
+ if (ca && !percpu_ref_tryget(&ca->io_ref[rw]))
ca = NULL;
rcu_read_unlock();
@@ -297,7 +298,7 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
return ca;
if (ca)
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[rw]);
return NULL;
}
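With io_ref split into READ/WRITE halves, every acquirer now names the direction it needs, and the matching put must use the same index. A short sketch of the pattern using bch2_dev_get_ioref() from above (dev_idx and the error value are placeholders):

        struct bch_dev *ca = bch2_dev_get_ioref(c, dev_idx, WRITE);
        if (!ca)
                return -EROFS;          /* placeholder error */

        /* ... submit IO to ca ... */

        percpu_ref_put(&ca->io_ref[WRITE]);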
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index e7f197896db1..b7de29aed839 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -485,7 +485,7 @@ static int check_snapshot_tree(struct btree_trans *trans,
root_id != bch2_snapshot_root(c, root_id) ||
st.k->p.offset != le32_to_cpu(s.tree),
trans, snapshot_tree_to_missing_snapshot,
- "snapshot tree points to missing/incorrect snapshot:\n %s",
+ "snapshot tree points to missing/incorrect snapshot:\n%s",
(bch2_bkey_val_to_text(&buf, c, st.s_c),
prt_newline(&buf),
ret
@@ -505,19 +505,19 @@ static int check_snapshot_tree(struct btree_trans *trans,
if (fsck_err_on(ret,
trans, snapshot_tree_to_missing_subvol,
- "snapshot tree points to missing subvolume:\n %s",
+ "snapshot tree points to missing subvolume:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
fsck_err_on(!bch2_snapshot_is_ancestor(c,
le32_to_cpu(subvol.snapshot),
root_id),
trans, snapshot_tree_to_wrong_subvol,
- "snapshot tree points to subvolume that does not point to snapshot in this tree:\n %s",
+ "snapshot tree points to subvolume that does not point to snapshot in this tree:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
trans, snapshot_tree_to_snapshot_subvol,
- "snapshot tree points to snapshot subvolume:\n %s",
+ "snapshot tree points to snapshot subvolume:\n%s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
struct bkey_i_snapshot_tree *u;
@@ -756,7 +756,7 @@ static int check_snapshot(struct btree_trans *trans,
} else {
if (fsck_err_on(s.subvol,
trans, snapshot_should_not_have_subvol,
- "snapshot should not point to subvol:\n %s",
+ "snapshot should not point to subvol:\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
@@ -774,7 +774,7 @@ static int check_snapshot(struct btree_trans *trans,
if (fsck_err_on(!ret,
trans, snapshot_to_bad_snapshot_tree,
- "snapshot points to missing/incorrect tree:\n %s",
+ "snapshot points to missing/incorrect tree:\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
if (ret)
@@ -786,7 +786,7 @@ static int check_snapshot(struct btree_trans *trans,
if (fsck_err_on(le32_to_cpu(s.depth) != real_depth,
trans, snapshot_bad_depth,
- "snapshot with incorrect depth field, should be %u:\n %s",
+ "snapshot with incorrect depth field, should be %u:\n%s",
real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
@@ -803,7 +803,7 @@ static int check_snapshot(struct btree_trans *trans,
if (fsck_err_on(!ret,
trans, snapshot_bad_skiplist,
- "snapshot with bad skiplist field:\n %s",
+ "snapshot with bad skiplist field:\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
@@ -843,9 +843,6 @@ static int check_snapshot_exists(struct btree_trans *trans, u32 id)
{
struct bch_fs *c = trans->c;
- if (bch2_snapshot_exists(c, id))
- return 0;
-
/* Do we need to reconstruct the snapshot_tree entry as well? */
struct btree_iter iter;
struct bkey_s_c k;
@@ -1074,9 +1071,9 @@ static inline void normalize_snapshot_child_pointers(struct bch_snapshot *s)
static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
- struct btree_iter c_iter = (struct btree_iter) { NULL };
- struct btree_iter tree_iter = (struct btree_iter) { NULL };
+ struct btree_iter iter, p_iter = {};
+ struct btree_iter c_iter = {};
+ struct btree_iter tree_iter = {};
struct bkey_s_c_snapshot s;
u32 parent_id, child_id;
unsigned i;
@@ -1193,13 +1190,13 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
POS_MIN, BTREE_ITER_intent);
- k = bch2_btree_iter_peek(&iter);
+ k = bch2_btree_iter_peek(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
for (i = 0; i < nr_snapids; i++) {
- k = bch2_btree_iter_prev_slot(&iter);
+ k = bch2_btree_iter_prev_slot(trans, &iter);
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/str_hash.c b/fs/bcachefs/str_hash.c
index 93e71119e5a4..a90bf7b8a2b4 100644
--- a/fs/bcachefs/str_hash.c
+++ b/fs/bcachefs/str_hash.c
@@ -195,7 +195,7 @@ int __bch2_str_hash_check_key(struct btree_trans *trans,
struct btree_iter *k_iter, struct bkey_s_c hash_k)
{
struct bch_fs *c = trans->c;
- struct btree_iter iter = { NULL };
+ struct btree_iter iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c k;
int ret = 0;
@@ -232,7 +232,7 @@ bad_hash:
goto out;
if (fsck_err(trans, hash_table_key_wrong_offset,
- "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n %s",
+ "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
bch2_btree_id_str(desc->btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
diff --git a/fs/bcachefs/str_hash.h b/fs/bcachefs/str_hash.h
index 575ad1e03904..09a354a26c3b 100644
--- a/fs/bcachefs/str_hash.h
+++ b/fs/bcachefs/str_hash.h
@@ -231,11 +231,11 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- bch2_trans_copy_iter(&iter, start);
+ bch2_trans_copy_iter(trans, &iter, start);
- bch2_btree_iter_advance(&iter);
+ bch2_btree_iter_advance(trans, &iter);
- for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
+ for_each_btree_key_continue_norestart(trans, iter, BTREE_ITER_slots, k, ret) {
if (k.k->type != desc.key_type &&
k.k->type != KEY_TYPE_hash_whiteout)
break;
@@ -280,7 +280,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
}
if (!slot.path && !(flags & STR_HASH_must_replace))
- bch2_trans_copy_iter(&slot, iter);
+ bch2_trans_copy_iter(trans, &slot, iter);
if (k.k->type != KEY_TYPE_hash_whiteout)
goto not_found;
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index b7b96283c316..5537283d0bea 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -275,7 +275,7 @@ int bch2_subvol_has_children(struct btree_trans *trans, u32 subvol)
struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolume_children, POS(subvol, 0), 0);
- struct bkey_s_c k = bch2_btree_iter_peek(&iter);
+ struct bkey_s_c k = bch2_btree_iter_peek(trans, &iter);
bch2_trans_iter_exit(trans, &iter);
return bkey_err(k) ?: k.k && k.k->p.inode == subvol
@@ -561,6 +561,7 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
}
SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
+ n->v.fs_path_parent = 0;
bch2_trans_iter_exit(trans, &iter);
return ret;
}
@@ -573,7 +574,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
bool ro)
{
struct bch_fs *c = trans->c;
- struct btree_iter dst_iter, src_iter = (struct btree_iter) { NULL };
+ struct btree_iter dst_iter, src_iter = {};
struct bkey_i_subvolume *new_subvol = NULL;
struct bkey_i_subvolume *src_subvol = NULL;
u32 parent = 0, new_nodes[2], snapshot_subvols[2];
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index 910f6196700e..f640c1e3d639 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -33,16 +33,16 @@ int bch2_subvol_is_ro_trans(struct btree_trans *, u32);
int bch2_subvol_is_ro(struct bch_fs *, u32);
static inline struct bkey_s_c
-bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos end,
- u32 subvolid, unsigned flags)
+bch2_btree_iter_peek_in_subvolume_max_type(struct btree_trans *trans, struct btree_iter *iter,
+ struct bpos end, u32 subvolid, unsigned flags)
{
u32 snapshot;
- int ret = bch2_subvolume_get_snapshot(iter->trans, subvolid, &snapshot);
+ int ret = bch2_subvolume_get_snapshot(trans, subvolid, &snapshot);
if (ret)
return bkey_s_c_err(ret);
- bch2_btree_iter_set_snapshot(iter, snapshot);
- return bch2_btree_iter_peek_max_type(iter, end, flags);
+ bch2_btree_iter_set_snapshot(trans, iter, snapshot);
+ return bch2_btree_iter_peek_max_type(trans, iter, end, flags);
}
#define for_each_btree_key_in_subvolume_max_continue(_trans, _iter, \
@@ -53,14 +53,14 @@ bch2_btree_iter_peek_in_subvolume_max_type(struct btree_iter *iter, struct bpos
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
- (_k) = bch2_btree_iter_peek_in_subvolume_max_type(&(_iter), \
+ (_k) = bch2_btree_iter_peek_in_subvolume_max_type(trans, &(_iter),\
_end, _subvolid, (_flags)); \
if (!(_k).k) \
break; \
\
bkey_err(_k) ?: (_do); \
})); \
- } while (!_ret3 && bch2_btree_iter_advance(&(_iter))); \
+ } while (!_ret3 && bch2_btree_iter_advance(_trans, &(_iter))); \
\
bch2_trans_iter_exit((_trans), &(_iter)); \
_ret3; \
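bch2_btree_iter_peek_in_subvolume_max_type() resolves the subvolume to its snapshot before peeking, so it can return a transaction-restart error from either step. A sketch of direct use inside a restart loop (btree ID, positions, and subvolid are illustrative):

        struct btree_iter iter;
        struct bkey_s_c k;

        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, start, 0);
        int ret = lockrestart_do(trans,
                bkey_err(k = bch2_btree_iter_peek_in_subvolume_max_type(trans,
                                &iter, end, subvolid, 0)));
        bch2_trans_iter_exit(trans, &iter);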
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 572b06bfa0b8..e27422b6d9c6 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -248,7 +248,7 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
struct bch_sb_handle *dev_sb = &ca->disk_sb;
if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
return NULL;
}
}
@@ -945,7 +945,7 @@ static void write_super_endio(struct bio *bio)
}
closure_put(&ca->fs->sb_write);
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
}
static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
@@ -963,7 +963,7 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], bio_sectors(bio));
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
closure_bio_submit(bio, &c->sb_write);
}
@@ -989,7 +989,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
bio_sectors(bio));
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
closure_bio_submit(bio, &c->sb_write);
}
@@ -1014,13 +1014,20 @@ int bch2_write_super(struct bch_fs *c)
closure_init_stack(cl);
memset(&sb_written, 0, sizeof(sb_written));
+ /*
+ * Note: we do writes to RO devices here, and we might want to change
+ * that in the future.
+ *
+ * For now, we expect to be able to call write_super() when we're not
+ * yet RW:
+ */
for_each_online_member(c, ca) {
ret = darray_push(&online_devices, ca);
if (bch2_fs_fatal_err_on(ret, c, "%s: error allocating online devices", __func__)) {
- percpu_ref_put(&ca->io_ref);
+ percpu_ref_put(&ca->io_ref[READ]);
goto out;
}
- percpu_ref_get(&ca->io_ref);
+ percpu_ref_get(&ca->io_ref[READ]);
}
/* Make sure we're using the new magic numbers: */
@@ -1186,7 +1193,7 @@ out:
/* Make new options visible after they're persistent: */
bch2_sb_update(c);
darray_for_each(online_devices, ca)
- percpu_ref_put(&(*ca)->io_ref);
+ percpu_ref_put(&(*ca)->io_ref[READ]);
darray_exit(&online_devices);
printbuf_exit(&err);
return ret;
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 99f9a0aaa380..b79e80a435e0 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -70,14 +70,10 @@
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/sysfs.h>
-#include <crypto/hash.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_DESCRIPTION("bcachefs filesystem");
-MODULE_SOFTDEP("pre: chacha20");
-MODULE_SOFTDEP("pre: poly1305");
-MODULE_SOFTDEP("pre: xxhash");
const char * const bch2_fs_flag_strs[] = {
#define x(n) #n,
@@ -185,6 +181,7 @@ static void bch2_dev_unlink(struct bch_dev *);
static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
+static void bch2_dev_io_ref_stop(struct bch_dev *, int);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
struct bch_fs *bch2_dev_to_fs(dev_t dev)
@@ -294,8 +291,10 @@ static void __bch2_fs_read_only(struct bch_fs *c)
/*
* After stopping journal:
*/
- for_each_member_device(c, ca)
+ for_each_member_device(c, ca) {
+ bch2_dev_io_ref_stop(ca, WRITE);
bch2_dev_allocator_remove(c, ca);
+ }
}
#ifndef BCH_WRITE_REF_DEBUG
@@ -465,10 +464,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
if (ret)
goto err;
- ret = bch2_fs_mark_dirty(c);
- if (ret)
- goto err;
-
clear_bit(BCH_FS_clean_shutdown, &c->flags);
/*
@@ -480,10 +475,24 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
set_bit(JOURNAL_need_flush_write, &c->journal.flags);
set_bit(JOURNAL_running, &c->journal.flags);
- for_each_rw_member(c, ca)
+ __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) {
bch2_dev_allocator_add(c, ca);
+ percpu_ref_reinit(&ca->io_ref[WRITE]);
+ }
bch2_recalc_capacity(c);
+ ret = bch2_fs_mark_dirty(c);
+ if (ret)
+ goto err;
+
+ spin_lock(&c->journal.lock);
+ bch2_journal_space_available(&c->journal);
+ spin_unlock(&c->journal.lock);
+
+ ret = bch2_journal_reclaim_start(&c->journal);
+ if (ret)
+ goto err;
+
set_bit(BCH_FS_rw, &c->flags);
set_bit(BCH_FS_was_rw, &c->flags);
@@ -495,11 +504,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
atomic_long_inc(&c->writes[i]);
}
#endif
-
- ret = bch2_journal_reclaim_start(&c->journal);
- if (ret)
- goto err;
-
if (!early) {
ret = bch2_fs_read_write_late(c);
if (ret)
@@ -533,9 +537,11 @@ int bch2_fs_read_write(struct bch_fs *c)
int bch2_fs_read_write_early(struct bch_fs *c)
{
- lockdep_assert_held(&c->state_lock);
+ down_write(&c->state_lock);
+ int ret = __bch2_fs_read_write(c, true);
+ up_write(&c->state_lock);
- return __bch2_fs_read_write(c, true);
+ return ret;
}
/* Filesystem startup/shutdown: */
@@ -673,6 +679,7 @@ void bch2_fs_free(struct bch_fs *c)
if (ca) {
EBUG_ON(atomic_long_read(&ca->ref) != 1);
+ bch2_dev_io_ref_stop(ca, READ);
bch2_free_super(&ca->disk_sb);
bch2_dev_free(ca);
}
@@ -991,12 +998,6 @@ static void print_mount_opts(struct bch_fs *c)
prt_str(&p, "starting version ");
bch2_version_to_text(&p, c->sb.version);
- if (c->opts.read_only) {
- prt_str(&p, " opts=");
- first = false;
- prt_printf(&p, "ro");
- }
-
for (i = 0; i < bch2_opts_nr; i++) {
const struct bch_option *opt = &bch2_opt_table[i];
u64 v = bch2_opt_get_by_id(&c->opts, i);
@@ -1019,38 +1020,39 @@ static void print_mount_opts(struct bch_fs *c)
int bch2_fs_start(struct bch_fs *c)
{
time64_t now = ktime_get_real_seconds();
- int ret;
+ int ret = 0;
print_mount_opts(c);
down_write(&c->state_lock);
+ mutex_lock(&c->sb_lock);
BUG_ON(test_bit(BCH_FS_started, &c->flags));
- mutex_lock(&c->sb_lock);
+ if (!bch2_sb_field_get_minsize(&c->disk_sb, ext,
+ sizeof(struct bch_sb_field_ext) / sizeof(u64))) {
+ mutex_unlock(&c->sb_lock);
+ up_write(&c->state_lock);
+ ret = -BCH_ERR_ENOSPC_sb;
+ goto err;
+ }
ret = bch2_sb_members_v2_init(c);
if (ret) {
mutex_unlock(&c->sb_lock);
+ up_write(&c->state_lock);
goto err;
}
for_each_online_member(c, ca)
bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);
- struct bch_sb_field_ext *ext =
- bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
mutex_unlock(&c->sb_lock);
- if (!ext) {
- bch_err(c, "insufficient space in superblock for sb_field_ext");
- ret = -BCH_ERR_ENOSPC_sb;
- goto err;
- }
-
for_each_rw_member(c, ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
+ up_write(&c->state_lock);
c->recovery_task = current;
ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
@@ -1066,31 +1068,28 @@ int bch2_fs_start(struct bch_fs *c)
goto err;
if (bch2_fs_init_fault("fs_start")) {
- bch_err(c, "fs_start fault injected");
- ret = -EINVAL;
+ ret = -BCH_ERR_injected_fs_start;
goto err;
}
set_bit(BCH_FS_started, &c->flags);
wake_up(&c->ro_ref_wait);
+ down_write(&c->state_lock);
if (c->opts.read_only) {
bch2_fs_read_only(c);
} else {
ret = !test_bit(BCH_FS_rw, &c->flags)
? bch2_fs_read_write(c)
: bch2_fs_read_write_late(c);
- if (ret)
- goto err;
}
+ up_write(&c->state_lock);
- ret = 0;
err:
if (ret)
bch_err_msg(c, ret, "starting filesystem");
else
bch_verbose(c, "done starting filesystem");
- up_write(&c->state_lock);
return ret;
}
@@ -1199,6 +1198,15 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs,
/* Device startup/shutdown: */
+static void bch2_dev_io_ref_stop(struct bch_dev *ca, int rw)
+{
+ if (!percpu_ref_is_zero(&ca->io_ref[rw])) {
+ reinit_completion(&ca->io_ref_completion[rw]);
+ percpu_ref_kill(&ca->io_ref[rw]);
+ wait_for_completion(&ca->io_ref_completion[rw]);
+ }
+}
+
static void bch2_dev_release(struct kobject *kobj)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
@@ -1208,6 +1216,9 @@ static void bch2_dev_release(struct kobject *kobj)
static void bch2_dev_free(struct bch_dev *ca)
{
+ WARN_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE]));
+ WARN_ON(!percpu_ref_is_zero(&ca->io_ref[READ]));
+
cancel_work_sync(&ca->io_error_work);
bch2_dev_unlink(ca);
@@ -1226,7 +1237,8 @@ static void bch2_dev_free(struct bch_dev *ca)
bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]);
bch2_time_stats_quantiles_exit(&ca->io_latency[READ]);
- percpu_ref_exit(&ca->io_ref);
+ percpu_ref_exit(&ca->io_ref[WRITE]);
+ percpu_ref_exit(&ca->io_ref[READ]);
#ifndef CONFIG_BCACHEFS_DEBUG
percpu_ref_exit(&ca->ref);
#endif
@@ -1238,14 +1250,12 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
lockdep_assert_held(&c->state_lock);
- if (percpu_ref_is_zero(&ca->io_ref))
+ if (percpu_ref_is_zero(&ca->io_ref[READ]))
return;
__bch2_dev_read_only(c, ca);
- reinit_completion(&ca->io_ref_completion);
- percpu_ref_kill(&ca->io_ref);
- wait_for_completion(&ca->io_ref_completion);
+ bch2_dev_io_ref_stop(ca, READ);
bch2_dev_unlink(ca);
@@ -1262,11 +1272,18 @@ static void bch2_dev_ref_complete(struct percpu_ref *ref)
}
#endif
-static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
+static void bch2_dev_io_ref_read_complete(struct percpu_ref *ref)
{
- struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
+ struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[READ]);
- complete(&ca->io_ref_completion);
+ complete(&ca->io_ref_completion[READ]);
+}
+
+static void bch2_dev_io_ref_write_complete(struct percpu_ref *ref)
+{
+ struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[WRITE]);
+
+ complete(&ca->io_ref_completion[WRITE]);
}
static void bch2_dev_unlink(struct bch_dev *ca)
@@ -1330,7 +1347,8 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
kobject_init(&ca->kobj, &bch2_dev_ktype);
init_completion(&ca->ref_completion);
- init_completion(&ca->io_ref_completion);
+ init_completion(&ca->io_ref_completion[READ]);
+ init_completion(&ca->io_ref_completion[WRITE]);
INIT_WORK(&ca->io_error_work, bch2_io_error_work);
@@ -1356,7 +1374,9 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
bch2_dev_allocator_background_init(ca);
- if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
+ if (percpu_ref_init(&ca->io_ref[READ], bch2_dev_io_ref_read_complete,
+ PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
+ percpu_ref_init(&ca->io_ref[WRITE], bch2_dev_io_ref_write_complete,
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
!(ca->sb_read_scratch = kmalloc(BCH_SB_READ_SCRATCH_BUF_SIZE, GFP_KERNEL)) ||
bch2_dev_buckets_alloc(c, ca) ||
@@ -1419,7 +1439,8 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
return -BCH_ERR_device_size_too_small;
}
- BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
+ BUG_ON(!percpu_ref_is_zero(&ca->io_ref[READ]));
+ BUG_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE]));
ret = bch2_dev_journal_init(ca, sb->sb);
if (ret)
@@ -1438,7 +1459,7 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
ca->dev = ca->disk_sb.bdev->bd_dev;
- percpu_ref_reinit(&ca->io_ref);
+ percpu_ref_reinit(&ca->io_ref[READ]);
return 0;
}
@@ -1568,6 +1589,8 @@ static bool bch2_fs_may_start(struct bch_fs *c)
static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
+ bch2_dev_io_ref_stop(ca, WRITE);
+
/*
* The allocator thread itself allocates btree nodes, so stop it first:
*/
@@ -1584,6 +1607,10 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
+
+ if (percpu_ref_is_zero(&ca->io_ref[WRITE]))
+ percpu_ref_reinit(&ca->io_ref[WRITE]);
+
bch2_dev_do_discards(ca);
}
@@ -1731,7 +1758,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
return 0;
err:
if (ca->mi.state == BCH_MEMBER_STATE_rw &&
- !percpu_ref_is_zero(&ca->io_ref))
+ !percpu_ref_is_zero(&ca->io_ref[READ]))
__bch2_dev_read_write(c, ca);
up_write(&c->state_lock);
return ret;
@@ -2259,7 +2286,7 @@ BCH_DEBUG_PARAMS()
__maybe_unused
static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
-module_param_named(version, bch2_metadata_version, uint, 0400);
+module_param_named(version, bch2_metadata_version, uint, 0444);
module_exit(bcachefs_exit);
module_init(bcachefs_init);
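Taken together, the super.c changes give each device a two-stage io_ref lifecycle per direction. A condensed sketch, using only the calls introduced above:

        /* alloc: both directions start dead */
        percpu_ref_init(&ca->io_ref[READ], bch2_dev_io_ref_read_complete,
                        PERCPU_REF_INIT_DEAD, GFP_KERNEL);
        percpu_ref_init(&ca->io_ref[WRITE], bch2_dev_io_ref_write_complete,
                        PERCPU_REF_INIT_DEAD, GFP_KERNEL);

        percpu_ref_reinit(&ca->io_ref[READ]);   /* device attached/online */
        percpu_ref_reinit(&ca->io_ref[WRITE]);  /* device goes rw */

        bch2_dev_io_ref_stop(ca, WRITE);        /* rw -> ro: kill + drain */
        bch2_dev_io_ref_stop(ca, READ);         /* offline: kill + drain */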
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 251ba8224c1f..e5f003c29369 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -257,10 +257,8 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
prt_printf(out, "type\tcompressed\runcompressed\raverage extent size\r\n");
for (unsigned i = 1; i < BCH_COMPRESSION_TYPE_NR; i++) {
- struct disk_accounting_pos a = {
- .type = BCH_DISK_ACCOUNTING_compression,
- .compression.type = i,
- };
+ struct disk_accounting_pos a;
+ disk_accounting_key_init(a, compression, .type = i);
struct bpos p = disk_accounting_pos_to_bpos(&a);
u64 v[3];
bch2_accounting_mem_read(c, p, v, ARRAY_SIZE(v));
@@ -631,8 +629,6 @@ static ssize_t sysfs_opt_store(struct bch_fs *c,
if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
return -EROFS;
- down_write(&c->state_lock);
-
char *tmp = kstrdup(buf, GFP_KERNEL);
if (!tmp) {
ret = -ENOMEM;
@@ -675,7 +671,6 @@ static ssize_t sysfs_opt_store(struct bch_fs *c,
ret = size;
err:
- up_write(&c->state_lock);
bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
return ret;
}
diff --git a/fs/bcachefs/tests.c b/fs/bcachefs/tests.c
index 6c6469814637..c265b102267a 100644
--- a/fs/bcachefs/tests.c
+++ b/fs/bcachefs/tests.c
@@ -43,7 +43,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
BTREE_ITER_intent);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &k.k_i, 0));
bch_err_msg(c, ret, "update error");
if (ret)
@@ -51,7 +51,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
pr_info("deleting once");
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error (first)");
if (ret)
@@ -59,7 +59,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
pr_info("deleting twice");
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error (second)");
if (ret)
@@ -84,7 +84,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
BTREE_ITER_intent);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_trans_update(trans, &iter, &k.k_i, 0));
bch_err_msg(c, ret, "update error");
if (ret)
@@ -94,7 +94,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
bch2_journal_flush_all_pins(&c->journal);
ret = commit_do(trans, NULL, NULL, 0,
- bch2_btree_iter_traverse(&iter) ?:
+ bch2_btree_iter_traverse(trans, &iter) ?:
bch2_btree_delete_at(trans, &iter, 0));
bch_err_msg(c, ret, "delete error");
if (ret)
@@ -349,10 +349,10 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
SPOS(0, 0, U32_MAX), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
bch2_trans_iter_exit(trans, &iter);
@@ -369,10 +369,10 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(0, 0, U32_MAX), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k);
bch2_trans_iter_exit(trans, &iter);
@@ -488,7 +488,7 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
trans = bch2_trans_get(c);
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
SPOS(0, 0, snapid_lo), 0);
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX))));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX))));
BUG_ON(k.k->p.snapshot != U32_MAX);
@@ -602,9 +602,9 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
SPOS(0, 0, U32_MAX), 0);
for (i = 0; i < nr; i++) {
- bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
+ bch2_btree_iter_set_pos(trans, &iter, SPOS(0, test_rand(), U32_MAX));
- lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+ lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(trans, &iter)));
ret = bkey_err(k);
if (ret)
break;
@@ -623,9 +623,9 @@ static int rand_mixed_trans(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
- bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
+ bch2_btree_iter_set_pos(trans, iter, SPOS(0, pos, U32_MAX));
- k = bch2_btree_iter_peek(iter);
+ k = bch2_btree_iter_peek(trans, iter);
ret = bkey_err(k);
bch_err_msg(trans->c, ret, "lookup error");
if (ret)
@@ -672,7 +672,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
BTREE_ITER_intent);
- k = bch2_btree_iter_peek_max(&iter, POS(0, U64_MAX));
+ k = bch2_btree_iter_peek_max(trans, &iter, POS(0, U64_MAX));
ret = bkey_err(k);
if (ret)
goto err;
diff --git a/fs/bcachefs/time_stats.c b/fs/bcachefs/time_stats.c
index 3fe82757f93a..2c34fe4be912 100644
--- a/fs/bcachefs/time_stats.c
+++ b/fs/bcachefs/time_stats.c
@@ -10,6 +10,9 @@
#include "eytzinger.h"
#include "time_stats.h"
+/* disable automatic switching to percpu mode */
+#define TIME_STATS_NONPCPU ((unsigned long) 1)
+
static const struct time_unit time_units[] = {
{ "ns", 1 },
{ "us", NSEC_PER_USEC },
@@ -123,11 +126,12 @@ void __bch2_time_stats_update(struct bch2_time_stats *stats, u64 start, u64 end)
{
unsigned long flags;
- if (!stats->buffer) {
+ if ((unsigned long) stats->buffer <= TIME_STATS_NONPCPU) {
spin_lock_irqsave(&stats->lock, flags);
time_stats_update_one(stats, start, end);
- if (mean_and_variance_weighted_get_mean(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT) < 32 &&
+ if (!stats->buffer &&
+ mean_and_variance_weighted_get_mean(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT) < 32 &&
stats->duration_stats.n > 1024)
stats->buffer =
alloc_percpu_gfp(struct time_stat_buffer,
@@ -157,7 +161,7 @@ void bch2_time_stats_reset(struct bch2_time_stats *stats)
unsigned offset = offsetof(struct bch2_time_stats, min_duration);
memset((void *) stats + offset, 0, sizeof(*stats) - offset);
- if (stats->buffer) {
+ if ((unsigned long) stats->buffer > TIME_STATS_NONPCPU) {
int cpu;
for_each_possible_cpu(cpu)
per_cpu_ptr(stats->buffer, cpu)->nr = 0;
@@ -167,7 +171,9 @@ void bch2_time_stats_reset(struct bch2_time_stats *stats)
void bch2_time_stats_exit(struct bch2_time_stats *stats)
{
- free_percpu(stats->buffer);
+ if ((unsigned long) stats->buffer > TIME_STATS_NONPCPU)
+ free_percpu(stats->buffer);
+ stats->buffer = NULL;
}
void bch2_time_stats_init(struct bch2_time_stats *stats)
@@ -177,3 +183,9 @@ void bch2_time_stats_init(struct bch2_time_stats *stats)
stats->min_freq = U64_MAX;
spin_lock_init(&stats->lock);
}
+
+void bch2_time_stats_init_no_pcpu(struct bch2_time_stats *stats)
+{
+ bch2_time_stats_init(stats);
+ stats->buffer = (struct time_stat_buffer __percpu *) TIME_STATS_NONPCPU;
+}
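The buffer pointer is now a tri-state: NULL ("no percpu buffer yet, may allocate one when hot"), the TIME_STATS_NONPCPU sentinel ("never allocate"), or a real percpu pointer. A sketch of the predicate this encoding implies:

        /* sketch: true only when a real percpu buffer is in use */
        static bool time_stats_has_pcpu_buffer(struct bch2_time_stats *stats)
        {
                return (unsigned long) stats->buffer > TIME_STATS_NONPCPU;
        }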
diff --git a/fs/bcachefs/time_stats.h b/fs/bcachefs/time_stats.h
index dc6493f7bbab..eddb0985bab4 100644
--- a/fs/bcachefs/time_stats.h
+++ b/fs/bcachefs/time_stats.h
@@ -145,6 +145,7 @@ static inline bool track_event_change(struct bch2_time_stats *stats, bool v)
void bch2_time_stats_reset(struct bch2_time_stats *);
void bch2_time_stats_exit(struct bch2_time_stats *);
void bch2_time_stats_init(struct bch2_time_stats *);
+void bch2_time_stats_init_no_pcpu(struct bch2_time_stats *);
static inline void bch2_time_stats_quantiles_exit(struct bch2_time_stats_quantiles *statq)
{
diff --git a/fs/bcachefs/util.c b/fs/bcachefs/util.c
index 553de8d8e3e5..87af551692f4 100644
--- a/fs/bcachefs/util.c
+++ b/fs/bcachefs/util.c
@@ -270,7 +270,7 @@ static void __bch2_print_string_as_lines(const char *prefix, const char *lines,
locked = console_trylock();
}
- while (1) {
+ while (*lines) {
p = strchrnul(lines, '\n');
printk("%s%.*s\n", prefix, (int) (p - lines), lines);
if (!*p)
diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h
index 7d921fc920a0..6ba5071ab6dd 100644
--- a/fs/bcachefs/util.h
+++ b/fs/bcachefs/util.h
@@ -94,6 +94,7 @@ do { \
#define printbuf_tabstop_push(_buf, _n) bch2_printbuf_tabstop_push(_buf, _n)
#define printbuf_indent_add(_out, _n) bch2_printbuf_indent_add(_out, _n)
+#define printbuf_indent_add_nextline(_out, _n) bch2_printbuf_indent_add_nextline(_out, _n)
#define printbuf_indent_sub(_out, _n) bch2_printbuf_indent_sub(_out, _n)
#define prt_newline(_out) bch2_prt_newline(_out)
@@ -621,7 +622,7 @@ do { \
#define per_cpu_sum(_p) \
({ \
- typeof(*_p) _ret = 0; \
+ TYPEOF_UNQUAL(*_p) _ret = 0; \
\
int cpu; \
for_each_possible_cpu(cpu) \
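TYPEOF_UNQUAL strips the __percpu address-space qualifier from the accumulator's type, so summing a percpu counter yields a plain local. Usage is unchanged; a sketch:

        u64 __percpu *counters = alloc_percpu(u64);
        /* ... this_cpu_add(*counters, n) on hot paths ... */
        u64 total = per_cpu_sum(counters);      /* _ret is a plain u64 */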
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index f9667b944c0d..651da52b2cbc 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -168,7 +168,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
int type, int flags)
{
struct bch_fs *c = trans->c;
- struct btree_iter inode_iter = { NULL };
+ struct btree_iter inode_iter = {};
int ret;
ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
diff --git a/fs/bpf_fs_kfuncs.c b/fs/bpf_fs_kfuncs.c
index 3fe9f59ef867..08412532db1b 100644
--- a/fs/bpf_fs_kfuncs.c
+++ b/fs/bpf_fs_kfuncs.c
@@ -2,10 +2,12 @@
/* Copyright (c) 2024 Google LLC. */
#include <linux/bpf.h>
+#include <linux/bpf_lsm.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/dcache.h>
#include <linux/fs.h>
+#include <linux/fsnotify.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/xattr.h>
@@ -93,6 +95,24 @@ __bpf_kfunc int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz)
return len;
}
+static bool match_security_bpf_prefix(const char *name__str)
+{
+ return !strncmp(name__str, XATTR_NAME_BPF_LSM, XATTR_NAME_BPF_LSM_LEN);
+}
+
+static int bpf_xattr_read_permission(const char *name, struct inode *inode)
+{
+ if (WARN_ON(!inode))
+ return -EINVAL;
+
+ /* Allow reading xattr with user. and security.bpf. prefix */
+ if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
+ !match_security_bpf_prefix(name))
+ return -EPERM;
+
+ return inode_permission(&nop_mnt_idmap, inode, MAY_READ);
+}
+
/**
* bpf_get_dentry_xattr - get xattr of a dentry
* @dentry: dentry to get xattr from
@@ -101,9 +121,10 @@ __bpf_kfunc int bpf_path_d_path(struct path *path, char *buf, size_t buf__sz)
*
* Get xattr *name__str* of *dentry* and store the output in *value_ptr*.
*
- * For security reasons, only *name__str* with prefix "user." is allowed.
+ * For security reasons, only *name__str* with prefixes "user." or
+ * "security.bpf." are allowed.
*
- * Return: 0 on success, a negative value on error.
+ * Return: length of the xattr value on success, a negative value on error.
*/
__bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__str,
struct bpf_dynptr *value_p)
@@ -114,18 +135,12 @@ __bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__st
void *value;
int ret;
- if (WARN_ON(!inode))
- return -EINVAL;
-
- if (strncmp(name__str, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
- return -EPERM;
-
value_len = __bpf_dynptr_size(value_ptr);
value = __bpf_dynptr_data_rw(value_ptr, value_len);
if (!value)
return -EINVAL;
- ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);
+ ret = bpf_xattr_read_permission(name__str, inode);
if (ret)
return ret;
return __vfs_getxattr(dentry, inode, name__str, value, value_len);
@@ -139,9 +154,10 @@ __bpf_kfunc int bpf_get_dentry_xattr(struct dentry *dentry, const char *name__st
*
* Get xattr *name__str* of *file* and store the output in *value_ptr*.
*
- * For security reasons, only *name__str* with prefix "user." is allowed.
+ * For security reasons, only *name__str* with prefixes "user." or
+ * "security.bpf." are allowed.
*
- * Return: 0 on success, a negative value on error.
+ * Return: length of the xattr value on success, a negative value on error.
*/
__bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
struct bpf_dynptr *value_p)
@@ -154,6 +170,160 @@ __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str,
__bpf_kfunc_end_defs();
+static int bpf_xattr_write_permission(const char *name, struct inode *inode)
+{
+ if (WARN_ON(!inode))
+ return -EINVAL;
+
+ /* Only allow setting and removing security.bpf. xattrs */
+ if (!match_security_bpf_prefix(name))
+ return -EPERM;
+
+ return inode_permission(&nop_mnt_idmap, inode, MAY_WRITE);
+}
+
+/**
+ * bpf_set_dentry_xattr_locked - set an xattr on a dentry
+ * @dentry: dentry to set the xattr on
+ * @name__str: name of the xattr
+ * @value_p: xattr value
+ * @flags: flags to pass into filesystem operations
+ *
+ * Set xattr *name__str* of *dentry* to the value in *value_p*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller already locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+int bpf_set_dentry_xattr_locked(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+ struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p;
+ struct inode *inode = d_inode(dentry);
+ const void *value;
+ u32 value_len;
+ int ret;
+
+ value_len = __bpf_dynptr_size(value_ptr);
+ value = __bpf_dynptr_data(value_ptr, value_len);
+ if (!value)
+ return -EINVAL;
+
+ ret = bpf_xattr_write_permission(name__str, inode);
+ if (ret)
+ return ret;
+
+ ret = __vfs_setxattr(&nop_mnt_idmap, dentry, inode, name__str,
+ value, value_len, flags);
+ if (!ret) {
+ fsnotify_xattr(dentry);
+
+ /* This xattr is set by BPF LSM, so we do not call
+ * security_inode_post_setxattr. Otherwise, we would
+ * risk deadlocks by calling back to the same kfunc.
+ *
+ * This is the same as security_inode_setsecurity().
+ */
+ }
+ return ret;
+}
+
+/**
+ * bpf_remove_dentry_xattr_locked - remove an xattr from a dentry
+ * @dentry: dentry to remove the xattr from
+ * @name__str: name of the xattr
+ *
+ * Remove xattr *name__str* of *dentry*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller already locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+int bpf_remove_dentry_xattr_locked(struct dentry *dentry, const char *name__str)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ ret = bpf_xattr_write_permission(name__str, inode);
+ if (ret)
+ return ret;
+
+ ret = __vfs_removexattr(&nop_mnt_idmap, dentry, name__str);
+ if (!ret) {
+ fsnotify_xattr(dentry);
+
+ /* This xattr is removed by BPF LSM, so we do not call
+ * security_inode_post_removexattr. Otherwise, we would
+ * risk deadlocks by calling back to the same kfunc.
+ */
+ }
+ return ret;
+}
+
+__bpf_kfunc_start_defs();
+
+/**
+ * bpf_set_dentry_xattr - set an xattr on a dentry
+ * @dentry: dentry to set the xattr on
+ * @name__str: name of the xattr
+ * @value_p: xattr value
+ * @flags: flags to pass into filesystem operations
+ *
+ * Set xattr *name__str* of *dentry* to the value in *value_p*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller has not locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_set_dentry_xattr(struct dentry *dentry, const char *name__str,
+ const struct bpf_dynptr *value_p, int flags)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ inode_lock(inode);
+ ret = bpf_set_dentry_xattr_locked(dentry, name__str, value_p, flags);
+ inode_unlock(inode);
+ return ret;
+}
+
+/**
+ * bpf_remove_dentry_xattr - remove an xattr from a dentry
+ * @dentry: dentry to remove the xattr from
+ * @name__str: name of the xattr
+ *
+ * Remove xattr *name__str* of *dentry*.
+ *
+ * For security reasons, only *name__str* with prefix "security.bpf."
+ * is allowed.
+ *
+ * The caller has not locked dentry->d_inode.
+ *
+ * Return: 0 on success, a negative value on error.
+ */
+__bpf_kfunc int bpf_remove_dentry_xattr(struct dentry *dentry, const char *name__str)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ inode_lock(inode);
+ ret = bpf_remove_dentry_xattr_locked(dentry, name__str);
+ inode_unlock(inode);
+ return ret;
+}
+
+__bpf_kfunc_end_defs();
+
BTF_KFUNCS_START(bpf_fs_kfunc_set_ids)
BTF_ID_FLAGS(func, bpf_get_task_exe_file,
KF_ACQUIRE | KF_TRUSTED_ARGS | KF_RET_NULL)
@@ -161,6 +331,8 @@ BTF_ID_FLAGS(func, bpf_put_file, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_path_d_path, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_get_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_get_file_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_set_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_remove_dentry_xattr, KF_SLEEPABLE | KF_TRUSTED_ARGS)
BTF_KFUNCS_END(bpf_fs_kfunc_set_ids)
static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
@@ -171,6 +343,37 @@ static int bpf_fs_kfuncs_filter(const struct bpf_prog *prog, u32 kfunc_id)
return -EACCES;
}
+/* bpf_[set|remove]_dentry_xattr.* hooks have KF_TRUSTED_ARGS and
+ * KF_SLEEPABLE, so they are only available to sleepable hooks with
+ * dentry arguments.
+ *
+ * Setting and removing xattr requires exclusive lock on dentry->d_inode.
+ * Some hooks already locked d_inode, while some hooks have not locked
+ * d_inode. Therefore, we need different kfuncs for different hooks.
+ * Specifically, hooks in the following list (d_inode_locked_hooks)
+ * should call bpf_[set|remove]_dentry_xattr_locked; while other hooks
+ * should call bpf_[set|remove]_dentry_xattr.
+ */
+BTF_SET_START(d_inode_locked_hooks)
+BTF_ID(func, bpf_lsm_inode_post_removexattr)
+BTF_ID(func, bpf_lsm_inode_post_setattr)
+BTF_ID(func, bpf_lsm_inode_post_setxattr)
+BTF_ID(func, bpf_lsm_inode_removexattr)
+BTF_ID(func, bpf_lsm_inode_rmdir)
+BTF_ID(func, bpf_lsm_inode_setattr)
+BTF_ID(func, bpf_lsm_inode_setxattr)
+BTF_ID(func, bpf_lsm_inode_unlink)
+#ifdef CONFIG_SECURITY_PATH
+BTF_ID(func, bpf_lsm_path_unlink)
+BTF_ID(func, bpf_lsm_path_rmdir)
+#endif /* CONFIG_SECURITY_PATH */
+BTF_SET_END(d_inode_locked_hooks)
+
+bool bpf_lsm_has_d_inode_locked(const struct bpf_prog *prog)
+{
+ return btf_id_set_contains(&d_inode_locked_hooks, prog->aux->attach_btf_id);
+}
+
static const struct btf_kfunc_id_set bpf_fs_kfunc_set = {
.owner = THIS_MODULE,
.set = &bpf_fs_kfunc_set_ids,
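A BPF program always calls the kfuncs registered above by their unlocked names; for attach points in d_inode_locked_hooks the series relies on the verifier (via bpf_lsm_has_d_inode_locked()) substituting the _locked variants. A hypothetical sleepable LSM program using the new setter — the hook choice, xattr name, and payload are illustrative, not from this diff:

        #include <vmlinux.h>
        #include <bpf/bpf_helpers.h>
        #include <bpf/bpf_tracing.h>

        extern int bpf_set_dentry_xattr(struct dentry *dentry, const char *name__str,
                                        const struct bpf_dynptr *value_p,
                                        int flags) __ksym __weak;

        char _license[] SEC("license") = "GPL";

        SEC("lsm.s/inode_unlink")
        int BPF_PROG(tag_unlink, struct inode *dir, struct dentry *victim)
        {
                struct bpf_dynptr value;
                char tag[] = "1";

                bpf_dynptr_from_mem(tag, sizeof(tag), 0, &value);
                /* only "security.bpf."-prefixed names are writable from BPF */
                bpf_set_dentry_xattr(victim, "security.bpf.unlinked", &value, 0);
                return 0;
        }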
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index fa8515598341..73a2dfb854c5 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -3,9 +3,9 @@
config BTRFS_FS
tristate "Btrfs filesystem support"
select BLK_CGROUP_PUNT_BIO
+ select CRC32
select CRYPTO
select CRYPTO_CRC32C
- select LIBCRC32C
select CRYPTO_XXHASH
select CRYPTO_SHA256
select CRYPTO_BLAKE2B
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1a916716cefe..3dd555db3d32 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1564,7 +1564,7 @@ static int transaction_kthread(void *arg)
do {
cannot_commit = false;
- delay = msecs_to_jiffies(fs_info->commit_interval * 1000);
+ delay = secs_to_jiffies(fs_info->commit_interval);
mutex_lock(&fs_info->transaction_kthread_mutex);
spin_lock(&fs_info->trans_lock);
@@ -1579,9 +1579,9 @@ static int transaction_kthread(void *arg)
cur->state < TRANS_STATE_COMMIT_PREP &&
delta < fs_info->commit_interval) {
spin_unlock(&fs_info->trans_lock);
- delay -= msecs_to_jiffies((delta - 1) * 1000);
+ delay -= secs_to_jiffies(delta - 1);
delay = min(delay,
- msecs_to_jiffies(fs_info->commit_interval * 1000));
+ secs_to_jiffies(fs_info->commit_interval));
goto sleep;
}
transid = cur->transid;
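secs_to_jiffies() converts directly, where the old code scaled through milliseconds first; for whole-second intervals the two agree. A sketch of the equivalence:

        unsigned long d1 = msecs_to_jiffies(fs_info->commit_interval * 1000);
        unsigned long d2 = secs_to_jiffies(fs_info->commit_interval);
        WARN_ON_ONCE(d1 != d2);         /* both are interval * HZ */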
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index cd5f38d6fbaa..3541efa765c7 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -225,7 +225,7 @@ void zstd_cleanup_workspace_manager(void)
}
spin_unlock_bh(&wsm.lock);
- del_timer_sync(&wsm.timer);
+ timer_delete_sync(&wsm.timer);
}
/*
diff --git a/fs/buffer.c b/fs/buffer.c
index 194eacbefc95..c7abb4a029dc 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2166,7 +2166,7 @@ int __block_write_begin(struct folio *folio, loff_t pos, unsigned len,
}
EXPORT_SYMBOL(__block_write_begin);
-static void __block_commit_write(struct folio *folio, size_t from, size_t to)
+void block_commit_write(struct folio *folio, size_t from, size_t to)
{
size_t block_start, block_end;
bool partial = false;
@@ -2204,6 +2204,7 @@ static void __block_commit_write(struct folio *folio, size_t from, size_t to)
if (!partial)
folio_mark_uptodate(folio);
}
+EXPORT_SYMBOL(block_commit_write);
/*
* block_write_begin takes care of the basic task of block allocation and
@@ -2262,7 +2263,7 @@ int block_write_end(struct file *file, struct address_space *mapping,
flush_dcache_folio(folio);
/* This could be a short (even 0-length) commit */
- __block_commit_write(folio, start, start + copied);
+ block_commit_write(folio, start, start + copied);
return copied;
}
@@ -2566,13 +2567,6 @@ int cont_write_begin(struct file *file, struct address_space *mapping,
}
EXPORT_SYMBOL(cont_write_begin);
-void block_commit_write(struct page *page, unsigned from, unsigned to)
-{
- struct folio *folio = page_folio(page);
- __block_commit_write(folio, from, to);
-}
-EXPORT_SYMBOL(block_commit_write);
-
/*
* block_page_mkwrite() is not allowed to change the file size as it gets
* called from a page fault handler when a page is first dirtied. Hence we must
@@ -2618,7 +2612,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
if (unlikely(ret))
goto out_unlock;
- __block_commit_write(folio, 0, end);
+ block_commit_write(folio, 0, end);
folio_mark_dirty(folio);
folio_wait_stable(folio);
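The deleted page-based wrapper doubles as the conversion recipe for any remaining caller; a sketch:

        /* before: block_commit_write(page, from, to); */
        struct folio *folio = page_folio(page);
        block_commit_write(folio, from, to);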
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 83a60126de0f..14d0cc894000 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -128,10 +128,11 @@ retry:
ret = security_path_mkdir(&path, subdir, 0700);
if (ret < 0)
goto mkdir_error;
- subdir = ERR_PTR(cachefiles_inject_write_error());
- if (!IS_ERR(subdir))
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
subdir = vfs_mkdir(&nop_mnt_idmap, d_inode(dir), subdir, 0700);
- ret = PTR_ERR(subdir);
+ else
+ subdir = ERR_PTR(ret);
if (IS_ERR(subdir)) {
trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
cachefiles_trace_mkdir_error);
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 7249d70e1a43..3e7def3d31c1 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,7 +3,7 @@ config CEPH_FS
tristate "Ceph distributed file system"
depends on INET
select CEPH_LIB
- select LIBCRC32C
+ select CRC32
select CRYPTO_AES
select CRYPTO
select NETFS_SUPPORT
diff --git a/fs/dax.c b/fs/dax.c
index 7fd4cd9a51f2..af5045b0f476 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -71,6 +71,11 @@ static unsigned long dax_to_pfn(void *entry)
return xa_to_value(entry) >> DAX_SHIFT;
}
+static struct folio *dax_to_folio(void *entry)
+{
+ return page_folio(pfn_to_page(dax_to_pfn(entry)));
+}
+
static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
@@ -206,7 +211,7 @@ static void dax_wake_entry(struct xa_state *xas, void *entry,
*
* Must be called with the i_pages lock held.
*/
-static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
+static void *get_next_unlocked_entry(struct xa_state *xas, unsigned int order)
{
void *entry;
struct wait_exceptional_entry_queue ewait;
@@ -236,6 +241,37 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
}
/*
+ * Wait for the given entry to become unlocked. Caller must hold the i_pages
+ * lock and call either put_unlocked_entry() if it did not lock the entry or
+ * dax_unlock_entry() if it did. Returns an unlocked entry if still present.
+ */
+static void *wait_entry_unlocked_exclusive(struct xa_state *xas, void *entry)
+{
+ struct wait_exceptional_entry_queue ewait;
+ wait_queue_head_t *wq;
+
+ init_wait(&ewait.wait);
+ ewait.wait.func = wake_exceptional_entry_func;
+
+ while (unlikely(dax_is_locked(entry))) {
+ wq = dax_entry_waitqueue(xas, entry, &ewait.key);
+ prepare_to_wait_exclusive(wq, &ewait.wait,
+ TASK_UNINTERRUPTIBLE);
+ xas_pause(xas);
+ xas_unlock_irq(xas);
+ schedule();
+ finish_wait(wq, &ewait.wait);
+ xas_lock_irq(xas);
+ entry = xas_load(xas);
+ }
+
+ if (xa_is_internal(entry))
+ return NULL;
+
+ return entry;
+}
+
+/*
* The only thing keeping the address space around is the i_pages lock
* (it's cycled in clear_inode() after removing the entries from i_pages)
* After we call xas_unlock_irq(), we cannot touch xas->xa.
@@ -250,7 +286,7 @@ static void wait_entry_unlocked(struct xa_state *xas, void *entry)
wq = dax_entry_waitqueue(xas, entry, &ewait.key);
/*
- * Unlike get_unlocked_entry() there is no guarantee that this
+ * Unlike get_next_unlocked_entry() there is no guarantee that this
* path ever successfully retrieves an unlocked entry before an
* inode dies. Perform a non-exclusive wait in case this path
* never successfully performs its own wake up.
@@ -307,109 +343,156 @@ static unsigned long dax_entry_size(void *entry)
return PAGE_SIZE;
}
-static unsigned long dax_end_pfn(void *entry)
+/*
+ * A DAX folio is considered shared if it has no mapping set and ->share (which
+ * shares the ->index field) is non-zero. Note this may return false even if the
+ * page is shared between multiple files but has not yet actually been mapped
+ * into multiple address spaces.
+ */
+static inline bool dax_folio_is_shared(struct folio *folio)
{
- return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
+ return !folio->mapping && folio->share;
}
/*
- * Iterate through all mapped pfns represented by an entry, i.e. skip
- * 'empty' and 'zero' entries.
+ * When it is called by dax_insert_entry(), the shared flag will indicate
+ * whether this entry is shared by multiple files. If the page has not
+ * previously been associated with any mappings the ->mapping and ->index
+ * fields will be set. If it has already been associated with a mapping
+ * the mapping will be cleared and the share count set. It's then up to
+ * reverse map users like memory_failure() to call back into the filesystem to
+ * recover ->mapping and ->index information. For example by implementing
+ * dax_holder_operations.
*/
-#define for_each_mapped_pfn(entry, pfn) \
- for (pfn = dax_to_pfn(entry); \
- pfn < dax_end_pfn(entry); pfn++)
-
-static inline bool dax_page_is_shared(struct page *page)
+static void dax_folio_make_shared(struct folio *folio)
{
- return page->mapping == PAGE_MAPPING_DAX_SHARED;
+ /*
+ * folio is not currently shared so mark it as shared by clearing
+ * folio->mapping.
+ */
+ folio->mapping = NULL;
+
+ /*
+ * folio has previously been mapped into one address space so set the
+ * share count.
+ */
+ folio->share = 1;
}
-/*
- * Set the page->mapping with PAGE_MAPPING_DAX_SHARED flag, increase the
- * refcount.
- */
-static inline void dax_page_share_get(struct page *page)
+static inline unsigned long dax_folio_put(struct folio *folio)
{
- if (page->mapping != PAGE_MAPPING_DAX_SHARED) {
+ unsigned long ref;
+ int order, i;
+
+ if (!dax_folio_is_shared(folio))
+ ref = 0;
+ else
+ ref = --folio->share;
+
+ if (ref)
+ return ref;
+
+ folio->mapping = NULL;
+ order = folio_order(folio);
+ if (!order)
+ return 0;
+
+ for (i = 0; i < (1UL << order); i++) {
+ struct dev_pagemap *pgmap = page_pgmap(&folio->page);
+ struct page *page = folio_page(folio, i);
+ struct folio *new_folio = (struct folio *)page;
+
+ ClearPageHead(page);
+ clear_compound_head(page);
+
+ new_folio->mapping = NULL;
/*
- * Reset the index if the page was already mapped
- * regularly before.
+ * Reset pgmap which was over-written by
+ * prep_compound_page().
*/
- if (page->mapping)
- page->share = 1;
- page->mapping = PAGE_MAPPING_DAX_SHARED;
+ new_folio->pgmap = pgmap;
+ new_folio->share = 0;
+ WARN_ON_ONCE(folio_ref_count(new_folio));
}
- page->share++;
+
+ return ref;
}
-static inline unsigned long dax_page_share_put(struct page *page)
+static void dax_folio_init(void *entry)
{
- return --page->share;
+ struct folio *folio = dax_to_folio(entry);
+ int order = dax_entry_order(entry);
+
+ /*
+ * The folio should have been split back to order-0 pages in
+ * dax_folio_put() when it was removed from its final mapping.
+ */
+ WARN_ON_ONCE(folio_order(folio));
+
+ if (order > 0) {
+ prep_compound_page(&folio->page, order);
+ if (order > 1)
+ INIT_LIST_HEAD(&folio->_deferred_list);
+ WARN_ON_ONCE(folio_ref_count(folio));
+ }
}
-/*
- * When it is called in dax_insert_entry(), the shared flag will indicate that
- * whether this entry is shared by multiple files. If so, set the page->mapping
- * PAGE_MAPPING_DAX_SHARED, and use page->share as refcount.
- */
static void dax_associate_entry(void *entry, struct address_space *mapping,
- struct vm_area_struct *vma, unsigned long address, bool shared)
+ struct vm_area_struct *vma,
+ unsigned long address, bool shared)
{
- unsigned long size = dax_entry_size(entry), pfn, index;
- int i = 0;
+ unsigned long size = dax_entry_size(entry), index;
+ struct folio *folio = dax_to_folio(entry);
+
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
+ return;
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
return;
index = linear_page_index(vma, address & ~(size - 1));
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
+ if (shared && (folio->mapping || dax_folio_is_shared(folio))) {
+ if (folio->mapping)
+ dax_folio_make_shared(folio);
- if (shared) {
- dax_page_share_get(page);
- } else {
- WARN_ON_ONCE(page->mapping);
- page->mapping = mapping;
- page->index = index + i++;
- }
+ WARN_ON_ONCE(!folio->share);
+ WARN_ON_ONCE(dax_entry_order(entry) != folio_order(folio));
+ folio->share++;
+ } else {
+ WARN_ON_ONCE(folio->mapping);
+ dax_folio_init(entry);
+ folio = dax_to_folio(entry);
+ folio->mapping = mapping;
+ folio->index = index;
}
}
static void dax_disassociate_entry(void *entry, struct address_space *mapping,
- bool trunc)
+ bool trunc)
{
- unsigned long pfn;
+ struct folio *folio = dax_to_folio(entry);
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
return;
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
-
- WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
- if (dax_page_is_shared(page)) {
- /* keep the shared flag if this page is still shared */
- if (dax_page_share_put(page) > 0)
- continue;
- } else
- WARN_ON_ONCE(page->mapping && page->mapping != mapping);
- page->mapping = NULL;
- page->index = 0;
- }
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
+ return;
+
+ dax_folio_put(folio);
}
static struct page *dax_busy_page(void *entry)
{
- unsigned long pfn;
+ struct folio *folio = dax_to_folio(entry);
- for_each_mapped_pfn(entry, pfn) {
- struct page *page = pfn_to_page(pfn);
+ if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry))
+ return NULL;
- if (page_ref_count(page) > 1)
- return page;
- }
- return NULL;
+ if (folio_ref_count(folio) - folio_mapcount(folio))
+ return &folio->page;
+ else
+ return NULL;
}
/**
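Because ->share aliases ->index, a DAX folio is effectively a tri-state; a summary of the transitions the helpers above implement (sketch, mirroring the diff):

        /*
         * unassociated: folio->mapping == NULL, folio->share == 0
         * single owner: folio->mapping set,     folio->index = file offset
         * shared:       folio->mapping == NULL, folio->share >= 1
         *
         * dax_folio_make_shared(): single owner -> shared (share = 1)
         * dax_associate_entry():   shared -> shared (share++)
         * dax_folio_put():         share--; on the last put, split the
         *                          folio back to order-0 pages
         */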
@@ -580,7 +663,7 @@ static void *grab_mapping_entry(struct xa_state *xas,
retry:
pmd_downgrade = false;
xas_lock_irq(xas);
- entry = get_unlocked_entry(xas, order);
+ entry = get_next_unlocked_entry(xas, order);
if (entry) {
if (dax_is_conflict(entry))
@@ -690,7 +773,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
return NULL;
- if (!dax_mapping(mapping) || !mapping_mapped(mapping))
+ if (!dax_mapping(mapping))
return NULL;
/* If end == LLONG_MAX, all pages from start to till end of file */
@@ -716,8 +799,7 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping,
xas_for_each(&xas, entry, end_idx) {
if (WARN_ON_ONCE(!xa_is_value(entry)))
continue;
- if (unlikely(dax_is_locked(entry)))
- entry = get_unlocked_entry(&xas, 0);
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
if (entry)
page = dax_busy_page(entry);
put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -743,14 +825,14 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
static int __dax_invalidate_entry(struct address_space *mapping,
- pgoff_t index, bool trunc)
+ pgoff_t index, bool trunc)
{
XA_STATE(xas, &mapping->i_pages, index);
int ret = 0;
void *entry;
xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas, 0);
+ entry = get_next_unlocked_entry(&xas, 0);
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
goto out;
if (!trunc &&
@@ -776,7 +858,9 @@ static int __dax_clear_dirty_range(struct address_space *mapping,
xas_lock_irq(&xas);
xas_for_each(&xas, entry, end) {
- entry = get_unlocked_entry(&xas, 0);
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
put_unlocked_entry(&xas, entry, WAKE_NEXT);
@@ -813,6 +897,107 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
return ret;
}
+void dax_delete_mapping_range(struct address_space *mapping,
+ loff_t start, loff_t end)
+{
+ void *entry;
+ pgoff_t start_idx = start >> PAGE_SHIFT;
+ pgoff_t end_idx;
+ XA_STATE(xas, &mapping->i_pages, start_idx);
+
+	/* If end == LLONG_MAX, all pages from start to end of file */
+ if (end == LLONG_MAX)
+ end_idx = ULONG_MAX;
+ else
+ end_idx = end >> PAGE_SHIFT;
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, end_idx) {
+ if (!xa_is_value(entry))
+ continue;
+ entry = wait_entry_unlocked_exclusive(&xas, entry);
+ if (!entry)
+ continue;
+ dax_disassociate_entry(entry, mapping, true);
+ xas_store(&xas, NULL);
+ mapping->nrpages -= 1UL << dax_entry_order(entry);
+ put_unlocked_entry(&xas, entry, WAKE_ALL);
+ }
+ xas_unlock_irq(&xas);
+}
+EXPORT_SYMBOL_GPL(dax_delete_mapping_range);
+
+static int wait_page_idle(struct page *page,
+ void (cb)(struct inode *),
+ struct inode *inode)
+{
+ return ___wait_var_event(page, dax_page_is_idle(page),
+ TASK_INTERRUPTIBLE, 0, 0, cb(inode));
+}
+
+static void wait_page_idle_uninterruptible(struct page *page,
+ struct inode *inode)
+{
+ ___wait_var_event(page, dax_page_is_idle(page),
+ TASK_UNINTERRUPTIBLE, 0, 0, schedule());
+}
+
+/*
+ * Unmaps the inode and waits for any DMA to complete prior to deleting the
+ * DAX mapping entries for the range.
+ *
+ * For NOWAIT behavior, pass @cb as NULL to early-exit on the first
+ * busy page found.
+ */
+int dax_break_layout(struct inode *inode, loff_t start, loff_t end,
+ void (cb)(struct inode *))
+{
+ struct page *page;
+ int error = 0;
+
+ if (!dax_mapping(inode->i_mapping))
+ return 0;
+
+ do {
+ page = dax_layout_busy_page_range(inode->i_mapping, start, end);
+ if (!page)
+ break;
+ if (!cb) {
+ error = -ERESTARTSYS;
+ break;
+ }
+
+ error = wait_page_idle(page, cb, inode);
+ } while (error == 0);
+
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, start, end);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dax_break_layout);
+
+void dax_break_layout_final(struct inode *inode)
+{
+ struct page *page;
+
+ if (!dax_mapping(inode->i_mapping))
+ return;
+
+ do {
+ page = dax_layout_busy_page_range(inode->i_mapping, 0,
+ LLONG_MAX);
+ if (!page)
+ break;
+
+ wait_page_idle_uninterruptible(page, inode);
+ } while (true);
+
+ if (!page)
+ dax_delete_mapping_range(inode->i_mapping, 0, LLONG_MAX);
+}
+EXPORT_SYMBOL_GPL(dax_break_layout_final);
+
/*
* Invalidate DAX entry if it is clean.
*/
@@ -895,8 +1080,9 @@ static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,
void *old;
dax_disassociate_entry(entry, mapping, false);
- dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address,
- shared);
+ dax_associate_entry(new_entry, mapping, vmf->vma,
+ vmf->address, shared);
+
/*
* Only swap our new entry into the page cache if the current
* entry is a zero page or an empty entry. If a normal PTE or
@@ -940,7 +1126,7 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
if (unlikely(dax_is_locked(entry))) {
void *old_entry = entry;
- entry = get_unlocked_entry(xas, 0);
+ entry = get_next_unlocked_entry(xas, 0);
/* Entry got punched out / reallocated? */
if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
@@ -1084,9 +1270,7 @@ static int dax_iomap_direct_access(const struct iomap *iomap, loff_t pos,
goto out;
if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
goto out;
- /* For larger pages we need devmap */
- if (length > 1 && !pfn_t_devmap(*pfnp))
- goto out;
+
rc = 0;
out_check_addr:
@@ -1193,7 +1377,7 @@ static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,
*entry = dax_insert_entry(xas, vmf, iter, *entry, pfn, DAX_ZERO_PAGE);
- ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
+ ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), false);
trace_dax_load_hole(inode, vmf, ret);
return ret;
}
@@ -1664,7 +1848,8 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
loff_t pos = (loff_t)xas->xa_index << PAGE_SHIFT;
bool write = iter->flags & IOMAP_WRITE;
unsigned long entry_flags = pmd ? DAX_PMD : 0;
- int err = 0;
+ struct folio *folio;
+ int ret, err = 0;
pfn_t pfn;
void *kaddr;
@@ -1696,17 +1881,19 @@ static vm_fault_t dax_fault_iter(struct vm_fault *vmf,
return dax_fault_return(err);
}
+ folio = dax_to_folio(*entry);
if (dax_fault_is_synchronous(iter, vmf->vma))
return dax_fault_synchronous_pfnp(pfnp, pfn);
- /* insert PMD pfn */
+ folio_ref_inc(folio);
if (pmd)
- return vmf_insert_pfn_pmd(vmf, pfn, write);
+ ret = vmf_insert_folio_pmd(vmf, pfn_folio(pfn_t_to_pfn(pfn)),
+ write);
+ else
+ ret = vmf_insert_page_mkwrite(vmf, pfn_t_to_page(pfn), write);
+ folio_put(folio);
- /* insert PTE pfn */
- if (write)
- return vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
- return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
+ return ret;
}
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
@@ -1949,11 +2136,12 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
struct address_space *mapping = vmf->vma->vm_file->f_mapping;
XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
+ struct folio *folio;
void *entry;
vm_fault_t ret;
xas_lock_irq(&xas);
- entry = get_unlocked_entry(&xas, order);
+ entry = get_next_unlocked_entry(&xas, order);
/* Did we race with someone splitting entry or so? */
if (!entry || dax_is_conflict(entry) ||
(order == 0 && !dax_is_pte_entry(entry))) {
@@ -1966,14 +2154,17 @@ dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
dax_lock_entry(&xas, entry);
xas_unlock_irq(&xas);
+ folio = pfn_folio(pfn_t_to_pfn(pfn));
+ folio_ref_inc(folio);
if (order == 0)
- ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
+ ret = vmf_insert_page_mkwrite(vmf, &folio->page, true);
#ifdef CONFIG_FS_DAX_PMD
else if (order == PMD_ORDER)
- ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
+ ret = vmf_insert_folio_pmd(vmf, folio, FAULT_FLAG_WRITE);
#endif
else
ret = VM_FAULT_FALLBACK;
+ folio_put(folio);
dax_unlock_entry(&xas, entry);
trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
return ret;
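
dax_break_layout() centralizes the unmap-and-wait loop that callers previously
open-coded; the ext4 and fuse hunks further down convert to it. Below is a
minimal sketch of the calling convention for a hypothetical filesystem (the
myfs_* names are illustrative, not from this patch): the callback drops and
retakes the invalidate lock so the holder of the busy page can make progress,
mirroring fuse_wait_dax_page() later in this series.

    /*
     * Illustrative only: how a filesystem might use dax_break_layout().
     * myfs_wait_dax_page() and myfs_break_layouts() are hypothetical.
     */
    static void myfs_wait_dax_page(struct inode *inode)
    {
    	/* Drop the lock so the DMA holder can finish, then retake it. */
    	filemap_invalidate_unlock(inode->i_mapping);
    	schedule();
    	filemap_invalidate_lock(inode->i_mapping);
    }

    /* Called with mapping->invalidate_lock held exclusively. */
    static int myfs_break_layouts(struct inode *inode, loff_t start, loff_t end)
    {
    	return dax_break_layout(inode, start, end, myfs_wait_dax_page);
    }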
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index c294a8fc566d..cb1b6d0c3454 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -57,11 +57,10 @@ static ssize_t efivarfs_file_write(struct file *file,
if (bytes == -ENOENT) {
/*
- * FIXME: temporary workaround for fwupdate, signal
- * failed write with a 1 to keep created but not
- * written files
+ * A zero size signals to release() that the write
+ * deleted the variable.
*/
- i_size_write(inode, 1);
+ i_size_write(inode, 0);
} else {
i_size_write(inode, datasize + sizeof(attributes));
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
@@ -125,8 +124,7 @@ static int efivarfs_file_release(struct inode *inode, struct file *file)
struct efivar_entry *var = inode->i_private;
inode_lock(inode);
- /* FIXME: temporary work around for fwupdate */
- var->removed = (--var->open_count == 0 && i_size_read(inode) == 1);
+ var->removed = (--var->open_count == 0 && i_size_read(inode) == 0);
inode_unlock(inode);
if (var->removed)
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
index 331e49cd1b8d..8f68ec49ad89 100644
--- a/fs/erofs/Kconfig
+++ b/fs/erofs/Kconfig
@@ -3,8 +3,8 @@
config EROFS_FS
tristate "EROFS filesystem support"
depends on BLOCK
+ select CRC32
select FS_IOMAP
- select LIBCRC32C
help
EROFS (Enhanced Read-Only File System) is a lightweight read-only
file system with modern designs (e.g. no buffer heads, inline
diff --git a/fs/exec.c b/fs/exec.c
index f45859ad13ac..8e4ea5f1e64c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1227,13 +1227,12 @@ int begin_new_exec(struct linux_binprm * bprm)
*/
bprm->point_of_no_return = true;
- /*
- * Make this the only thread in the thread group.
- */
+ /* Make this the only thread in the thread group */
retval = de_thread(me);
if (retval)
goto out;
-
+ /* see the comment in check_unsafe_exec() */
+ current->fs->in_exec = 0;
/*
* Cancel any io_uring activity across execve
*/
@@ -1495,6 +1494,8 @@ static void free_bprm(struct linux_binprm *bprm)
}
free_arg_pages(bprm);
if (bprm->cred) {
+ /* in case exec fails before de_thread() succeeds */
+ current->fs->in_exec = 0;
mutex_unlock(&current->signal->cred_guard_mutex);
abort_creds(bprm->cred);
}
@@ -1616,6 +1617,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
* suid exec because the differently privileged task
* will be able to manipulate the current directory, etc.
* It would be nice to force an unshare instead...
+ *
+ * Otherwise we set fs->in_exec = 1 to deny clone(CLONE_FS)
+ * from another sub-thread until de_thread() succeeds; this
+ * state is protected by the cred_guard_mutex we hold.
*/
n_fs = 1;
spin_lock(&p->fs->lock);
@@ -1859,10 +1864,9 @@ static int bprm_execve(struct linux_binprm *bprm)
goto out;
sched_mm_cid_after_execve(current);
+ rseq_execve(current);
/* execve succeeded */
- current->fs->in_exec = 0;
current->in_execve = 0;
- rseq_execve(current);
user_events_execve(current);
acct_update_integrals(current);
task_numa_free(current, false);
@@ -1879,7 +1883,7 @@ out:
force_fatal_sig(SIGSEGV);
sched_mm_cid_after_execve(current);
- current->fs->in_exec = 0;
+ rseq_set_notify_resume(current);
current->in_execve = 0;
return retval;
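
The fs->in_exec handshake above only works because the clone(CLONE_FS) path
checks the flag under fs->lock. For context, a paraphrased sketch of the
counterpart check in kernel/fork.c's copy_fs() — an assumption about current
mainline behavior, not part of this patch:

    /* Paraphrase of copy_fs() (assumed mainline shape, not in this diff):
     * clone(CLONE_FS) fails while a sibling thread is between
     * check_unsafe_exec() and de_thread(), i.e. while fs->in_exec is set. */
    static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
    {
    	struct fs_struct *fs = current->fs;

    	if (clone_flags & CLONE_FS) {
    		spin_lock(&fs->lock);
    		if (fs->in_exec) {
    			spin_unlock(&fs->lock);
    			return -EAGAIN;
    		}
    		fs->users++;
    		spin_unlock(&fs->lock);
    		return 0;
    	}
    	tsk->fs = copy_fs_struct(fs);
    	if (!tsk->fs)
    		return -ENOMEM;
    	return 0;
    }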
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index 9ff825f1502d..cc01556c9d9b 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -147,7 +147,6 @@ int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
unsigned int ent_idx;
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
- struct exfat_mount_options *opts = &sbi->options;
if (!is_valid_cluster(sbi, clu))
return -EIO;
@@ -163,19 +162,6 @@ int exfat_clear_bitmap(struct inode *inode, unsigned int clu, bool sync)
exfat_update_bh(sbi->vol_amap[i], sync);
- if (opts->discard) {
- int ret_discard;
-
- ret_discard = sb_issue_discard(sb,
- exfat_cluster_to_sector(sbi, clu),
- (1 << sbi->sect_per_clus_bits), GFP_NOFS, 0);
-
- if (ret_discard == -EOPNOTSUPP) {
- exfat_err(sb, "discard not supported by device, disabling");
- opts->discard = 0;
- }
- }
-
return 0;
}
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index d30ce18a88b7..f8ead4d47ef0 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -14,8 +14,6 @@
#define EXFAT_ROOT_INO 1
-#define EXFAT_CLUSTERS_UNTRACKED (~0u)
-
/*
* exfat error flags
*/
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index 6f3651c6ca91..23065f948ae7 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -144,6 +144,20 @@ int exfat_chain_cont_cluster(struct super_block *sb, unsigned int chain,
return 0;
}
+static inline void exfat_discard_cluster(struct super_block *sb,
+ unsigned int clu, unsigned int num_clusters)
+{
+ int ret;
+ struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+ ret = sb_issue_discard(sb, exfat_cluster_to_sector(sbi, clu),
+ sbi->sect_per_clus * num_clusters, GFP_NOFS, 0);
+ if (ret == -EOPNOTSUPP) {
+ exfat_err(sb, "discard not supported by device, disabling");
+ sbi->options.discard = 0;
+ }
+}
+
/* This function must be called with bitmap_lock held */
static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain)
{
@@ -196,7 +210,12 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
clu++;
num_clusters++;
} while (num_clusters < p_chain->size);
+
+ if (sbi->options.discard)
+ exfat_discard_cluster(sb, p_chain->dir, p_chain->size);
} else {
+ unsigned int nr_clu = 1;
+
do {
bool sync = false;
unsigned int n_clu = clu;
@@ -215,6 +234,16 @@ static int __exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain
if (exfat_clear_bitmap(inode, clu, (sync && IS_DIRSYNC(inode))))
break;
+
+ if (sbi->options.discard) {
+ if (n_clu == clu + 1)
+ nr_clu++;
+ else {
+ exfat_discard_cluster(sb, clu - nr_clu + 1, nr_clu);
+ nr_clu = 1;
+ }
+ }
+
clu = n_clu;
num_clusters++;
@@ -265,7 +294,7 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain,
clu = next;
if (exfat_ent_get(sb, clu, &next))
return -EIO;
- } while (next != EXFAT_EOF_CLUSTER);
+ } while (next != EXFAT_EOF_CLUSTER && count <= p_chain->size);
if (p_chain->size != count) {
exfat_fs_error(sb,
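
The batching logic added to __exfat_free_cluster() accumulates contiguous runs
while walking the FAT chain and flushes one discard whenever the chain breaks.
A standalone userspace model of that run-length logic (hypothetical demo, not
kernel code):

    /* Model of the discard batching above: chain 10,11,12,20,21 yields
     * discard [10..12] followed by discard [20..21]. */
    #include <stdio.h>

    static void discard(unsigned s, unsigned n)
    {
    	printf("discard [%u..%u]\n", s, s + n - 1);
    }

    int main(void)
    {
    	unsigned chain[] = { 10, 11, 12, 20, 21, 0 };	/* 0 = EOF */
    	unsigned clu = chain[0], nr_clu = 1;

    	for (int i = 1; chain[i]; i++) {
    		unsigned n_clu = chain[i];

    		if (n_clu == clu + 1)
    			nr_clu++;	/* extend the contiguous run */
    		else {
    			discard(clu - nr_clu + 1, nr_clu);
    			nr_clu = 1;	/* start a new run */
    		}
    		clu = n_clu;
    	}
    	discard(clu - nr_clu + 1, nr_clu);	/* flush the final run */
    	return 0;
    }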
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 807349d8ea05..841a5b18e3df 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -582,6 +582,9 @@ static ssize_t exfat_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
loff_t pos = iocb->ki_pos;
loff_t valid_size;
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
inode_lock(inode);
valid_size = ei->valid_size;
@@ -635,6 +638,16 @@ unlock:
return ret;
}
+static ssize_t exfat_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+
+ if (unlikely(exfat_forced_shutdown(inode->i_sb)))
+ return -EIO;
+
+ return generic_file_read_iter(iocb, iter);
+}
+
static vm_fault_t exfat_page_mkwrite(struct vm_fault *vmf)
{
int err;
@@ -672,14 +685,26 @@ static const struct vm_operations_struct exfat_file_vm_ops = {
static int exfat_file_mmap(struct file *file, struct vm_area_struct *vma)
{
+ if (unlikely(exfat_forced_shutdown(file_inode(file)->i_sb)))
+ return -EIO;
+
file_accessed(file);
vma->vm_ops = &exfat_file_vm_ops;
return 0;
}
+static ssize_t exfat_splice_read(struct file *in, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len, unsigned int flags)
+{
+ if (unlikely(exfat_forced_shutdown(file_inode(in)->i_sb)))
+ return -EIO;
+
+ return filemap_splice_read(in, ppos, pipe, len, flags);
+}
+
const struct file_operations exfat_file_operations = {
.llseek = generic_file_llseek,
- .read_iter = generic_file_read_iter,
+ .read_iter = exfat_file_read_iter,
.write_iter = exfat_file_write_iter,
.unlocked_ioctl = exfat_ioctl,
#ifdef CONFIG_COMPAT
@@ -687,7 +712,7 @@ const struct file_operations exfat_file_operations = {
#endif
.mmap = exfat_file_mmap,
.fsync = exfat_file_fsync,
- .splice_read = filemap_splice_read,
+ .splice_read = exfat_splice_read,
.splice_write = iter_file_splice_write,
};
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 96952d4acb50..b22c02d6000f 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -274,9 +274,11 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
sector_t last_block;
sector_t phys = 0;
sector_t valid_blks;
+ loff_t i_size;
mutex_lock(&sbi->s_lock);
- last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size_read(inode), sb);
+ i_size = i_size_read(inode);
+ last_block = EXFAT_B_TO_BLK_ROUND_UP(i_size, sb);
if (iblock >= last_block && !create)
goto done;
@@ -305,77 +307,99 @@ static int exfat_get_block(struct inode *inode, sector_t iblock,
if (buffer_delay(bh_result))
clear_buffer_delay(bh_result);
- if (create) {
+ /*
+ * In most cases, we just need to set bh_result to mapped, unmapped
+ * or new status as follows:
+ * 1. i_size == valid_size
+ * 2. write case (create == 1)
+ * 3. direct_read (!bh_result->b_folio)
+ * -> the unwritten part will be zeroed in exfat_direct_IO()
+ *
+ * Otherwise, in the case of buffered read, it is necessary to take
+ * care of the last partially written block if valid_size is not
+ * equal to i_size.
+ */
+ if (i_size == ei->valid_size || create || !bh_result->b_folio)
valid_blks = EXFAT_B_TO_BLK_ROUND_UP(ei->valid_size, sb);
+ else
+ valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
- if (iblock + max_blocks < valid_blks) {
- /* The range has been written, map it */
- goto done;
- } else if (iblock < valid_blks) {
- /*
- * The range has been partially written,
- * map the written part.
- */
- max_blocks = valid_blks - iblock;
- goto done;
- }
+ /* The range has been fully written, map it */
+ if (iblock + max_blocks < valid_blks)
+ goto done;
- /* The area has not been written, map and mark as new. */
- set_buffer_new(bh_result);
+ /* The range has been partially written, map the written part */
+ if (iblock < valid_blks) {
+ max_blocks = valid_blks - iblock;
+ goto done;
+ }
+ /* The area has not been written, map and mark as new for create case */
+ if (create) {
+ set_buffer_new(bh_result);
ei->valid_size = EXFAT_BLK_TO_B(iblock + max_blocks, sb);
mark_inode_dirty(inode);
- } else {
- valid_blks = EXFAT_B_TO_BLK(ei->valid_size, sb);
+ goto done;
+ }
- if (iblock + max_blocks < valid_blks) {
- /* The range has been written, map it */
+ /*
+ * The area has just one partially written block. In that case,
+ * read the block and fill its unwritten part with zeros.
+ */
+ if (bh_result->b_folio && iblock == valid_blks &&
+ (ei->valid_size & (sb->s_blocksize - 1))) {
+ loff_t size, pos;
+ void *addr;
+
+ max_blocks = 1;
+
+ /*
+ * No buffer_head is allocated.
+ * (1) bmap: It's enough to set blocknr without I/O.
+ * (2) read: The unwritten part should be filled with zero.
+ * If a folio does not have any buffers,
+ * return -EAGAIN to fall back to
+ * block_read_full_folio() for per-bh IO.
+ */
+ if (!folio_buffers(bh_result->b_folio)) {
+ err = -EAGAIN;
goto done;
- } else if (iblock < valid_blks) {
- /*
- * The area has been partially written,
- * map the written part.
- */
- max_blocks = valid_blks - iblock;
+ }
+
+ pos = EXFAT_BLK_TO_B(iblock, sb);
+ size = ei->valid_size - pos;
+ addr = folio_address(bh_result->b_folio) +
+ offset_in_folio(bh_result->b_folio, pos);
+
+ /* Check that bh->b_data points to the proper address in the folio */
+ if (bh_result->b_data != addr) {
+ exfat_fs_error_ratelimit(sb,
+ "b_data(%p) != folio_addr(%p)",
+ bh_result->b_data, addr);
+ err = -EINVAL;
goto done;
- } else if (iblock == valid_blks &&
- (ei->valid_size & (sb->s_blocksize - 1))) {
- /*
- * The block has been partially written,
- * zero the unwritten part and map the block.
- */
- loff_t size, off, pos;
-
- max_blocks = 1;
-
- /*
- * For direct read, the unwritten part will be zeroed in
- * exfat_direct_IO()
- */
- if (!bh_result->b_folio)
- goto done;
-
- pos = EXFAT_BLK_TO_B(iblock, sb);
- size = ei->valid_size - pos;
- off = pos & (PAGE_SIZE - 1);
-
- folio_set_bh(bh_result, bh_result->b_folio, off);
- err = bh_read(bh_result, 0);
- if (err < 0)
- goto unlock_ret;
-
- folio_zero_segment(bh_result->b_folio, off + size,
- off + sb->s_blocksize);
- } else {
- /*
- * The range has not been written, clear the mapped flag
- * to only zero the cache and do not read from disk.
- */
- clear_buffer_mapped(bh_result);
}
+
+ /* Read a block */
+ err = bh_read(bh_result, 0);
+ if (err < 0)
+ goto done;
+
+ /* Zero unwritten part of a block */
+ memset(bh_result->b_data + size, 0, bh_result->b_size - size);
+ err = 0;
+ goto done;
}
+
+ /*
+ * The area has not been written; clear the mapped flag for the
+ * read/bmap cases so the buffer is filled with zeros without
+ * reading from disk.
+ */
+ clear_buffer_mapped(bh_result);
done:
bh_result->b_size = EXFAT_BLK_TO_B(max_blocks, sb);
+ if (err < 0)
+ clear_buffer_mapped(bh_result);
unlock_ret:
mutex_unlock(&sbi->s_lock);
return err;
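
The valid_blks choice above hinges on the rounding direction. A hypothetical
worked example with 4096-byte blocks and valid_size = 6144 (1.5 blocks
written): round-up yields 2, so writes and direct reads map block 1 as-is,
while round-down yields 1, sending a buffered read of block 1 down the
zero-fill path. A small demo of the arithmetic (macro names are illustrative,
not the exfat ones):

    #include <stdio.h>

    #define BLK_BITS 12	/* 4096-byte blocks, as an example */
    #define B_TO_BLK(b)          ((b) >> BLK_BITS)
    #define B_TO_BLK_ROUND_UP(b) (((b) + (1 << BLK_BITS) - 1) >> BLK_BITS)

    int main(void)
    {
    	long long valid_size = 6144;	/* 1.5 blocks written */

    	printf("write/direct read: valid_blks = %lld\n",
    	       B_TO_BLK_ROUND_UP(valid_size));	/* 2: block 1 mapped as-is */
    	printf("buffered read:     valid_blks = %lld\n",
    	       B_TO_BLK(valid_size));	/* 1: block 1 takes zero-fill path */
    	return 0;
    }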
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index bd57844414aa..8465033a6cf0 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -67,15 +67,6 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
unsigned long long id = huge_encode_dev(sb->s_bdev->bd_dev);
- if (sbi->used_clusters == EXFAT_CLUSTERS_UNTRACKED) {
- mutex_lock(&sbi->s_lock);
- if (exfat_count_used_clusters(sb, &sbi->used_clusters)) {
- mutex_unlock(&sbi->s_lock);
- return -EIO;
- }
- mutex_unlock(&sbi->s_lock);
- }
-
buf->f_type = sb->s_magic;
buf->f_bsize = sbi->cluster_size;
buf->f_blocks = sbi->num_clusters - 2; /* clu 0 & 1 */
@@ -531,7 +522,6 @@ static int exfat_read_boot_sector(struct super_block *sb)
sbi->vol_flags = le16_to_cpu(p_boot->vol_flags);
sbi->vol_flags_persistent = sbi->vol_flags & (VOLUME_DIRTY | MEDIA_FAILURE);
sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER;
- sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED;
/* check consistencies */
if ((u64)sbi->num_FAT_sectors << p_boot->sect_size_bits <
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index b5845c4846b8..128dd092916b 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -608,4 +608,5 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
}
EXPORT_SYMBOL_GPL(exportfs_decode_fh);
+MODULE_DESCRIPTION("Code mapping from inodes to file handles");
MODULE_LICENSE("GPL");
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index f38bdd46e4f7..4025f875252a 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -368,6 +368,7 @@ struct ext2_inode {
#define EXT2_MOUNT_ERRORS_CONT 0x000010 /* Continue on errors */
#define EXT2_MOUNT_ERRORS_RO 0x000020 /* Remount fs ro on errors */
#define EXT2_MOUNT_ERRORS_PANIC 0x000040 /* Panic on errors */
+#define EXT2_MOUNT_ERRORS_MASK 0x000070
#define EXT2_MOUNT_MINIX_DF 0x000080 /* Mimics the Minix statfs */
#define EXT2_MOUNT_NOBH 0x000100 /* No buffer_heads */
#define EXT2_MOUNT_NO_UID32 0x000200 /* Disable 32-bit UIDs */
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 37f7ce56adce..28ff47ec4be6 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -23,7 +23,8 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/blkdev.h>
-#include <linux/parser.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
#include <linux/random.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
@@ -40,7 +41,6 @@
#include "acl.h"
static void ext2_write_super(struct super_block *sb);
-static int ext2_remount (struct super_block * sb, int * flags, char * data);
static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf);
static int ext2_sync_fs(struct super_block *sb, int wait);
static int ext2_freeze(struct super_block *sb);
@@ -81,6 +81,33 @@ void ext2_error(struct super_block *sb, const char *function,
}
}
+static void ext2_msg_fc(struct fs_context *fc, const char *prefix,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+ const char *s_id;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ s_id = fc->root->d_sb->s_id;
+ } else {
+ /* get last path component of source */
+ s_id = strrchr(fc->source, '/');
+ if (s_id)
+ s_id++;
+ else
+ s_id = fc->source;
+ }
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ printk("%sEXT2-fs (%s): %pV\n", prefix, s_id, &vaf);
+
+ va_end(args);
+}
+
void ext2_msg(struct super_block *sb, const char *prefix,
const char *fmt, ...)
{
@@ -346,7 +373,6 @@ static const struct super_operations ext2_sops = {
.freeze_fs = ext2_freeze,
.unfreeze_fs = ext2_unfreeze,
.statfs = ext2_statfs,
- .remount_fs = ext2_remount,
.show_options = ext2_show_options,
#ifdef CONFIG_QUOTA
.quota_read = ext2_quota_read,
@@ -402,230 +428,217 @@ static const struct export_operations ext2_export_ops = {
.get_parent = ext2_get_parent,
};
-static unsigned long get_sb_block(void **data)
-{
- unsigned long sb_block;
- char *options = (char *) *data;
-
- if (!options || strncmp(options, "sb=", 3) != 0)
- return 1; /* Default location */
- options += 3;
- sb_block = simple_strtoul(options, &options, 0);
- if (*options && *options != ',') {
- printk("EXT2-fs: Invalid sb specification: %s\n",
- (char *) *data);
- return 1;
- }
- if (*options == ',')
- options++;
- *data = (void *) options;
- return sb_block;
-}
-
enum {
- Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
- Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic,
- Opt_err_ro, Opt_nouid32, Opt_debug,
- Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
- Opt_acl, Opt_noacl, Opt_xip, Opt_dax, Opt_ignore, Opt_err, Opt_quota,
- Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
+ Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid,
+ Opt_sb, Opt_errors, Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
+ Opt_nobh, Opt_user_xattr, Opt_acl, Opt_xip, Opt_dax, Opt_ignore,
+ Opt_quota, Opt_usrquota, Opt_grpquota, Opt_reservation,
};
-static const match_table_t tokens = {
- {Opt_bsd_df, "bsddf"},
- {Opt_minix_df, "minixdf"},
- {Opt_grpid, "grpid"},
- {Opt_grpid, "bsdgroups"},
- {Opt_nogrpid, "nogrpid"},
- {Opt_nogrpid, "sysvgroups"},
- {Opt_resgid, "resgid=%u"},
- {Opt_resuid, "resuid=%u"},
- {Opt_sb, "sb=%u"},
- {Opt_err_cont, "errors=continue"},
- {Opt_err_panic, "errors=panic"},
- {Opt_err_ro, "errors=remount-ro"},
- {Opt_nouid32, "nouid32"},
- {Opt_debug, "debug"},
- {Opt_oldalloc, "oldalloc"},
- {Opt_orlov, "orlov"},
- {Opt_nobh, "nobh"},
- {Opt_user_xattr, "user_xattr"},
- {Opt_nouser_xattr, "nouser_xattr"},
- {Opt_acl, "acl"},
- {Opt_noacl, "noacl"},
- {Opt_xip, "xip"},
- {Opt_dax, "dax"},
- {Opt_grpquota, "grpquota"},
- {Opt_ignore, "noquota"},
- {Opt_quota, "quota"},
- {Opt_usrquota, "usrquota"},
- {Opt_reservation, "reservation"},
- {Opt_noreservation, "noreservation"},
- {Opt_err, NULL}
+static const struct constant_table ext2_param_errors[] = {
+ {"continue", EXT2_MOUNT_ERRORS_CONT},
+ {"panic", EXT2_MOUNT_ERRORS_PANIC},
+ {"remount-ro", EXT2_MOUNT_ERRORS_RO},
+ {}
+};
+
+static const struct fs_parameter_spec ext2_param_spec[] = {
+ fsparam_flag ("bsddf", Opt_bsd_df),
+ fsparam_flag ("minixdf", Opt_minix_df),
+ fsparam_flag ("grpid", Opt_grpid),
+ fsparam_flag ("bsdgroups", Opt_grpid),
+ fsparam_flag ("nogrpid", Opt_nogrpid),
+ fsparam_flag ("sysvgroups", Opt_nogrpid),
+ fsparam_gid ("resgid", Opt_resgid),
+ fsparam_uid ("resuid", Opt_resuid),
+ fsparam_u32 ("sb", Opt_sb),
+ fsparam_enum ("errors", Opt_errors, ext2_param_errors),
+ fsparam_flag ("nouid32", Opt_nouid32),
+ fsparam_flag ("debug", Opt_debug),
+ fsparam_flag ("oldalloc", Opt_oldalloc),
+ fsparam_flag ("orlov", Opt_orlov),
+ fsparam_flag ("nobh", Opt_nobh),
+ fsparam_flag_no ("user_xattr", Opt_user_xattr),
+ fsparam_flag_no ("acl", Opt_acl),
+ fsparam_flag ("xip", Opt_xip),
+ fsparam_flag ("dax", Opt_dax),
+ fsparam_flag ("grpquota", Opt_grpquota),
+ fsparam_flag ("noquota", Opt_ignore),
+ fsparam_flag ("quota", Opt_quota),
+ fsparam_flag ("usrquota", Opt_usrquota),
+ fsparam_flag_no ("reservation", Opt_reservation),
+ {}
+};
+
+#define EXT2_SPEC_s_resuid (1 << 0)
+#define EXT2_SPEC_s_resgid (1 << 1)
+
+struct ext2_fs_context {
+ unsigned long vals_s_flags; /* Bits to set in s_flags */
+ unsigned long mask_s_flags; /* Bits changed in s_flags */
+ unsigned int vals_s_mount_opt;
+ unsigned int mask_s_mount_opt;
+ kuid_t s_resuid;
+ kgid_t s_resgid;
+ unsigned long s_sb_block;
+ unsigned int spec;
+};
-static int parse_options(char *options, struct super_block *sb,
- struct ext2_mount_options *opts)
+static inline void ctx_set_mount_opt(struct ext2_fs_context *ctx,
+ unsigned long flag)
{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option;
- kuid_t uid;
- kgid_t gid;
-
- if (!options)
- return 1;
-
- while ((p = strsep (&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
-
- token = match_token(p, tokens, args);
- switch (token) {
- case Opt_bsd_df:
- clear_opt (opts->s_mount_opt, MINIX_DF);
- break;
- case Opt_minix_df:
- set_opt (opts->s_mount_opt, MINIX_DF);
- break;
- case Opt_grpid:
- set_opt (opts->s_mount_opt, GRPID);
- break;
- case Opt_nogrpid:
- clear_opt (opts->s_mount_opt, GRPID);
- break;
- case Opt_resuid:
- if (match_int(&args[0], &option))
- return 0;
- uid = make_kuid(current_user_ns(), option);
- if (!uid_valid(uid)) {
- ext2_msg(sb, KERN_ERR, "Invalid uid value %d", option);
- return 0;
-
- }
- opts->s_resuid = uid;
- break;
- case Opt_resgid:
- if (match_int(&args[0], &option))
- return 0;
- gid = make_kgid(current_user_ns(), option);
- if (!gid_valid(gid)) {
- ext2_msg(sb, KERN_ERR, "Invalid gid value %d", option);
- return 0;
- }
- opts->s_resgid = gid;
- break;
- case Opt_sb:
- /* handled by get_sb_block() instead of here */
- /* *sb_block = match_int(&args[0]); */
- break;
- case Opt_err_panic:
- clear_opt (opts->s_mount_opt, ERRORS_CONT);
- clear_opt (opts->s_mount_opt, ERRORS_RO);
- set_opt (opts->s_mount_opt, ERRORS_PANIC);
- break;
- case Opt_err_ro:
- clear_opt (opts->s_mount_opt, ERRORS_CONT);
- clear_opt (opts->s_mount_opt, ERRORS_PANIC);
- set_opt (opts->s_mount_opt, ERRORS_RO);
- break;
- case Opt_err_cont:
- clear_opt (opts->s_mount_opt, ERRORS_RO);
- clear_opt (opts->s_mount_opt, ERRORS_PANIC);
- set_opt (opts->s_mount_opt, ERRORS_CONT);
- break;
- case Opt_nouid32:
- set_opt (opts->s_mount_opt, NO_UID32);
- break;
- case Opt_debug:
- set_opt (opts->s_mount_opt, DEBUG);
- break;
- case Opt_oldalloc:
- set_opt (opts->s_mount_opt, OLDALLOC);
- break;
- case Opt_orlov:
- clear_opt (opts->s_mount_opt, OLDALLOC);
- break;
- case Opt_nobh:
- ext2_msg(sb, KERN_INFO,
- "nobh option not supported");
- break;
+ ctx->mask_s_mount_opt |= flag;
+ ctx->vals_s_mount_opt |= flag;
+}
+
+static inline void ctx_clear_mount_opt(struct ext2_fs_context *ctx,
+ unsigned long flag)
+{
+ ctx->mask_s_mount_opt |= flag;
+ ctx->vals_s_mount_opt &= ~flag;
+}
+
+static inline unsigned long
+ctx_test_mount_opt(struct ext2_fs_context *ctx, unsigned long flag)
+{
+ return (ctx->vals_s_mount_opt & flag);
+}
+
+static inline bool
+ctx_parsed_mount_opt(struct ext2_fs_context *ctx, unsigned long flag)
+{
+ return (ctx->mask_s_mount_opt & flag);
+}
+
+static void ext2_free_fc(struct fs_context *fc)
+{
+ kfree(fc->fs_private);
+}
+
+static int ext2_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ int opt;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, ext2_param_spec, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_bsd_df:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_MINIX_DF);
+ break;
+ case Opt_minix_df:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_MINIX_DF);
+ break;
+ case Opt_grpid:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_GRPID);
+ break;
+ case Opt_nogrpid:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_GRPID);
+ break;
+ case Opt_resuid:
+ ctx->s_resuid = result.uid;
+ ctx->spec |= EXT2_SPEC_s_resuid;
+ break;
+ case Opt_resgid:
+ ctx->s_resgid = result.gid;
+ ctx->spec |= EXT2_SPEC_s_resgid;
+ break;
+ case Opt_sb:
+ /* Note that this is silently ignored on remount */
+ ctx->s_sb_block = result.uint_32;
+ break;
+ case Opt_errors:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_ERRORS_MASK);
+ ctx_set_mount_opt(ctx, result.uint_32);
+ break;
+ case Opt_nouid32:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_NO_UID32);
+ break;
+ case Opt_debug:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_DEBUG);
+ break;
+ case Opt_oldalloc:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_OLDALLOC);
+ break;
+ case Opt_orlov:
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_OLDALLOC);
+ break;
+ case Opt_nobh:
+		ext2_msg_fc(fc, KERN_INFO, "nobh option not supported");
+ break;
#ifdef CONFIG_EXT2_FS_XATTR
- case Opt_user_xattr:
- set_opt (opts->s_mount_opt, XATTR_USER);
- break;
- case Opt_nouser_xattr:
- clear_opt (opts->s_mount_opt, XATTR_USER);
- break;
+ case Opt_user_xattr:
+ if (!result.negated)
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_XATTR_USER);
+ else
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_XATTR_USER);
+ break;
#else
- case Opt_user_xattr:
- case Opt_nouser_xattr:
- ext2_msg(sb, KERN_INFO, "(no)user_xattr options"
- "not supported");
- break;
+ case Opt_user_xattr:
+ ext2_msg_fc(fc, KERN_INFO, "(no)user_xattr options not supported");
+ break;
#endif
#ifdef CONFIG_EXT2_FS_POSIX_ACL
- case Opt_acl:
- set_opt(opts->s_mount_opt, POSIX_ACL);
- break;
- case Opt_noacl:
- clear_opt(opts->s_mount_opt, POSIX_ACL);
- break;
+ case Opt_acl:
+ if (!result.negated)
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL);
+ else
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL);
+ break;
#else
- case Opt_acl:
- case Opt_noacl:
- ext2_msg(sb, KERN_INFO,
- "(no)acl options not supported");
- break;
+ case Opt_acl:
+ ext2_msg_fc(fc, KERN_INFO, "(no)acl options not supported");
+ break;
#endif
- case Opt_xip:
- ext2_msg(sb, KERN_INFO, "use dax instead of xip");
- set_opt(opts->s_mount_opt, XIP);
- fallthrough;
- case Opt_dax:
+ case Opt_xip:
+ ext2_msg_fc(fc, KERN_INFO, "use dax instead of xip");
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_XIP);
+ fallthrough;
+ case Opt_dax:
#ifdef CONFIG_FS_DAX
- ext2_msg(sb, KERN_WARNING,
- "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
- set_opt(opts->s_mount_opt, DAX);
+ ext2_msg_fc(fc, KERN_WARNING,
+ "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_DAX);
#else
- ext2_msg(sb, KERN_INFO, "dax option not supported");
+ ext2_msg_fc(fc, KERN_INFO, "dax option not supported");
#endif
- break;
+ break;
#if defined(CONFIG_QUOTA)
- case Opt_quota:
- case Opt_usrquota:
- set_opt(opts->s_mount_opt, USRQUOTA);
- break;
-
- case Opt_grpquota:
- set_opt(opts->s_mount_opt, GRPQUOTA);
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_USRQUOTA);
+ break;
+
+ case Opt_grpquota:
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_GRPQUOTA);
+ break;
#else
- case Opt_quota:
- case Opt_usrquota:
- case Opt_grpquota:
- ext2_msg(sb, KERN_INFO,
- "quota operations not supported");
- break;
+ case Opt_quota:
+ case Opt_usrquota:
+ case Opt_grpquota:
+ ext2_msg_fc(fc, KERN_INFO, "quota operations not supported");
+ break;
#endif
-
- case Opt_reservation:
- set_opt(opts->s_mount_opt, RESERVATION);
- ext2_msg(sb, KERN_INFO, "reservations ON");
- break;
- case Opt_noreservation:
- clear_opt(opts->s_mount_opt, RESERVATION);
- ext2_msg(sb, KERN_INFO, "reservations OFF");
- break;
- case Opt_ignore:
- break;
- default:
- return 0;
+ case Opt_reservation:
+ if (!result.negated) {
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ ext2_msg_fc(fc, KERN_INFO, "reservations ON");
+ } else {
+ ctx_clear_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ ext2_msg_fc(fc, KERN_INFO, "reservations OFF");
+		}
+ break;
+ case Opt_ignore:
+ break;
+ default:
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int ext2_setup_super (struct super_block * sb,
@@ -801,24 +814,83 @@ static unsigned long descriptor_loc(struct super_block *sb,
return ext2_group_first_block_no(sb, bg) + ext2_bg_has_super(sb, bg);
}
-static int ext2_fill_super(struct super_block *sb, void *data, int silent)
+/*
+ * Set all mount options either from defaults on disk, or from parsed
+ * options. Parsed/specified options override on-disk defaults.
+ */
+static void ext2_set_options(struct fs_context *fc, struct ext2_sb_info *sbi)
+{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ struct ext2_super_block *es = sbi->s_es;
+ unsigned long def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
+
+ /* Copy parsed mount options to sbi */
+ sbi->s_mount_opt = ctx->vals_s_mount_opt;
+
+ /* Use in-superblock defaults only if not specified during parsing */
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_DEBUG) &&
+ def_mount_opts & EXT2_DEFM_DEBUG)
+ set_opt(sbi->s_mount_opt, DEBUG);
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_GRPID) &&
+ def_mount_opts & EXT2_DEFM_BSDGROUPS)
+ set_opt(sbi->s_mount_opt, GRPID);
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_NO_UID32) &&
+ def_mount_opts & EXT2_DEFM_UID16)
+ set_opt(sbi->s_mount_opt, NO_UID32);
+
+#ifdef CONFIG_EXT2_FS_XATTR
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_XATTR_USER) &&
+ def_mount_opts & EXT2_DEFM_XATTR_USER)
+ set_opt(sbi->s_mount_opt, XATTR_USER);
+#endif
+#ifdef CONFIG_EXT2_FS_POSIX_ACL
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_POSIX_ACL) &&
+ def_mount_opts & EXT2_DEFM_ACL)
+ set_opt(sbi->s_mount_opt, POSIX_ACL);
+#endif
+
+ if (!ctx_parsed_mount_opt(ctx, EXT2_MOUNT_ERRORS_MASK)) {
+ if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
+ set_opt(sbi->s_mount_opt, ERRORS_PANIC);
+ else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
+ set_opt(sbi->s_mount_opt, ERRORS_CONT);
+ else
+ set_opt(sbi->s_mount_opt, ERRORS_RO);
+ }
+
+ if (ctx->spec & EXT2_SPEC_s_resuid)
+ sbi->s_resuid = ctx->s_resuid;
+ else
+ sbi->s_resuid = make_kuid(&init_user_ns,
+ le16_to_cpu(es->s_def_resuid));
+
+ if (ctx->spec & EXT2_SPEC_s_resgid)
+ sbi->s_resgid = ctx->s_resgid;
+ else
+ sbi->s_resgid = make_kgid(&init_user_ns,
+ le16_to_cpu(es->s_def_resgid));
+}
+
+static int ext2_fill_super(struct super_block *sb, struct fs_context *fc)
{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ int silent = fc->sb_flags & SB_SILENT;
struct buffer_head * bh;
struct ext2_sb_info * sbi;
struct ext2_super_block * es;
struct inode *root;
unsigned long block;
- unsigned long sb_block = get_sb_block(&data);
+ unsigned long sb_block = ctx->s_sb_block;
unsigned long logic_sb_block;
unsigned long offset = 0;
- unsigned long def_mount_opts;
long ret = -ENOMEM;
int blocksize = BLOCK_SIZE;
int db_count;
int i, j;
__le32 features;
int err;
- struct ext2_mount_options opts;
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
@@ -877,42 +949,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
if (sb->s_magic != EXT2_SUPER_MAGIC)
goto cantfind_ext2;
- opts.s_mount_opt = 0;
- /* Set defaults before we parse the mount options */
- def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
- if (def_mount_opts & EXT2_DEFM_DEBUG)
- set_opt(opts.s_mount_opt, DEBUG);
- if (def_mount_opts & EXT2_DEFM_BSDGROUPS)
- set_opt(opts.s_mount_opt, GRPID);
- if (def_mount_opts & EXT2_DEFM_UID16)
- set_opt(opts.s_mount_opt, NO_UID32);
-#ifdef CONFIG_EXT2_FS_XATTR
- if (def_mount_opts & EXT2_DEFM_XATTR_USER)
- set_opt(opts.s_mount_opt, XATTR_USER);
-#endif
-#ifdef CONFIG_EXT2_FS_POSIX_ACL
- if (def_mount_opts & EXT2_DEFM_ACL)
- set_opt(opts.s_mount_opt, POSIX_ACL);
-#endif
-
- if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_PANIC)
- set_opt(opts.s_mount_opt, ERRORS_PANIC);
- else if (le16_to_cpu(sbi->s_es->s_errors) == EXT2_ERRORS_CONTINUE)
- set_opt(opts.s_mount_opt, ERRORS_CONT);
- else
- set_opt(opts.s_mount_opt, ERRORS_RO);
-
- opts.s_resuid = make_kuid(&init_user_ns, le16_to_cpu(es->s_def_resuid));
- opts.s_resgid = make_kgid(&init_user_ns, le16_to_cpu(es->s_def_resgid));
-
- set_opt(opts.s_mount_opt, RESERVATION);
-
- if (!parse_options((char *) data, sb, &opts))
- goto failed_mount;
-
- sbi->s_mount_opt = opts.s_mount_opt;
- sbi->s_resuid = opts.s_resuid;
- sbi->s_resgid = opts.s_resgid;
+ ext2_set_options(fc, sbi);
sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
(test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
@@ -1324,23 +1361,21 @@ static void ext2_write_super(struct super_block *sb)
ext2_sync_fs(sb, 1);
}
-static int ext2_remount (struct super_block * sb, int * flags, char * data)
+static int ext2_reconfigure(struct fs_context *fc)
{
+ struct ext2_fs_context *ctx = fc->fs_private;
+ struct super_block *sb = fc->root->d_sb;
struct ext2_sb_info * sbi = EXT2_SB(sb);
struct ext2_super_block * es;
struct ext2_mount_options new_opts;
+ int flags = fc->sb_flags;
int err;
sync_filesystem(sb);
- spin_lock(&sbi->s_lock);
- new_opts.s_mount_opt = sbi->s_mount_opt;
- new_opts.s_resuid = sbi->s_resuid;
- new_opts.s_resgid = sbi->s_resgid;
- spin_unlock(&sbi->s_lock);
-
- if (!parse_options(data, sb, &new_opts))
- return -EINVAL;
+ new_opts.s_mount_opt = ctx->vals_s_mount_opt;
+ new_opts.s_resuid = ctx->s_resuid;
+ new_opts.s_resgid = ctx->s_resgid;
spin_lock(&sbi->s_lock);
es = sbi->s_es;
@@ -1349,9 +1384,9 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
"dax flag with busy inodes while remounting");
new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
}
- if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+ if ((bool)(flags & SB_RDONLY) == sb_rdonly(sb))
goto out_set;
- if (*flags & SB_RDONLY) {
+ if (flags & SB_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS))
goto out_set;
@@ -1470,10 +1505,9 @@ static int ext2_statfs (struct dentry * dentry, struct kstatfs * buf)
return 0;
}
-static struct dentry *ext2_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
+static int ext2_get_tree(struct fs_context *fc)
{
- return mount_bdev(fs_type, flags, dev_name, data, ext2_fill_super);
+ return get_tree_bdev(fc, ext2_fill_super);
}
#ifdef CONFIG_QUOTA
@@ -1556,7 +1590,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
}
lock_buffer(bh);
memcpy(bh->b_data+offset, data, tocopy);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
@@ -1624,12 +1658,49 @@ out:
#endif
+static const struct fs_context_operations ext2_context_ops = {
+ .parse_param = ext2_parse_param,
+ .get_tree = ext2_get_tree,
+ .reconfigure = ext2_reconfigure,
+ .free = ext2_free_fc,
+};
+
+static int ext2_init_fs_context(struct fs_context *fc)
+{
+ struct ext2_fs_context *ctx;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+ struct super_block *sb = fc->root->d_sb;
+ struct ext2_sb_info *sbi = EXT2_SB(sb);
+
+ spin_lock(&sbi->s_lock);
+ ctx->vals_s_mount_opt = sbi->s_mount_opt;
+ ctx->vals_s_flags = sb->s_flags;
+ ctx->s_resuid = sbi->s_resuid;
+ ctx->s_resgid = sbi->s_resgid;
+ spin_unlock(&sbi->s_lock);
+ } else {
+ ctx->s_sb_block = 1;
+ ctx_set_mount_opt(ctx, EXT2_MOUNT_RESERVATION);
+ }
+
+ fc->fs_private = ctx;
+ fc->ops = &ext2_context_ops;
+
+ return 0;
+}
+
static struct file_system_type ext2_fs_type = {
.owner = THIS_MODULE,
.name = "ext2",
- .mount = ext2_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
+ .init_fs_context = ext2_init_fs_context,
+ .parameters = ext2_param_spec,
};
MODULE_ALIAS_FS("ext2");
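
With init_fs_context() and the parameter table wired up, ext2 becomes
reachable through the new mount API syscalls, and "errors=remount-ro" is
parsed via the fsparam_enum()/constant_table pair above rather than ad-hoc
string matching. A hedged userspace sketch of the flow — fsopen(2),
fsconfig(2), fsmount(2), and move_mount(2) exist as raw syscalls, but glibc
wrappers may be absent, hence syscall(2); error handling is trimmed and the
device/mount paths are placeholders:

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/mount.h>	/* FSCONFIG_*, MOVE_MOUNT_F_EMPTY_PATH */

    int main(void)
    {
    	int fsfd = syscall(SYS_fsopen, "ext2", 0);

    	/* Each key lands in ext2_parse_param() via fs_parse(). */
    	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
    	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_STRING, "errors", "remount-ro", 0);
    	syscall(SYS_fsconfig, fsfd, FSCONFIG_SET_FLAG, "grpid", NULL, 0);
    	syscall(SYS_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

    	int mfd = syscall(SYS_fsmount, fsfd, 0, 0);
    	syscall(SYS_move_mount, mfd, "", AT_FDCWD, "/mnt", MOVE_MOUNT_F_EMPTY_PATH);
    	return 0;
    }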
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index f608f6554b95..2c9b762925c7 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -642,7 +642,7 @@ retry:
goto retry;
if (folio)
- block_commit_write(&folio->page, from, to);
+ block_commit_write(folio, from, to);
out:
if (folio) {
folio_unlock(folio);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index bcb96caf77c0..1dc09ed5d403 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -182,6 +182,8 @@ void ext4_evict_inode(struct inode *inode)
trace_ext4_evict_inode(inode);
+ dax_break_layout_final(inode);
+
if (EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)
ext4_evict_ea_inode(inode);
if (inode->i_nlink) {
@@ -3981,24 +3983,10 @@ static void ext4_wait_dax_page(struct inode *inode)
int ext4_break_layouts(struct inode *inode)
{
- struct page *page;
- int error;
-
if (WARN_ON_ONCE(!rwsem_is_locked(&inode->i_mapping->invalidate_lock)))
return -EINVAL;
- do {
- page = dax_layout_busy_page(inode->i_mapping);
- if (!page)
- return 0;
-
- error = ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1,
- TASK_INTERRUPTIBLE, 0, 0,
- ext4_wait_dax_page(inode));
- } while (error == 0);
-
- return error;
+ return dax_break_layout_inode(inode, ext4_wait_dax_page);
}
/*
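
dax_break_layout_inode(), used by the converted ext4_break_layouts() above, is
not part of the fs/dax.c hunk earlier in this patch; presumably it is a thin
whole-file wrapper in include/linux/dax.h, something like this assumed sketch:

    /* Assumed shape of the helper (likely in include/linux/dax.h; not
     * shown in this diff): break layouts across the entire file. */
    static inline int dax_break_layout_inode(struct inode *inode,
    					     void (cb)(struct inode *))
    {
    	return dax_break_layout(inode, 0, LLONG_MAX, cb);
    }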
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 898443e98efc..48649be64d6a 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -399,7 +399,7 @@ data_copy:
bh = bh->b_this_page;
}
- block_commit_write(&folio[0]->page, from, from + replaced_size);
+ block_commit_write(folio[0], from, from + replaced_size);
/* Even in case of data=writeback it is reasonable to pin
* inode to transaction, to prevent unexpected data loss */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8122d4ffb3b5..181934499624 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -5680,7 +5680,7 @@ failed_mount3:
/* flush s_sb_upd_work before sbi destroy */
flush_work(&sbi->s_sb_upd_work);
ext4_stop_mmpd(sbi);
- del_timer_sync(&sbi->s_err_report);
+ timer_delete_sync(&sbi->s_err_report);
ext4_group_desc_free(sbi);
failed_mount:
#if IS_ENABLED(CONFIG_UNICODE)
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index 0b6ee6dd1fd6..0502bf3cdf6a 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -666,36 +666,12 @@ static void fuse_wait_dax_page(struct inode *inode)
filemap_invalidate_lock(inode->i_mapping);
}
-/* Should be called with mapping->invalidate_lock held exclusively */
-static int __fuse_dax_break_layouts(struct inode *inode, bool *retry,
- loff_t start, loff_t end)
-{
- struct page *page;
-
- page = dax_layout_busy_page_range(inode->i_mapping, start, end);
- if (!page)
- return 0;
-
- *retry = true;
- return ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, fuse_wait_dax_page(inode));
-}
-
-/* dmap_end == 0 leads to unmapping of whole file */
+/* Should be called with mapping->invalidate_lock held exclusively. */
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start,
u64 dmap_end)
{
- bool retry;
- int ret;
-
- do {
- retry = false;
- ret = __fuse_dax_break_layouts(inode, &retry, dmap_start,
- dmap_end);
- } while (ret == 0 && retry);
-
- return ret;
+ return dax_break_layout(inode, dmap_start, dmap_end,
+ fuse_wait_dax_page);
}
ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 51e31df4c546..6dcbaa218b7a 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -32,6 +32,100 @@ MODULE_ALIAS("devname:fuse");
static struct kmem_cache *fuse_req_cachep;
+const unsigned long fuse_timeout_timer_freq =
+ secs_to_jiffies(FUSE_TIMEOUT_TIMER_FREQ);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list)
+{
+ struct fuse_req *req;
+
+ req = list_first_entry_or_null(list, struct fuse_req, list);
+ if (!req)
+ return false;
+ return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout);
+}
+
+bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing)
+{
+ int i;
+
+ for (i = 0; i < FUSE_PQ_HASH_SIZE; i++)
+ if (fuse_request_expired(fc, &processing[i]))
+ return true;
+
+ return false;
+}
+
+/*
+ * Check if any requests aren't being completed by the time the request timeout
+ * elapses. To do so, we:
+ * - check the fiq pending list
+ * - check the bg queue
+ * - check the fpq io and processing lists
+ *
+ * To make this fast, we only check against the head request on each list since
+ * these are generally queued in order of creation time (e.g. newer requests get
+ * queued to the tail). We might miss a few edge cases (e.g. requests transitioning
+ * between lists, re-sent requests at the head of the pending list having a
+ * later creation time than other requests on that list, etc.) but that is fine
+ * since if the request never gets fulfilled, it will eventually be caught.
+ */
+void fuse_check_timeout(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct fuse_conn *fc = container_of(dwork, struct fuse_conn,
+ timeout.work);
+ struct fuse_iqueue *fiq = &fc->iq;
+ struct fuse_dev *fud;
+ struct fuse_pqueue *fpq;
+ bool expired = false;
+
+ if (!atomic_read(&fc->num_waiting))
+ goto out;
+
+ spin_lock(&fiq->lock);
+ expired = fuse_request_expired(fc, &fiq->pending);
+ spin_unlock(&fiq->lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->bg_lock);
+ expired = fuse_request_expired(fc, &fc->bg_queue);
+ spin_unlock(&fc->bg_lock);
+ if (expired)
+ goto abort_conn;
+
+ spin_lock(&fc->lock);
+ if (!fc->connected) {
+ spin_unlock(&fc->lock);
+ return;
+ }
+ list_for_each_entry(fud, &fc->devices, entry) {
+ fpq = &fud->pq;
+ spin_lock(&fpq->lock);
+ if (fuse_request_expired(fc, &fpq->io) ||
+ fuse_fpq_processing_expired(fc, fpq->processing)) {
+ spin_unlock(&fpq->lock);
+ spin_unlock(&fc->lock);
+ goto abort_conn;
+ }
+
+ spin_unlock(&fpq->lock);
+ }
+ spin_unlock(&fc->lock);
+
+ if (fuse_uring_request_expired(fc))
+ goto abort_conn;
+
+out:
+ queue_delayed_work(system_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+ return;
+
+abort_conn:
+ fuse_abort_conn(fc);
+}
+
static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
{
INIT_LIST_HEAD(&req->list);
@@ -40,6 +134,7 @@ static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
refcount_set(&req->count, 1);
__set_bit(FR_PENDING, &req->flags);
req->fm = fm;
+ req->create_time = jiffies;
}
static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
@@ -407,6 +502,24 @@ static int queue_interrupt(struct fuse_req *req)
return 0;
}
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock)
+{
+ spin_lock(lock);
+ if (test_bit(FR_PENDING, &req->flags)) {
+ /*
+ * FR_PENDING does not get cleared as the request will end
+ * up in destruction anyway.
+ */
+ list_del(&req->list);
+ spin_unlock(lock);
+ __fuse_put_request(req);
+ req->out.h.error = -EINTR;
+ return true;
+ }
+ spin_unlock(lock);
+ return false;
+}
+
static void request_wait_answer(struct fuse_req *req)
{
struct fuse_conn *fc = req->fm->fc;
@@ -428,22 +541,20 @@ static void request_wait_answer(struct fuse_req *req)
}
if (!test_bit(FR_FORCE, &req->flags)) {
+ bool removed;
+
/* Only fatal signals may interrupt this */
err = wait_event_killable(req->waitq,
test_bit(FR_FINISHED, &req->flags));
if (!err)
return;
- spin_lock(&fiq->lock);
- /* Request is not yet in userspace, bail out */
- if (test_bit(FR_PENDING, &req->flags)) {
- list_del(&req->list);
- spin_unlock(&fiq->lock);
- __fuse_put_request(req);
- req->out.h.error = -EINTR;
+ if (test_bit(FR_URING, &req->flags))
+ removed = fuse_uring_remove_pending_req(req);
+ else
+ removed = fuse_remove_pending_req(req, &fiq->lock);
+ if (removed)
return;
- }
- spin_unlock(&fiq->lock);
}
/*
@@ -1533,14 +1644,10 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_inval_entry_out outarg;
- int err = -ENOMEM;
- char *buf;
+ int err;
+ char *buf = NULL;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -1550,13 +1657,18 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
goto err;
err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
+ if (outarg.namelen > fc->name_max)
goto err;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
goto err;
+ err = -ENOMEM;
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
@@ -1581,14 +1693,10 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_delete_out outarg;
- int err = -ENOMEM;
- char *buf;
+ int err;
+ char *buf = NULL;
struct qstr name;
- buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
- if (!buf)
- goto err;
-
err = -EINVAL;
if (size < sizeof(outarg))
goto err;
@@ -1598,13 +1706,18 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
goto err;
err = -ENAMETOOLONG;
- if (outarg.namelen > FUSE_NAME_MAX)
+ if (outarg.namelen > fc->name_max)
goto err;
err = -EINVAL;
if (size != sizeof(outarg) + outarg.namelen + 1)
goto err;
+ err = -ENOMEM;
+ buf = kzalloc(outarg.namelen + 1, GFP_KERNEL);
+ if (!buf)
+ goto err;
+
name.name = buf;
name.len = outarg.namelen;
err = fuse_copy_one(cs, buf, outarg.namelen + 1);
@@ -2275,6 +2388,9 @@ void fuse_abort_conn(struct fuse_conn *fc)
LIST_HEAD(to_end);
unsigned int i;
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work(&fc->timeout.work);
+
/* Background queuing checks fc->connected under bg_lock */
spin_lock(&fc->bg_lock);
fc->connected = 0;
diff --git a/fs/fuse/dev_uring.c b/fs/fuse/dev_uring.c
index 82bf458fa9db..accdce2977c5 100644
--- a/fs/fuse/dev_uring.c
+++ b/fs/fuse/dev_uring.c
@@ -140,6 +140,33 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring)
}
}
+bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ struct fuse_ring *ring = fc->ring;
+ struct fuse_ring_queue *queue;
+ int qid;
+
+ if (!ring)
+ return false;
+
+ for (qid = 0; qid < ring->nr_queues; qid++) {
+ queue = READ_ONCE(ring->queues[qid]);
+ if (!queue)
+ continue;
+
+ spin_lock(&queue->lock);
+ if (fuse_request_expired(fc, &queue->fuse_req_queue) ||
+ fuse_request_expired(fc, &queue->fuse_req_bg_queue) ||
+ fuse_fpq_processing_expired(fc, queue->fpq.processing)) {
+ spin_unlock(&queue->lock);
+ return true;
+ }
+ spin_unlock(&queue->lock);
+ }
+
+ return false;
+}
+
void fuse_uring_destruct(struct fuse_conn *fc)
{
struct fuse_ring *ring = fc->ring;
@@ -211,7 +238,6 @@ static struct fuse_ring *fuse_uring_create(struct fuse_conn *fc)
ring->nr_queues = nr_queues;
ring->fc = fc;
ring->max_payload_sz = max_payload_size;
- atomic_set(&ring->queue_refs, 0);
smp_store_release(&fc->ring, ring);
spin_unlock(&fc->lock);
@@ -726,8 +752,6 @@ static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
struct fuse_req *req)
{
struct fuse_ring_queue *queue = ent->queue;
- struct fuse_conn *fc = req->fm->fc;
- struct fuse_iqueue *fiq = &fc->iq;
lockdep_assert_held(&queue->lock);
@@ -737,9 +761,7 @@ static void fuse_uring_add_req_to_ring_ent(struct fuse_ring_ent *ent,
ent->state);
}
- spin_lock(&fiq->lock);
clear_bit(FR_PENDING, &req->flags);
- spin_unlock(&fiq->lock);
ent->fuse_req = req;
ent->state = FRRS_FUSE_REQ;
list_move(&ent->list, &queue->ent_w_req_queue);
@@ -1238,6 +1260,8 @@ void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req)
if (unlikely(queue->stopped))
goto err_unlock;
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
ent = list_first_entry_or_null(&queue->ent_avail_queue,
struct fuse_ring_ent, list);
if (ent)
@@ -1276,6 +1300,8 @@ bool fuse_uring_queue_bq_req(struct fuse_req *req)
return false;
}
+ set_bit(FR_URING, &req->flags);
+ req->ring_queue = queue;
list_add_tail(&req->list, &queue->fuse_req_bg_queue);
ent = list_first_entry_or_null(&queue->ent_avail_queue,
@@ -1306,6 +1332,13 @@ bool fuse_uring_queue_bq_req(struct fuse_req *req)
return true;
}
+bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ struct fuse_ring_queue *queue = req->ring_queue;
+
+ return fuse_remove_pending_req(req, &queue->lock);
+}
+
static const struct fuse_iqueue_ops fuse_io_uring_ops = {
/* should be send over io-uring as enhancement */
.send_forget = fuse_dev_queue_forget,
diff --git a/fs/fuse/dev_uring_i.h b/fs/fuse/dev_uring_i.h
index 2102b3d0c1ae..51a563922ce1 100644
--- a/fs/fuse/dev_uring_i.h
+++ b/fs/fuse/dev_uring_i.h
@@ -142,6 +142,8 @@ void fuse_uring_abort_end_requests(struct fuse_ring *ring);
int fuse_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags);
void fuse_uring_queue_fuse_req(struct fuse_iqueue *fiq, struct fuse_req *req);
bool fuse_uring_queue_bq_req(struct fuse_req *req);
+bool fuse_uring_remove_pending_req(struct fuse_req *req);
+bool fuse_uring_request_expired(struct fuse_conn *fc);
static inline void fuse_uring_abort(struct fuse_conn *fc)
{
@@ -172,12 +174,6 @@ static inline bool fuse_uring_ready(struct fuse_conn *fc)
#else /* CONFIG_FUSE_IO_URING */
-struct fuse_ring;
-
-static inline void fuse_uring_create(struct fuse_conn *fc)
-{
-}
-
static inline void fuse_uring_destruct(struct fuse_conn *fc)
{
}
@@ -200,6 +196,16 @@ static inline bool fuse_uring_ready(struct fuse_conn *fc)
return false;
}
+static inline bool fuse_uring_remove_pending_req(struct fuse_req *req)
+{
+ return false;
+}
+
+static inline bool fuse_uring_request_expired(struct fuse_conn *fc)
+{
+ return false;
+}
+
#endif /* CONFIG_FUSE_IO_URING */
#endif /* _FS_FUSE_DEV_URING_I_H */
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index fa8f1141ea74..83ac192e7fdd 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -370,7 +370,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
*inode = NULL;
err = -ENAMETOOLONG;
- if (name->len > FUSE_NAME_MAX)
+ if (name->len > fm->fc->name_max)
goto out;
@@ -1137,6 +1137,9 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
struct fuse_mount *fm = get_fuse_mount(inode);
FUSE_ARGS(args);
+ if (fm->fc->no_link)
+ goto out;
+
memset(&inarg, 0, sizeof(inarg));
inarg.oldnodeid = get_node_id(inode);
args.opcode = FUSE_LINK;
@@ -1151,6 +1154,12 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
else if (err == -EINTR)
fuse_invalidate_attr(inode);
+ if (err == -ENOSYS)
+ fm->fc->no_link = 1;
+out:
+ if (fm->fc->no_link)
+ return -EPERM;
+
return err;
}
@@ -1954,7 +1963,7 @@ int fuse_do_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
if (FUSE_IS_DAX(inode) && is_truncate) {
filemap_invalidate_lock(mapping);
fault_blocked = true;
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err) {
filemap_invalidate_unlock(mapping);
return err;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index d63e56fd3dd2..754378dd9f71 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -253,7 +253,7 @@ static int fuse_open(struct inode *inode, struct file *file)
if (dax_truncate) {
filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err)
goto out_inode_unlock;
}
@@ -3205,7 +3205,7 @@ static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
inode_lock(inode);
if (block_faults) {
filemap_invalidate_lock(inode->i_mapping);
- err = fuse_dax_break_layouts(inode, 0, 0);
+ err = fuse_dax_break_layouts(inode, 0, -1);
if (err)
goto out;
}
diff --git a/fs/fuse/fuse_dev_i.h b/fs/fuse/fuse_dev_i.h
index 3b2bfe1248d3..b3c2e32254ba 100644
--- a/fs/fuse/fuse_dev_i.h
+++ b/fs/fuse/fuse_dev_i.h
@@ -61,6 +61,10 @@ int fuse_copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
void fuse_dev_queue_forget(struct fuse_iqueue *fiq,
struct fuse_forget_link *forget);
void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req);
+bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock);
+
+bool fuse_request_expired(struct fuse_conn *fc, struct list_head *list);
+bool fuse_fpq_processing_expired(struct fuse_conn *fc, struct list_head *processing);
#endif
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index fee96fe7887b..d56d4fd956db 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -38,14 +38,34 @@
/** Bias for fi->writectr, meaning new writepages must not be sent */
#define FUSE_NOWRITE INT_MIN
-/** It could be as large as PATH_MAX, but would that have any uses? */
-#define FUSE_NAME_MAX 1024
+/** Maximum length of a filename, not including terminating null */
+
+/* maximum, small enough for FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_LOW_MAX 1024
+/* maximum, but needs a request buffer > FUSE_MIN_READ_BUFFER */
+#define FUSE_NAME_MAX (PATH_MAX - 1)
/** Number of dentries for each connection in the control filesystem */
#define FUSE_CTL_NUM_DENTRIES 5
+/* Frequency (in seconds) of request timeout checks, if opted into */
+#define FUSE_TIMEOUT_TIMER_FREQ 15
+
+/** Frequency (in jiffies) of request timeout checks, if opted into */
+extern const unsigned long fuse_timeout_timer_freq;
+
/** Maximum of max_pages received in init_out */
extern unsigned int fuse_max_pages_limit;
+/*
+ * Default timeout (in seconds) for the server to reply to a request
+ * before the connection is aborted, if no timeout was specified on mount.
+ */
+extern unsigned int fuse_default_req_timeout;
+/*
+ * Max timeout (in seconds) for the server to reply to a request before
+ * the connection is aborted.
+ */
+extern unsigned int fuse_max_req_timeout;
/** List of active connections */
extern struct list_head fuse_conn_list;
@@ -378,6 +398,7 @@ struct fuse_io_priv {
* FR_FINISHED: request is finished
* FR_PRIVATE: request is on private list
* FR_ASYNC: request is asynchronous
+ * FR_URING: request is handled through fuse-io-uring
*/
enum fuse_req_flag {
FR_ISREPLY,
@@ -392,6 +413,7 @@ enum fuse_req_flag {
FR_FINISHED,
FR_PRIVATE,
FR_ASYNC,
+ FR_URING,
};
/**
@@ -441,7 +463,10 @@ struct fuse_req {
#ifdef CONFIG_FUSE_IO_URING
void *ring_entry;
+ void *ring_queue;
#endif
+ /** When (in jiffies) the request was created */
+ unsigned long create_time;
};
struct fuse_iqueue;
@@ -867,6 +892,9 @@ struct fuse_conn {
/* Use pages instead of pointer for kernel I/O */
unsigned int use_pages_for_kvec_io:1;
+ /* Is link not implemented by fs? */
+ unsigned int no_link:1;
+
/* Use io_uring for communication */
unsigned int io_uring;
@@ -900,6 +928,9 @@ struct fuse_conn {
/** Version counter for evict inode */
atomic64_t evict_ctr;
+ /* maximum file name length */
+ u32 name_max;
+
/** Called on final put */
void (*release)(struct fuse_conn *);
@@ -935,6 +966,15 @@ struct fuse_conn {
/** uring connection information */
struct fuse_ring *ring;
#endif
+
+ /** Only used if the connection opts into request timeouts */
+ struct {
+ /* Worker for checking if any requests have timed out */
+ struct delayed_work work;
+
+ /* Request timeout (in jiffies). 0 = no timeout */
+ unsigned int req_timeout;
+ } timeout;
};
/*
@@ -1216,6 +1256,9 @@ void fuse_request_end(struct fuse_req *req);
void fuse_abort_conn(struct fuse_conn *fc);
void fuse_wait_aborted(struct fuse_conn *fc);
+/* Check if any requests timed out */
+void fuse_check_timeout(struct work_struct *work);
+
/**
* Invalidate inode attributes
*/
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e9db2cb8c150..fd48e8d37f2e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -37,6 +37,9 @@ DEFINE_MUTEX(fuse_mutex);
static int set_global_limit(const char *val, const struct kernel_param *kp);
unsigned int fuse_max_pages_limit = 256;
+/* default is no timeout */
+unsigned int fuse_default_req_timeout;
+unsigned int fuse_max_req_timeout;
unsigned max_user_bgreq;
module_param_call(max_user_bgreq, set_global_limit, param_get_uint,
@@ -979,6 +982,8 @@ void fuse_conn_init(struct fuse_conn *fc, struct fuse_mount *fm,
fc->user_ns = get_user_ns(user_ns);
fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ;
fc->max_pages_limit = fuse_max_pages_limit;
+ fc->name_max = FUSE_NAME_LOW_MAX;
+ fc->timeout.req_timeout = 0;
if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH))
fuse_backing_files_init(fc);
@@ -1007,6 +1012,8 @@ void fuse_conn_put(struct fuse_conn *fc)
if (IS_ENABLED(CONFIG_FUSE_DAX))
fuse_dax_conn_free(fc);
+ if (fc->timeout.req_timeout)
+ cancel_delayed_work_sync(&fc->timeout.work);
if (fiq->ops->release)
fiq->ops->release(fiq);
put_pid_ns(fc->pid_ns);
@@ -1257,6 +1264,34 @@ static void process_init_limits(struct fuse_conn *fc, struct fuse_init_out *arg)
spin_unlock(&fc->bg_lock);
}
+static void set_request_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ fc->timeout.req_timeout = secs_to_jiffies(timeout);
+ INIT_DELAYED_WORK(&fc->timeout.work, fuse_check_timeout);
+ queue_delayed_work(system_wq, &fc->timeout.work,
+ fuse_timeout_timer_freq);
+}
+
+static void init_server_timeout(struct fuse_conn *fc, unsigned int timeout)
+{
+ if (!timeout && !fuse_max_req_timeout && !fuse_default_req_timeout)
+ return;
+
+ if (!timeout)
+ timeout = fuse_default_req_timeout;
+
+ if (fuse_max_req_timeout) {
+ if (timeout)
+ timeout = min(fuse_max_req_timeout, timeout);
+ else
+ timeout = fuse_max_req_timeout;
+ }
+
+ timeout = max(FUSE_TIMEOUT_TIMER_FREQ, timeout);
+
+ set_request_timeout(fc, timeout);
+}
+
struct fuse_init_args {
struct fuse_args args;
struct fuse_init_in in;
@@ -1275,6 +1310,7 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
ok = false;
else {
unsigned long ra_pages;
+ unsigned int timeout = 0;
process_init_limits(fc, arg);
@@ -1338,6 +1374,13 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
fc->max_pages =
min_t(unsigned int, fc->max_pages_limit,
max_t(unsigned int, arg->max_pages, 1));
+
+ /*
+ * PATH_MAX file names might need two pages for
+ * ops like rename
+ */
+ if (fc->max_pages > 1)
+ fc->name_max = FUSE_NAME_MAX;
}
if (IS_ENABLED(CONFIG_FUSE_DAX)) {
if (flags & FUSE_MAP_ALIGNMENT &&
@@ -1392,12 +1435,17 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
}
if (flags & FUSE_OVER_IO_URING && fuse_uring_enabled())
fc->io_uring = 1;
+
+ if (flags & FUSE_REQUEST_TIMEOUT)
+ timeout = arg->request_timeout;
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
fc->no_flock = 1;
}
+ init_server_timeout(fc, timeout);
+
fm->sb->s_bdi->ra_pages =
min(fm->sb->s_bdi->ra_pages, ra_pages);
fc->minor = arg->minor;
@@ -1439,7 +1487,8 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP |
FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP |
- FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP;
+ FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | FUSE_ALLOW_IDMAP |
+ FUSE_REQUEST_TIMEOUT;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
flags |= FUSE_MAP_ALIGNMENT;
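init_server_timeout() above resolves the effective timeout from three sources: the value the server requests via FUSE_REQUEST_TIMEOUT, the default_request_timeout sysctl, and the max_request_timeout cap, then floors the result at the 15-second check frequency. A small userspace model of that resolution, with asserts as worked examples (resolve_timeout is a hypothetical mirror, not the kernel function):

#include <assert.h>

#define TIMER_FREQ 15	/* mirrors FUSE_TIMEOUT_TIMER_FREQ, in seconds */

static unsigned int resolve_timeout(unsigned int requested,
				    unsigned int def, unsigned int max)
{
	unsigned int t = requested ? requested : def;

	if (max)
		t = t ? (t < max ? t : max) : max;	/* clamp to the cap */
	if (!t)
		return 0;				/* timeouts disabled */
	return t < TIMER_FREQ ? TIMER_FREQ : t;		/* floor at check period */
}

int main(void)
{
	assert(resolve_timeout(0, 0, 0) == 0);		/* nothing configured */
	assert(resolve_timeout(600, 0, 300) == 300);	/* capped by the max */
	assert(resolve_timeout(5, 0, 0) == TIMER_FREQ);	/* floored at 15s */
	return 0;
}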
diff --git a/fs/fuse/sysctl.c b/fs/fuse/sysctl.c
index 63fb1e5bee30..e2d921abcb88 100644
--- a/fs/fuse/sysctl.c
+++ b/fs/fuse/sysctl.c
@@ -13,6 +13,12 @@ static struct ctl_table_header *fuse_table_header;
/* Bound by fuse_init_out max_pages, which is a u16 */
static unsigned int sysctl_fuse_max_pages_limit = 65535;
+/*
+ * fuse_init_out request timeouts are u16.
+ * This goes up to ~18 hours, which is plenty for a timeout.
+ */
+static unsigned int sysctl_fuse_req_timeout_limit = 65535;
+
static const struct ctl_table fuse_sysctl_table[] = {
{
.procname = "max_pages_limit",
@@ -23,6 +29,24 @@ static const struct ctl_table fuse_sysctl_table[] = {
.extra1 = SYSCTL_ONE,
.extra2 = &sysctl_fuse_max_pages_limit,
},
+ {
+ .procname = "default_request_timeout",
+ .data = &fuse_default_req_timeout,
+ .maxlen = sizeof(fuse_default_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
+ {
+ .procname = "max_request_timeout",
+ .data = &fuse_max_req_timeout,
+ .maxlen = sizeof(fuse_max_req_timeout),
+ .mode = 0644,
+ .proc_handler = proc_douintvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = &sysctl_fuse_req_timeout_limit,
+ },
};
int fuse_sysctl_register(void)
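Assuming the table above is registered under fs/fuse like the existing max_pages_limit knob, the new timeouts would be set from userspace by writing seconds to the corresponding /proc/sys files; a minimal sketch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/fs/fuse/default_request_timeout", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "600\n");	/* abort unanswered requests after 10 minutes */
	fclose(f);
	return 0;
}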
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 82afe78ec542..2c7b24cb67ad 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -1017,8 +1017,7 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
if (kaddr)
*kaddr = fs->window_kaddr + offset;
if (pfn)
- *pfn = phys_to_pfn_t(fs->window_phys_addr + offset,
- PFN_DEV | PFN_MAP);
+ *pfn = phys_to_pfn_t(fs->window_phys_addr + offset, 0);
return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index be7f87a8e11a..7bd231d16d4a 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -4,7 +4,6 @@ config GFS2_FS
select BUFFER_HEAD
select FS_POSIX_ACL
select CRC32
- select LIBCRC32C
select QUOTACTL
select FS_IOMAP
help
diff --git a/fs/hostfs/hostfs.h b/fs/hostfs/hostfs.h
index 8b39c15c408c..15b2f094d36e 100644
--- a/fs/hostfs/hostfs.h
+++ b/fs/hostfs/hostfs.h
@@ -60,7 +60,7 @@ struct hostfs_stat {
unsigned int uid;
unsigned int gid;
unsigned long long size;
- struct hostfs_timespec atime, mtime, ctime;
+ struct hostfs_timespec atime, mtime, ctime, btime;
unsigned int blksize;
unsigned long long blocks;
struct {
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index a2c6b9051c5b..702c41317589 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -33,6 +33,7 @@ struct hostfs_inode_info {
struct inode vfs_inode;
struct mutex open_mutex;
dev_t dev;
+ struct hostfs_timespec btime;
};
static inline struct hostfs_inode_info *HOSTFS_I(struct inode *inode)
@@ -547,6 +548,7 @@ static int hostfs_inode_set(struct inode *ino, void *data)
}
HOSTFS_I(ino)->dev = dev;
+ HOSTFS_I(ino)->btime = st->btime;
ino->i_ino = st->ino;
ino->i_mode = st->mode;
return hostfs_inode_update(ino, st);
@@ -557,7 +559,10 @@ static int hostfs_inode_test(struct inode *inode, void *data)
const struct hostfs_stat *st = data;
dev_t dev = MKDEV(st->dev.maj, st->dev.min);
- return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev;
+ return inode->i_ino == st->ino && HOSTFS_I(inode)->dev == dev &&
+ (inode->i_mode & S_IFMT) == (st->mode & S_IFMT) &&
+ HOSTFS_I(inode)->btime.tv_sec == st->btime.tv_sec &&
+ HOSTFS_I(inode)->btime.tv_nsec == st->btime.tv_nsec;
}
static struct inode *hostfs_iget(struct super_block *sb, char *name)
diff --git a/fs/hostfs/hostfs_user.c b/fs/hostfs/hostfs_user.c
index 97e9c40a9448..3bcd9f35e70b 100644
--- a/fs/hostfs/hostfs_user.c
+++ b/fs/hostfs/hostfs_user.c
@@ -18,39 +18,48 @@
#include "hostfs.h"
#include <utime.h>
-static void stat64_to_hostfs(const struct stat64 *buf, struct hostfs_stat *p)
+static void statx_to_hostfs(const struct statx *buf, struct hostfs_stat *p)
{
- p->ino = buf->st_ino;
- p->mode = buf->st_mode;
- p->nlink = buf->st_nlink;
- p->uid = buf->st_uid;
- p->gid = buf->st_gid;
- p->size = buf->st_size;
- p->atime.tv_sec = buf->st_atime;
- p->atime.tv_nsec = 0;
- p->ctime.tv_sec = buf->st_ctime;
- p->ctime.tv_nsec = 0;
- p->mtime.tv_sec = buf->st_mtime;
- p->mtime.tv_nsec = 0;
- p->blksize = buf->st_blksize;
- p->blocks = buf->st_blocks;
- p->rdev.maj = os_major(buf->st_rdev);
- p->rdev.min = os_minor(buf->st_rdev);
- p->dev.maj = os_major(buf->st_dev);
- p->dev.min = os_minor(buf->st_dev);
+ p->ino = buf->stx_ino;
+ p->mode = buf->stx_mode;
+ p->nlink = buf->stx_nlink;
+ p->uid = buf->stx_uid;
+ p->gid = buf->stx_gid;
+ p->size = buf->stx_size;
+ p->atime.tv_sec = buf->stx_atime.tv_sec;
+ p->atime.tv_nsec = buf->stx_atime.tv_nsec;
+ p->ctime.tv_sec = buf->stx_ctime.tv_sec;
+ p->ctime.tv_nsec = buf->stx_ctime.tv_nsec;
+ p->mtime.tv_sec = buf->stx_mtime.tv_sec;
+ p->mtime.tv_nsec = buf->stx_mtime.tv_nsec;
+ if (buf->stx_mask & STATX_BTIME) {
+ p->btime.tv_sec = buf->stx_btime.tv_sec;
+ p->btime.tv_nsec = buf->stx_btime.tv_nsec;
+ } else {
+ memset(&p->btime, 0, sizeof(p->btime));
+ }
+ p->blksize = buf->stx_blksize;
+ p->blocks = buf->stx_blocks;
+ p->rdev.maj = buf->stx_rdev_major;
+ p->rdev.min = buf->stx_rdev_minor;
+ p->dev.maj = buf->stx_dev_major;
+ p->dev.min = buf->stx_dev_minor;
}
int stat_file(const char *path, struct hostfs_stat *p, int fd)
{
- struct stat64 buf;
+ struct statx buf;
+ int flags = AT_SYMLINK_NOFOLLOW;
if (fd >= 0) {
- if (fstat64(fd, &buf) < 0)
- return -errno;
- } else if (lstat64(path, &buf) < 0) {
- return -errno;
+ flags |= AT_EMPTY_PATH;
+ path = "";
}
- stat64_to_hostfs(&buf, p);
+
+ if ((statx(fd, path, flags, STATX_BASIC_STATS | STATX_BTIME, &buf)) < 0)
+ return -errno;
+
+ statx_to_hostfs(&buf, p);
return 0;
}
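The conversion above replaces the fstat64()/lstat64() pair with a single statx() call so the host's birth time can be captured when the underlying filesystem reports it. A standalone userspace probe of the same call, including the STATX_BTIME mask check the hostfs code relies on (glibc 2.28+ provides the statx() wrapper):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct statx stx;
	const char *path = argc > 1 ? argv[1] : ".";

	if (statx(AT_FDCWD, path, AT_SYMLINK_NOFOLLOW,
		  STATX_BASIC_STATS | STATX_BTIME, &stx) < 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_BTIME)	/* btime is optional: check the mask */
		printf("btime: %lld.%09u\n",
		       (long long)stx.stx_btime.tv_sec, stx.stx_btime.tv_nsec);
	else
		printf("filesystem does not report btime\n");
	return 0;
}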
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d98caedbb723..e4de5425838d 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -193,19 +193,21 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
}
/*
- * Someone wants to read @bytes from a HWPOISON hugetlb @page from @offset.
+ * Someone wants to read @bytes from a HWPOISON hugetlb @folio from @offset.
* Returns the maximum number of bytes one can read without touching the 1st raw
- * HWPOISON subpage.
+ * HWPOISON page.
*
* The implementation borrows the iteration logic from copy_page_to_iter*.
*/
-static size_t adjust_range_hwpoison(struct page *page, size_t offset, size_t bytes)
+static size_t adjust_range_hwpoison(struct folio *folio, size_t offset,
+ size_t bytes)
{
+ struct page *page;
size_t n = 0;
size_t res = 0;
- /* First subpage to start the loop. */
- page = nth_page(page, offset / PAGE_SIZE);
+ /* First page to start the loop. */
+ page = folio_page(folio, offset / PAGE_SIZE);
offset %= PAGE_SIZE;
while (1) {
if (is_raw_hwpoison_page_in_hugepage(page))
@@ -278,10 +280,10 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
else {
/*
* Adjust how many bytes safe to read without
- * touching the 1st raw HWPOISON subpage after
+ * touching the 1st raw HWPOISON page after
* offset.
*/
- want = adjust_range_hwpoison(&folio->page, offset, nr);
+ want = adjust_range_hwpoison(folio, offset, nr);
if (want == 0) {
folio_put(folio);
retval = -EIO;
@@ -338,8 +340,8 @@ static void hugetlb_delete_from_page_cache(struct folio *folio)
* mutex for the page in the mapping. So, we can not race with page being
* faulted into the vma.
*/
-static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
- unsigned long addr, struct page *page)
+static bool hugetlb_vma_maps_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
{
pte_t *ptep, pte;
@@ -351,7 +353,7 @@ static bool hugetlb_vma_maps_page(struct vm_area_struct *vma,
if (huge_pte_none(pte) || !pte_present(pte))
return false;
- if (pte_page(pte) == page)
+ if (pte_pfn(pte) == pfn)
return true;
return false;
@@ -396,7 +398,7 @@ static void hugetlb_unmap_file_folio(struct hstate *h,
{
struct rb_root_cached *root = &mapping->i_mmap;
struct hugetlb_vma_lock *vma_lock;
- struct page *page = &folio->page;
+ unsigned long pfn = folio_pfn(folio);
struct vm_area_struct *vma;
unsigned long v_start;
unsigned long v_end;
@@ -412,7 +414,7 @@ retry:
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
- if (!hugetlb_vma_maps_page(vma, v_start, page))
+ if (!hugetlb_vma_maps_pfn(vma, v_start, pfn))
continue;
if (!hugetlb_vma_trylock_write(vma)) {
@@ -462,7 +464,7 @@ retry:
*/
v_start = vma_offset_start(vma, start);
v_end = vma_offset_end(vma, end);
- if (hugetlb_vma_maps_page(vma, v_start, page))
+ if (hugetlb_vma_maps_pfn(vma, v_start, pfn))
unmap_hugepage_range(vma, v_start, v_end, NULL,
ZAP_FLAG_DROP_MARKER);
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 814b7f679486..31553372b33a 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1480,7 +1480,7 @@ static int iomap_folio_mkwrite_iter(struct iomap_iter *iter,
&iter->iomap);
if (ret)
return ret;
- block_commit_write(&folio->page, 0, length);
+ block_commit_write(folio, 0, length);
} else {
WARN_ON_ONCE(!folio_test_uptodate(folio));
folio_mark_dirty(folio);
diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
index eb2f8273e6f1..09df40b612fb 100644
--- a/fs/isofs/dir.c
+++ b/fs/isofs/dir.c
@@ -147,7 +147,8 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
de = tmpde;
}
/* Basic sanity check: the name must not exceed the dir entry */
- if (de_len < de->name_len[0] +
+ if (de_len < sizeof(struct iso_directory_record) ||
+ de_len < de->name_len[0] +
sizeof(struct iso_directory_record)) {
printk(KERN_NOTICE "iso9660: Corrupted directory entry"
" in block %lu of inode %lu\n", block,
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index a5ccba25ff47..743a1d7633cd 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -197,7 +197,7 @@ loop:
if (journal->j_commit_sequence != journal->j_commit_request) {
jbd2_debug(1, "OK, requests differ\n");
write_unlock(&journal->j_state_lock);
- del_timer_sync(&journal->j_commit_timer);
+ timer_delete_sync(&journal->j_commit_timer);
jbd2_journal_commit_transaction(journal);
write_lock(&journal->j_state_lock);
goto loop;
@@ -246,7 +246,7 @@ loop:
goto loop;
end_loop:
- del_timer_sync(&journal->j_commit_timer);
+ timer_delete_sync(&journal->j_commit_timer);
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
jbd2_debug(1, "Journal thread exiting.\n");
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 4061e0ba7010..bb815a002984 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -584,7 +584,7 @@ static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad)
size_t retlen;
/* Nothing to do if not write-buffering the flash. In particular, we shouldn't
- del_timer() the timer we never initialised. */
+ call timer_delete() on the timer we never initialised. */
if (!jffs2_is_writebuffered(c))
return 0;
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index d296aad70800..fc70d72c3fe8 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -17,7 +17,7 @@
#include "kernfs-internal.h"
-static DEFINE_RWLOCK(kernfs_rename_lock); /* kn->parent and ->name */
+DEFINE_RWLOCK(kernfs_rename_lock); /* kn->parent and ->name */
/*
* Don't use rename_lock to piggy back on pr_cont_buf. We don't want to
* call pr_cont() while holding rename_lock. Because sometimes pr_cont()
@@ -51,22 +51,14 @@ static bool kernfs_lockdep(struct kernfs_node *kn)
#endif
}
-static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen)
-{
- if (!kn)
- return strscpy(buf, "(null)", buflen);
-
- return strscpy(buf, kn->parent ? kn->name : "/", buflen);
-}
-
/* kernfs_node_depth - compute depth from @from to @to */
static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to)
{
size_t depth = 0;
- while (to->parent && to != from) {
+ while (rcu_dereference(to->__parent) && to != from) {
depth++;
- to = to->parent;
+ to = rcu_dereference(to->__parent);
}
return depth;
}
@@ -84,18 +76,18 @@ static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a,
db = kernfs_depth(rb->kn, b);
while (da > db) {
- a = a->parent;
+ a = rcu_dereference(a->__parent);
da--;
}
while (db > da) {
- b = b->parent;
+ b = rcu_dereference(b->__parent);
db--;
}
/* worst case b and a will be the same at root */
while (b != a) {
- b = b->parent;
- a = a->parent;
+ b = rcu_dereference(b->__parent);
+ a = rcu_dereference(a->__parent);
}
return a;
@@ -168,10 +160,13 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
/* Calculate how many bytes we need for the rest */
for (i = depth_to - 1; i >= 0; i--) {
+ const char *name;
+
for (kn = kn_to, j = 0; j < i; j++)
- kn = kn->parent;
+ kn = rcu_dereference(kn->__parent);
- len += scnprintf(buf + len, buflen - len, "/%s", kn->name);
+ name = rcu_dereference(kn->name);
+ len += scnprintf(buf + len, buflen - len, "/%s", name);
}
return len;
@@ -195,13 +190,18 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
*/
int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
{
- unsigned long flags;
- int ret;
+ struct kernfs_node *kn_parent;
- read_lock_irqsave(&kernfs_rename_lock, flags);
- ret = kernfs_name_locked(kn, buf, buflen);
- read_unlock_irqrestore(&kernfs_rename_lock, flags);
- return ret;
+ if (!kn)
+ return strscpy(buf, "(null)", buflen);
+
+ guard(rcu)();
+ /*
+ * KERNFS_ROOT_INVARIANT_PARENT is ignored here. The name is RCU-freed,
+ * and the parent either exists or it does not.
+ */
+ kn_parent = rcu_dereference(kn->__parent);
+ return strscpy(buf, kn_parent ? rcu_dereference(kn->name) : "/", buflen);
}
/**
@@ -223,13 +223,17 @@ int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen)
int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from,
char *buf, size_t buflen)
{
- unsigned long flags;
- int ret;
+ struct kernfs_root *root;
- read_lock_irqsave(&kernfs_rename_lock, flags);
- ret = kernfs_path_from_node_locked(to, from, buf, buflen);
- read_unlock_irqrestore(&kernfs_rename_lock, flags);
- return ret;
+ guard(rcu)();
+ if (to) {
+ root = kernfs_root(to);
+ if (!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)) {
+ guard(read_lock_irqsave)(&kernfs_rename_lock);
+ return kernfs_path_from_node_locked(to, from, buf, buflen);
+ }
+ }
+ return kernfs_path_from_node_locked(to, from, buf, buflen);
}
EXPORT_SYMBOL_GPL(kernfs_path_from_node);
@@ -295,7 +299,7 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
unsigned long flags;
read_lock_irqsave(&kernfs_rename_lock, flags);
- parent = kn->parent;
+ parent = kernfs_parent(kn);
kernfs_get(parent);
read_unlock_irqrestore(&kernfs_rename_lock, flags);
@@ -336,13 +340,13 @@ static int kernfs_name_compare(unsigned int hash, const char *name,
return -1;
if (ns > kn->ns)
return 1;
- return strcmp(name, kn->name);
+ return strcmp(name, kernfs_rcu_name(kn));
}
static int kernfs_sd_compare(const struct kernfs_node *left,
const struct kernfs_node *right)
{
- return kernfs_name_compare(left->hash, left->name, left->ns, right);
+ return kernfs_name_compare(left->hash, kernfs_rcu_name(left), left->ns, right);
}
/**
@@ -360,8 +364,12 @@ static int kernfs_sd_compare(const struct kernfs_node *left,
*/
static int kernfs_link_sibling(struct kernfs_node *kn)
{
- struct rb_node **node = &kn->parent->dir.children.rb_node;
struct rb_node *parent = NULL;
+ struct kernfs_node *kn_parent;
+ struct rb_node **node;
+
+ kn_parent = kernfs_parent(kn);
+ node = &kn_parent->dir.children.rb_node;
while (*node) {
struct kernfs_node *pos;
@@ -380,13 +388,13 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
/* add new node and rebalance the tree */
rb_link_node(&kn->rb, parent, node);
- rb_insert_color(&kn->rb, &kn->parent->dir.children);
+ rb_insert_color(&kn->rb, &kn_parent->dir.children);
/* successfully added, account subdir number */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
- kn->parent->dir.subdirs++;
- kernfs_inc_rev(kn->parent);
+ kn_parent->dir.subdirs++;
+ kernfs_inc_rev(kn_parent);
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
return 0;
@@ -407,16 +415,19 @@ static int kernfs_link_sibling(struct kernfs_node *kn)
*/
static bool kernfs_unlink_sibling(struct kernfs_node *kn)
{
+ struct kernfs_node *kn_parent;
+
if (RB_EMPTY_NODE(&kn->rb))
return false;
+ kn_parent = kernfs_parent(kn);
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
if (kernfs_type(kn) == KERNFS_DIR)
- kn->parent->dir.subdirs--;
- kernfs_inc_rev(kn->parent);
+ kn_parent->dir.subdirs--;
+ kernfs_inc_rev(kn_parent);
up_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
- rb_erase(&kn->rb, &kn->parent->dir.children);
+ rb_erase(&kn->rb, &kn_parent->dir.children);
RB_CLEAR_NODE(&kn->rb);
return true;
}
@@ -533,7 +544,8 @@ static void kernfs_free_rcu(struct rcu_head *rcu)
{
struct kernfs_node *kn = container_of(rcu, struct kernfs_node, rcu);
- kfree_const(kn->name);
+ /* If the whole node goes away, then name can't be used outside */
+ kfree_const(rcu_access_pointer(kn->name));
if (kn->iattr) {
simple_xattrs_free(&kn->iattr->xattrs, NULL);
@@ -562,11 +574,12 @@ void kernfs_put(struct kernfs_node *kn)
* Moving/renaming is always done while holding reference.
* kn->parent won't change beneath us.
*/
- parent = kn->parent;
+ parent = kernfs_parent(kn);
WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
"kernfs_put: %s/%s: released with incorrect active_ref %d\n",
- parent ? parent->name : "", kn->name, atomic_read(&kn->active));
+ parent ? rcu_dereference(parent->name) : "",
+ rcu_dereference(kn->name), atomic_read(&kn->active));
if (kernfs_type(kn) == KERNFS_LINK)
kernfs_put(kn->symlink.target_kn);
@@ -643,7 +656,7 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
RB_CLEAR_NODE(&kn->rb);
- kn->name = name;
+ rcu_assign_pointer(kn->name, name);
kn->mode = mode;
kn->flags = flags;
@@ -701,7 +714,7 @@ struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
name, mode, uid, gid, flags);
if (kn) {
kernfs_get(parent);
- kn->parent = parent;
+ rcu_assign_pointer(kn->__parent, parent);
}
return kn;
}
@@ -769,18 +782,20 @@ err_unlock:
*/
int kernfs_add_one(struct kernfs_node *kn)
{
- struct kernfs_node *parent = kn->parent;
- struct kernfs_root *root = kernfs_root(parent);
+ struct kernfs_root *root = kernfs_root(kn);
struct kernfs_iattrs *ps_iattr;
+ struct kernfs_node *parent;
bool has_ns;
int ret;
down_write(&root->kernfs_rwsem);
+ parent = kernfs_parent(kn);
ret = -EINVAL;
has_ns = kernfs_ns_enabled(parent);
if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
- has_ns ? "required" : "invalid", parent->name, kn->name))
+ has_ns ? "required" : "invalid",
+ kernfs_rcu_name(parent), kernfs_rcu_name(kn)))
goto out_unlock;
if (kernfs_type(parent) != KERNFS_DIR)
@@ -790,7 +805,7 @@ int kernfs_add_one(struct kernfs_node *kn)
if (parent->flags & (KERNFS_REMOVING | KERNFS_EMPTY_DIR))
goto out_unlock;
- kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->hash = kernfs_name_hash(kernfs_rcu_name(kn), kn->ns);
ret = kernfs_link_sibling(kn);
if (ret)
@@ -846,7 +861,7 @@ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
if (has_ns != (bool)ns) {
WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
- has_ns ? "required" : "invalid", parent->name, name);
+ has_ns ? "required" : "invalid", kernfs_rcu_name(parent), name);
return NULL;
}
@@ -949,6 +964,11 @@ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
return kn;
}
+unsigned int kernfs_root_flags(struct kernfs_node *kn)
+{
+ return kernfs_root(kn)->flags;
+}
+
/**
* kernfs_create_root - create a new kernfs hierarchy
* @scops: optional syscall operations for the hierarchy
@@ -1112,7 +1132,7 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
struct dentry *dentry, unsigned int flags)
{
- struct kernfs_node *kn;
+ struct kernfs_node *kn, *parent;
struct kernfs_root *root;
if (flags & LOOKUP_RCU)
@@ -1120,8 +1140,6 @@ static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
/* Negative hashed dentry? */
if (d_really_is_negative(dentry)) {
- struct kernfs_node *parent;
-
/* If the kernfs parent node has changed discard and
* proceed to ->lookup.
*
@@ -1163,16 +1181,17 @@ static int kernfs_dop_revalidate(struct inode *dir, const struct qstr *name,
if (!kernfs_active(kn))
goto out_bad;
+ parent = kernfs_parent(kn);
/* The kernfs node has been moved? */
- if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
+ if (kernfs_dentry_node(dentry->d_parent) != parent)
goto out_bad;
/* The kernfs node has been renamed */
- if (strcmp(dentry->d_name.name, kn->name) != 0)
+ if (strcmp(dentry->d_name.name, kernfs_rcu_name(kn)) != 0)
goto out_bad;
/* The kernfs node has been moved to a different namespace */
- if (kn->parent && kernfs_ns_enabled(kn->parent) &&
+ if (parent && kernfs_ns_enabled(parent) &&
kernfs_info(dentry->d_sb)->ns != kn->ns)
goto out_bad;
@@ -1365,7 +1384,7 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
return kernfs_leftmost_descendant(rb_to_kn(rbn));
/* no sibling left, visit parent */
- return pos->parent;
+ return kernfs_parent(pos);
}
static void kernfs_activate_one(struct kernfs_node *kn)
@@ -1377,7 +1396,7 @@ static void kernfs_activate_one(struct kernfs_node *kn)
if (kernfs_active(kn) || (kn->flags & (KERNFS_HIDDEN | KERNFS_REMOVING)))
return;
- WARN_ON_ONCE(kn->parent && RB_EMPTY_NODE(&kn->rb));
+ WARN_ON_ONCE(rcu_access_pointer(kn->__parent) && RB_EMPTY_NODE(&kn->rb));
WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);
atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
@@ -1447,7 +1466,7 @@ void kernfs_show(struct kernfs_node *kn, bool show)
static void __kernfs_remove(struct kernfs_node *kn)
{
- struct kernfs_node *pos;
+ struct kernfs_node *pos, *parent;
/* Short-circuit if non-root @kn has already finished removal. */
if (!kn)
@@ -1459,10 +1478,10 @@ static void __kernfs_remove(struct kernfs_node *kn)
* This is for kernfs_remove_self() which plays with active ref
* after removal.
*/
- if (kn->parent && RB_EMPTY_NODE(&kn->rb))
+ if (kernfs_parent(kn) && RB_EMPTY_NODE(&kn->rb))
return;
- pr_debug("kernfs %s: removing\n", kn->name);
+ pr_debug("kernfs %s: removing\n", kernfs_rcu_name(kn));
/* prevent new usage by marking all nodes removing and deactivating */
pos = NULL;
@@ -1485,14 +1504,14 @@ static void __kernfs_remove(struct kernfs_node *kn)
kernfs_get(pos);
kernfs_drain(pos);
-
+ parent = kernfs_parent(pos);
/*
* kernfs_unlink_sibling() succeeds once per node. Use it
* to decide who's responsible for cleanups.
*/
- if (!pos->parent || kernfs_unlink_sibling(pos)) {
+ if (!parent || kernfs_unlink_sibling(pos)) {
struct kernfs_iattrs *ps_iattr =
- pos->parent ? pos->parent->iattr : NULL;
+ parent ? parent->iattr : NULL;
/* update timestamps on the parent */
down_write(&kernfs_root(kn)->kernfs_iattr_rwsem);
@@ -1718,11 +1737,11 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
{
struct kernfs_node *old_parent;
struct kernfs_root *root;
- const char *old_name = NULL;
+ const char *old_name;
int error;
/* can't move or rename root */
- if (!kn->parent)
+ if (!rcu_access_pointer(kn->__parent))
return -EINVAL;
root = kernfs_root(kn);
@@ -1733,9 +1752,19 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
(new_parent->flags & KERNFS_EMPTY_DIR))
goto out;
+ old_parent = kernfs_parent(kn);
+ if (root->flags & KERNFS_ROOT_INVARIANT_PARENT) {
+ error = -EINVAL;
+ if (WARN_ON_ONCE(old_parent != new_parent))
+ goto out;
+ }
+
error = 0;
- if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
- (strcmp(kn->name, new_name) == 0))
+ old_name = kernfs_rcu_name(kn);
+ if (!new_name)
+ new_name = old_name;
+ if ((old_parent == new_parent) && (kn->ns == new_ns) &&
+ (strcmp(old_name, new_name) == 0))
goto out; /* nothing to rename */
error = -EEXIST;
@@ -1743,7 +1772,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
goto out;
/* rename kernfs_node */
- if (strcmp(kn->name, new_name) != 0) {
+ if (strcmp(old_name, new_name) != 0) {
error = -ENOMEM;
new_name = kstrdup_const(new_name, GFP_KERNEL);
if (!new_name)
@@ -1756,27 +1785,32 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
* Move to the appropriate place in the appropriate directories rbtree.
*/
kernfs_unlink_sibling(kn);
- kernfs_get(new_parent);
- /* rename_lock protects ->parent and ->name accessors */
- write_lock_irq(&kernfs_rename_lock);
+ /* rename_lock protects ->parent accessors */
+ if (old_parent != new_parent) {
+ kernfs_get(new_parent);
+ write_lock_irq(&kernfs_rename_lock);
- old_parent = kn->parent;
- kn->parent = new_parent;
+ rcu_assign_pointer(kn->__parent, new_parent);
- kn->ns = new_ns;
- if (new_name) {
- old_name = kn->name;
- kn->name = new_name;
- }
+ kn->ns = new_ns;
+ if (new_name)
+ rcu_assign_pointer(kn->name, new_name);
- write_unlock_irq(&kernfs_rename_lock);
+ write_unlock_irq(&kernfs_rename_lock);
+ kernfs_put(old_parent);
+ } else {
+ /* name assignment is RCU protected, parent is the same */
+ kn->ns = new_ns;
+ if (new_name)
+ rcu_assign_pointer(kn->name, new_name);
+ }
- kn->hash = kernfs_name_hash(kn->name, kn->ns);
+ kn->hash = kernfs_name_hash(new_name ?: old_name, kn->ns);
kernfs_link_sibling(kn);
- kernfs_put(old_parent);
- kfree_const(old_name);
+ if (new_name && !is_kernel_rodata((unsigned long)old_name))
+ kfree_rcu_mightsleep(old_name);
error = 0;
out:
@@ -1795,7 +1829,8 @@ static struct kernfs_node *kernfs_dir_pos(const void *ns,
{
if (pos) {
int valid = kernfs_active(pos) &&
- pos->parent == parent && hash == pos->hash;
+ rcu_access_pointer(pos->__parent) == parent &&
+ hash == pos->hash;
kernfs_put(pos);
if (!valid)
pos = NULL;
@@ -1860,7 +1895,7 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
pos;
pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
- const char *name = pos->name;
+ const char *name = kernfs_rcu_name(pos);
unsigned int type = fs_umode_to_dtype(pos->mode);
int len = strlen(name);
ino_t ino = kernfs_ino(pos);
@@ -1869,10 +1904,10 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
file->private_data = pos;
kernfs_get(pos);
- up_read(&root->kernfs_rwsem);
- if (!dir_emit(ctx, name, len, ino, type))
+ if (!dir_emit(ctx, name, len, ino, type)) {
+ up_read(&root->kernfs_rwsem);
return 0;
- down_read(&root->kernfs_rwsem);
+ }
}
up_read(&root->kernfs_rwsem);
file->private_data = NULL;
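The readdir fix above keeps kernfs_rwsem held across dir_emit() so the RCU-managed name stays stable while it is copied out, releasing the lock exactly once on the early-return path. A generic sketch of that corrected shape, with a pthread rwlock standing in for the kernel rwsem (names are illustrative):

#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static bool emit(int item) { (void)item; return true; }	/* consumer callback */

static void iterate(const int *items, int n)
{
	pthread_rwlock_rdlock(&lock);
	for (int i = 0; i < n; i++) {
		if (!emit(items[i])) {		/* consumer is full: stop early */
			pthread_rwlock_unlock(&lock);
			return;
		}
	}
	pthread_rwlock_unlock(&lock);		/* single release on each path */
}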
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 0eb320617d7b..66fe8fe41f06 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -911,9 +911,11 @@ repeat:
/* kick fsnotify */
down_read(&root->kernfs_supers_rwsem);
+ down_read(&root->kernfs_rwsem);
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
struct kernfs_node *parent;
struct inode *p_inode = NULL;
+ const char *kn_name;
struct inode *inode;
struct qstr name;
@@ -927,7 +929,8 @@ repeat:
if (!inode)
continue;
- name = QSTR(kn->name);
+ kn_name = kernfs_rcu_name(kn);
+ name = QSTR(kn_name);
parent = kernfs_get_parent(kn);
if (parent) {
p_inode = ilookup(info->sb, kernfs_ino(parent));
@@ -947,6 +950,7 @@ repeat:
iput(inode);
}
+ up_read(&root->kernfs_rwsem);
up_read(&root->kernfs_supers_rwsem);
kernfs_put(kn);
goto repeat;
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h
index b42ee6547cdc..40a2a9cd819d 100644
--- a/fs/kernfs/kernfs-internal.h
+++ b/fs/kernfs/kernfs-internal.h
@@ -19,6 +19,8 @@
#include <linux/kernfs.h>
#include <linux/fs_context.h>
+extern rwlock_t kernfs_rename_lock;
+
struct kernfs_iattrs {
kuid_t ia_uid;
kgid_t ia_gid;
@@ -64,11 +66,14 @@ struct kernfs_root {
*
* Return: the kernfs_root @kn belongs to.
*/
-static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn)
+static inline struct kernfs_root *kernfs_root(const struct kernfs_node *kn)
{
+ const struct kernfs_node *knp;
/* if parent exists, it's always a dir; otherwise, @kn is a dir */
- if (kn->parent)
- kn = kn->parent;
+ guard(rcu)();
+ knp = rcu_dereference(kn->__parent);
+ if (knp)
+ kn = knp;
return kn->dir.root;
}
@@ -97,6 +102,32 @@ struct kernfs_super_info {
};
#define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info))
+static inline bool kernfs_root_is_locked(const struct kernfs_node *kn)
+{
+ return lockdep_is_held(&kernfs_root(kn)->kernfs_rwsem);
+}
+
+static inline const char *kernfs_rcu_name(const struct kernfs_node *kn)
+{
+ return rcu_dereference_check(kn->name, kernfs_root_is_locked(kn));
+}
+
+static inline struct kernfs_node *kernfs_parent(const struct kernfs_node *kn)
+{
+ /*
+ * The kernfs_node::__parent remains valid within an RCU section. The kn
+ * can be reparented (and renamed), which changes the entry. This can be
+ * avoided by locking kernfs_root::kernfs_rwsem or kernfs_rename_lock.
+ * Both locks can be used to obtain a reference on __parent. Once the
+ * reference count reaches 0 the node is about to be freed and can no
+ * longer be renamed (or moved to a different parent).
+ */
+ return rcu_dereference_check(kn->__parent,
+ kernfs_root_is_locked(kn) ||
+ lockdep_is_held(&kernfs_rename_lock) ||
+ !atomic_read(&kn->count));
+}
+
static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry)
{
if (d_really_is_negative(dentry))
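kernfs_parent() above pairs rcu_dereference_check() on the reader side with writers that publish via rcu_assign_pointer() under kernfs_rename_lock. The ordering half of that contract can be modeled in portable C11; this sketch shows only the release/acquire publication, not RCU's deferred freeing, and the names are stand-ins rather than kernel API:

#include <stdatomic.h>
#include <stddef.h>

struct node { const char *name; };

static _Atomic(struct node *) parent;	/* stands in for kn->__parent */

static void publish_parent(struct node *p)
{
	/* writer: all stores initializing *p happen-before the pointer store */
	atomic_store_explicit(&parent, p, memory_order_release);
}

static struct node *read_parent(void)
{
	/* reader: sees a fully initialized node or NULL, never a torn one */
	return atomic_load_explicit(&parent, memory_order_acquire);
}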
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 1358c21837f1..5124e196c2bf 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -145,8 +145,10 @@ static struct dentry *kernfs_fh_to_parent(struct super_block *sb,
static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
{
struct kernfs_node *kn = kernfs_dentry_node(child);
+ struct kernfs_root *root = kernfs_root(kn);
- return d_obtain_alias(kernfs_get_inode(child->d_sb, kn->parent));
+ guard(rwsem_read)(&root->kernfs_rwsem);
+ return d_obtain_alias(kernfs_get_inode(child->d_sb, kernfs_parent(kn)));
}
static const struct export_operations kernfs_export_ops = {
@@ -186,10 +188,10 @@ static struct kernfs_node *find_next_ancestor(struct kernfs_node *child,
return NULL;
}
- while (child->parent != parent) {
- if (!child->parent)
+ while (kernfs_parent(child) != parent) {
+ child = kernfs_parent(child);
+ if (!child)
return NULL;
- child = child->parent;
}
return child;
@@ -207,16 +209,27 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
{
struct dentry *dentry;
struct kernfs_node *knparent;
+ struct kernfs_root *root;
BUG_ON(sb->s_op != &kernfs_sops);
dentry = dget(sb->s_root);
/* Check if this is the root kernfs_node */
- if (!kn->parent)
+ if (!rcu_access_pointer(kn->__parent))
return dentry;
- knparent = find_next_ancestor(kn, NULL);
+ root = kernfs_root(kn);
+ /*
+ * As long as kn is valid, its parent cannot vanish. This is cgroup's
+ * kn, so its parent cannot be replaced; it is therefore safe to use
+ * the ancestor node outside of the RCU or locked section.
+ */
+ if (WARN_ON_ONCE(!(root->flags & KERNFS_ROOT_INVARIANT_PARENT)))
+ return ERR_PTR(-EINVAL);
+ scoped_guard(rcu) {
+ knparent = find_next_ancestor(kn, NULL);
+ }
if (WARN_ON(!knparent)) {
dput(dentry);
return ERR_PTR(-EINVAL);
@@ -225,17 +238,26 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
do {
struct dentry *dtmp;
struct kernfs_node *kntmp;
+ const char *name;
if (kn == knparent)
return dentry;
- kntmp = find_next_ancestor(kn, knparent);
- if (WARN_ON(!kntmp)) {
+
+ scoped_guard(rwsem_read, &root->kernfs_rwsem) {
+ kntmp = find_next_ancestor(kn, knparent);
+ if (WARN_ON(!kntmp)) {
+ dput(dentry);
+ return ERR_PTR(-EINVAL);
+ }
+ name = kstrdup(kernfs_rcu_name(kntmp), GFP_KERNEL);
+ }
+ if (!name) {
dput(dentry);
- return ERR_PTR(-EINVAL);
+ return ERR_PTR(-ENOMEM);
}
- dtmp = lookup_positive_unlocked(kntmp->name, dentry,
- strlen(kntmp->name));
+ dtmp = lookup_positive_unlocked(name, dentry, strlen(name));
dput(dentry);
+ kfree(name);
if (IS_ERR(dtmp))
return dtmp;
knparent = kntmp;
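kernfs_node_dentry() above snapshots the node name with kstrdup() while kernfs_rwsem is held, then performs the sleeping lookup on the private copy. The same copy-then-unlock shape in plain pthreads (illustrative names only):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static const char *protected_name = "example";

static char *copy_name(void)
{
	char *name;

	pthread_rwlock_rdlock(&lock);
	name = strdup(protected_name);	/* snapshot while the name is stable */
	pthread_rwlock_unlock(&lock);
	return name;	/* caller may now block (e.g. do a lookup), then free() */
}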
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 45371a70caa7..0bd8a2143723 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -62,10 +62,10 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
/* go up to the root, stop at the base */
base = parent;
- while (base->parent) {
- kn = target->parent;
- while (kn->parent && base != kn)
- kn = kn->parent;
+ while (kernfs_parent(base)) {
+ kn = kernfs_parent(target);
+ while (kernfs_parent(kn) && base != kn)
+ kn = kernfs_parent(kn);
if (base == kn)
break;
@@ -75,14 +75,14 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
strcpy(s, "../");
s += 3;
- base = base->parent;
+ base = kernfs_parent(base);
}
/* determine end of target string for reverse fillup */
kn = target;
- while (kn->parent && kn != base) {
- len += strlen(kn->name) + 1;
- kn = kn->parent;
+ while (kernfs_parent(kn) && kn != base) {
+ len += strlen(kernfs_rcu_name(kn)) + 1;
+ kn = kernfs_parent(kn);
}
/* check limits */
@@ -94,15 +94,16 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
/* reverse fillup of target string from target to base */
kn = target;
- while (kn->parent && kn != base) {
- int slen = strlen(kn->name);
+ while (kernfs_parent(kn) && kn != base) {
+ const char *name = kernfs_rcu_name(kn);
+ int slen = strlen(name);
len -= slen;
- memcpy(s + len, kn->name, slen);
+ memcpy(s + len, name, slen);
if (len)
s[--len] = '/';
- kn = kn->parent;
+ kn = kernfs_parent(kn);
}
return 0;
@@ -111,12 +112,13 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
static int kernfs_getlink(struct inode *inode, char *path)
{
struct kernfs_node *kn = inode->i_private;
- struct kernfs_node *parent = kn->parent;
+ struct kernfs_node *parent;
struct kernfs_node *target = kn->symlink.target_kn;
- struct kernfs_root *root = kernfs_root(parent);
+ struct kernfs_root *root = kernfs_root(kn);
int error;
down_read(&root->kernfs_rwsem);
+ parent = kernfs_parent(kn);
error = kernfs_get_target_path(parent, target, path);
up_read(&root->kernfs_rwsem);
diff --git a/fs/lockd/Makefile b/fs/lockd/Makefile
index fe3e23dd29c3..51bbe22d21e3 100644
--- a/fs/lockd/Makefile
+++ b/fs/lockd/Makefile
@@ -8,6 +8,6 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_LOCKD) += lockd.o
lockd-y := clntlock.o clntproc.o clntxdr.o host.o svc.o svclock.o \
- svcshare.o svcproc.o svcsubs.o mon.o trace.o xdr.o
+ svcshare.o svcproc.o svcsubs.o mon.o trace.o xdr.o netlink.o
lockd-$(CONFIG_LOCKD_V4) += clnt4xdr.o xdr4.o svc4proc.o
lockd-$(CONFIG_PROC_FS) += procfs.o
diff --git a/fs/lockd/netlink.c b/fs/lockd/netlink.c
new file mode 100644
index 000000000000..6e00b02cad90
--- /dev/null
+++ b/fs/lockd/netlink.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/lockd.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "netlink.h"
+
+#include <uapi/linux/lockd_netlink.h>
+
+/* LOCKD_CMD_SERVER_SET - do */
+static const struct nla_policy lockd_server_set_nl_policy[LOCKD_A_SERVER_UDP_PORT + 1] = {
+ [LOCKD_A_SERVER_GRACETIME] = { .type = NLA_U32, },
+ [LOCKD_A_SERVER_TCP_PORT] = { .type = NLA_U16, },
+ [LOCKD_A_SERVER_UDP_PORT] = { .type = NLA_U16, },
+};
+
+/* Ops table for lockd */
+static const struct genl_split_ops lockd_nl_ops[] = {
+ {
+ .cmd = LOCKD_CMD_SERVER_SET,
+ .doit = lockd_nl_server_set_doit,
+ .policy = lockd_server_set_nl_policy,
+ .maxattr = LOCKD_A_SERVER_UDP_PORT,
+ .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_DO,
+ },
+ {
+ .cmd = LOCKD_CMD_SERVER_GET,
+ .doit = lockd_nl_server_get_doit,
+ .flags = GENL_CMD_CAP_DO,
+ },
+};
+
+struct genl_family lockd_nl_family __ro_after_init = {
+ .name = LOCKD_FAMILY_NAME,
+ .version = LOCKD_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = lockd_nl_ops,
+ .n_split_ops = ARRAY_SIZE(lockd_nl_ops),
+};
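One plausible way to drive the new family from userspace is libnl-3; this sketch sends LOCKD_CMD_SERVER_SET with a 30-second grace period and assumes the <linux/lockd_netlink.h> uapi header added by this series is installed. The set operation carries GENL_ADMIN_PERM, so CAP_NET_ADMIN is required; build with -lnl-3 -lnl-genl-3.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/lockd_netlink.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	if (!sk || genl_connect(sk))
		return 1;
	family = genl_ctrl_resolve(sk, LOCKD_FAMILY_NAME);
	if (family < 0)
		return 1;

	msg = nlmsg_alloc();
	if (!msg)
		return 1;
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    LOCKD_CMD_SERVER_SET, LOCKD_FAMILY_VERSION);
	nla_put_u32(msg, LOCKD_A_SERVER_GRACETIME, 30);	/* seconds */
	nla_put_u16(msg, LOCKD_A_SERVER_TCP_PORT, 4045);

	err = nl_send_auto(sk, msg);
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0;
}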
diff --git a/fs/lockd/netlink.h b/fs/lockd/netlink.h
new file mode 100644
index 000000000000..1920543a7955
--- /dev/null
+++ b/fs/lockd/netlink.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/lockd.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_LOCKD_GEN_H
+#define _LINUX_LOCKD_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/lockd_netlink.h>
+
+int lockd_nl_server_set_doit(struct sk_buff *skb, struct genl_info *info);
+int lockd_nl_server_get_doit(struct sk_buff *skb, struct genl_info *info);
+
+extern struct genl_family lockd_nl_family;
+
+#endif /* _LINUX_LOCKD_GEN_H */
diff --git a/fs/lockd/netns.h b/fs/lockd/netns.h
index 17432c445fe6..88e8e2a97397 100644
--- a/fs/lockd/netns.h
+++ b/fs/lockd/netns.h
@@ -10,6 +10,9 @@ struct lockd_net {
unsigned int nlmsvc_users;
unsigned long next_gc;
unsigned long nrhosts;
+ u32 gracetime;
+ u16 tcp_port;
+ u16 udp_port;
struct delayed_work grace_period_end;
struct lock_manager lockd_manager;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 2c8eedc6c2cc..e80262a51884 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -41,6 +41,7 @@
#include "netns.h"
#include "procfs.h"
+#include "netlink.h"
#define NLMDBG_FACILITY NLMDBG_SVC
#define LOCKD_BUFSIZE (1024 + NLMSVC_XDRSIZE)
@@ -83,8 +84,14 @@ static const int nlm_port_min = 0, nlm_port_max = 65535;
static struct ctl_table_header * nlm_sysctl_table;
#endif
-static unsigned long get_lockd_grace_period(void)
+static unsigned long get_lockd_grace_period(struct net *net)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ /* Return the net-ns specific grace period, if there is one */
+ if (ln->gracetime)
+ return ln->gracetime * HZ;
+
/* Note: nlm_timeout should always be nonzero */
if (nlm_grace_period)
return roundup(nlm_grace_period, nlm_timeout) * HZ;
@@ -103,7 +110,7 @@ static void grace_ender(struct work_struct *grace)
static void set_grace_period(struct net *net)
{
- unsigned long grace_period = get_lockd_grace_period();
+ unsigned long grace_period = get_lockd_grace_period(net);
struct lockd_net *ln = net_generic(net, lockd_net_id);
locks_start_grace(net, &ln->lockd_manager);
@@ -166,15 +173,16 @@ static int create_lockd_listener(struct svc_serv *serv, const char *name,
static int create_lockd_family(struct svc_serv *serv, struct net *net,
const int family, const struct cred *cred)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
int err;
- err = create_lockd_listener(serv, "udp", net, family, nlm_udpport,
- cred);
+ err = create_lockd_listener(serv, "udp", net, family,
+ ln->udp_port ? ln->udp_port : nlm_udpport, cred);
if (err < 0)
return err;
- return create_lockd_listener(serv, "tcp", net, family, nlm_tcpport,
- cred);
+ return create_lockd_listener(serv, "tcp", net, family,
+ ln->tcp_port ? ln->tcp_port : nlm_tcpport, cred);
}
/*
@@ -459,9 +467,10 @@ static const struct ctl_table nlm_sysctls[] = {
{
.procname = "nsm_local_state",
.data = &nsm_local_state,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(nsm_local_state),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_douintvec,
+ .extra1 = SYSCTL_ZERO,
},
};
@@ -588,6 +597,10 @@ static int __init init_nlm(void)
if (err)
goto err_pernet;
+ err = genl_register_family(&lockd_nl_family);
+ if (err)
+ goto err_netlink;
+
err = lockd_create_procfs();
if (err)
goto err_procfs;
@@ -595,6 +608,8 @@ static int __init init_nlm(void)
return 0;
err_procfs:
+ genl_unregister_family(&lockd_nl_family);
+err_netlink:
unregister_pernet_subsys(&lockd_net_ops);
err_pernet:
#ifdef CONFIG_SYSCTL
@@ -608,6 +623,7 @@ static void __exit exit_nlm(void)
{
/* FIXME: delete all NLM clients */
nlm_shutdown_hosts();
+ genl_unregister_family(&lockd_nl_family);
lockd_remove_procfs();
unregister_pernet_subsys(&lockd_net_ops);
#ifdef CONFIG_SYSCTL
@@ -710,3 +726,94 @@ static struct svc_program nlmsvc_program = {
.pg_init_request = svc_generic_init_request,
.pg_rpcbind_set = svc_generic_rpcbind_set,
};
+
+/**
+ * lockd_nl_server_set_doit - set the lockd server parameters via netlink
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * This updates the per-net values. When updating the values in the init_net
+ * namespace, also update the "legacy" global values.
+ *
+ * Return 0 on success or a negative errno.
+ */
+int lockd_nl_server_set_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+ const struct nlattr *attr;
+
+ if (GENL_REQ_ATTR_CHECK(info, LOCKD_A_SERVER_GRACETIME))
+ return -EINVAL;
+
+ if (info->attrs[LOCKD_A_SERVER_GRACETIME] ||
+ info->attrs[LOCKD_A_SERVER_TCP_PORT] ||
+ info->attrs[LOCKD_A_SERVER_UDP_PORT]) {
+ attr = info->attrs[LOCKD_A_SERVER_GRACETIME];
+ if (attr) {
+ u32 gracetime = nla_get_u32(attr);
+
+ if (gracetime > nlm_grace_period_max)
+ return -EINVAL;
+
+ ln->gracetime = gracetime;
+
+ if (net == &init_net)
+ nlm_grace_period = gracetime;
+ }
+
+ attr = info->attrs[LOCKD_A_SERVER_TCP_PORT];
+ if (attr) {
+ ln->tcp_port = nla_get_u16(attr);
+ if (net == &init_net)
+ nlm_tcpport = ln->tcp_port;
+ }
+
+ attr = info->attrs[LOCKD_A_SERVER_UDP_PORT];
+ if (attr) {
+ ln->udp_port = nla_get_u16(attr);
+ if (net == &init_net)
+ nlm_udpport = ln->udp_port;
+ }
+ }
+ return 0;
+}
+
+/**
+ * lockd_nl_server_get_doit - get lockd server parameters via netlink
+ * @skb: reply buffer
+ * @info: netlink metadata and command arguments
+ *
+ * Return 0 on success or a negative errno.
+ */
+int lockd_nl_server_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+ void *hdr;
+ int err;
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = genlmsg_iput(skb, info);
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto err_free_msg;
+ }
+
+ err = nla_put_u32(skb, LOCKD_A_SERVER_GRACETIME, ln->gracetime) ||
+ nla_put_u16(skb, LOCKD_A_SERVER_TCP_PORT, ln->tcp_port) ||
+ nla_put_u16(skb, LOCKD_A_SERVER_UDP_PORT, ln->udp_port);
+ if (err)
+ goto err_free_msg;
+
+ genlmsg_end(skb, hdr);
+
+ return genlmsg_reply(skb, info);
+err_free_msg:
+ nlmsg_free(skb);
+
+ return err;
+}
diff --git a/fs/namespace.c b/fs/namespace.c
index 6100e5b962a6..14935a0500a2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2478,7 +2478,8 @@ struct vfsmount *clone_private_mount(const struct path *path)
struct mount *old_mnt = real_mount(path->mnt);
struct mount *new_mnt;
- scoped_guard(rwsem_read, &namespace_sem)
+ guard(rwsem_read)(&namespace_sem);
+
if (IS_MNT_UNBINDABLE(old_mnt))
return ERR_PTR(-EINVAL);
@@ -5326,8 +5327,10 @@ struct kstatmount {
struct mnt_idmap *idmap;
u64 mask;
struct path root;
- struct statmount sm;
struct seq_file seq;
+
+ /* Must be last -- ends in a flexible-array member. */
+ struct statmount sm;
};
static u64 mnt_to_attr_flags(struct vfsmount *mnt)
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 3b0918ade53c..02c916a55020 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -546,6 +546,8 @@ int nfs_create_rpc_client(struct nfs_client *clp,
args.flags |= RPC_CLNT_CREATE_NOPING;
if (test_bit(NFS_CS_REUSEPORT, &clp->cl_flags))
args.flags |= RPC_CLNT_CREATE_REUSEPORT;
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ args.flags |= RPC_CLNT_CREATE_NETUNREACH_FATAL;
if (!IS_ERR(clp->cl_rpcclient))
return 0;
@@ -709,6 +711,9 @@ static int nfs_init_server(struct nfs_server *server,
if (ctx->flags & NFS_MOUNT_NORESVPORT)
set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (ctx->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp))
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 4db912f56230..8bdbc4dca89c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -79,6 +79,7 @@ static void nfs_mark_return_delegation(struct nfs_server *server,
struct nfs_delegation *delegation)
{
set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}
@@ -306,7 +307,8 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
- if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ if (delegation->inode &&
+ !test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
/* Refcount matched in nfs_end_delegation_return() */
ret = nfs_get_delegation(delegation);
@@ -330,14 +332,16 @@ nfs_start_delegation_return(struct nfs_inode *nfsi)
}
static void nfs_abort_delegation_return(struct nfs_delegation *delegation,
- struct nfs_client *clp, int err)
+ struct nfs_server *server, int err)
{
-
spin_lock(&delegation->lock);
clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
if (err == -EAGAIN) {
set_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN_DELAYED, &clp->cl_state);
+ set_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGRETURN_DELAYED,
+ &server->nfs_client->cl_state);
}
spin_unlock(&delegation->lock);
}
@@ -547,7 +551,7 @@ out:
*/
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
- struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+ struct nfs_server *server = NFS_SERVER(inode);
unsigned int mode = O_WRONLY | O_RDWR;
int err = 0;
@@ -569,11 +573,11 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
/*
* Guard against state recovery
*/
- err = nfs4_wait_clnt_recover(clp);
+ err = nfs4_wait_clnt_recover(server->nfs_client);
}
if (err) {
- nfs_abort_delegation_return(delegation, clp, err);
+ nfs_abort_delegation_return(delegation, server, err);
goto out;
}
@@ -590,17 +594,6 @@ static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
ret = true;
- else if (test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags)) {
- struct inode *inode;
-
- spin_lock(&delegation->lock);
- inode = delegation->inode;
- if (inode && list_empty(&NFS_I(inode)->open_files))
- ret = true;
- spin_unlock(&delegation->lock);
- }
- if (ret)
- clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags) ||
test_bit(NFS_DELEGATION_RETURN_DELAYED, &delegation->flags) ||
test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
@@ -619,6 +612,9 @@ static int nfs_server_return_marked_delegations(struct nfs_server *server,
struct nfs_delegation *place_holder_deleg = NULL;
int err = 0;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN,
+ &server->delegation_flags))
+ return 0;
restart:
/*
* To avoid quadratic looping we hold a reference
@@ -670,6 +666,7 @@ restart:
cond_resched();
if (!err)
goto restart;
+ set_bit(NFS4SERV_DELEGRETURN, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
goto out;
}
@@ -684,6 +681,9 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
struct nfs_delegation *d;
bool ret = false;
+ if (!test_and_clear_bit(NFS4SERV_DELEGRETURN_DELAYED,
+ &server->delegation_flags))
+ goto out;
list_for_each_entry_rcu (d, &server->delegations, super_list) {
if (!test_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags))
continue;
@@ -691,6 +691,7 @@ static bool nfs_server_clear_delayed_delegations(struct nfs_server *server)
clear_bit(NFS_DELEGATION_RETURN_DELAYED, &d->flags);
ret = true;
}
+out:
return ret;
}
@@ -878,11 +879,25 @@ int nfs4_inode_make_writeable(struct inode *inode)
return nfs4_inode_return_delegation(inode);
}
-static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
- struct nfs_delegation *delegation)
+static void
+nfs_mark_return_if_closed_delegation(struct nfs_server *server,
+ struct nfs_delegation *delegation)
{
- set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
- set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
+ struct inode *inode;
+
+ if (test_bit(NFS_DELEGATION_RETURN, &delegation->flags) ||
+ test_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags))
+ return;
+ spin_lock(&delegation->lock);
+ inode = delegation->inode;
+ if (!inode)
+ goto out;
+ if (list_empty(&NFS_I(inode)->open_files))
+ nfs_mark_return_delegation(server, delegation);
+ else
+ set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
+out:
+ spin_unlock(&delegation->lock);
}
static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
@@ -1276,6 +1291,7 @@ static void nfs_mark_test_expired_delegation(struct nfs_server *server,
return;
clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
set_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
set_bit(NFS4CLNT_DELEGATION_EXPIRED, &server->nfs_client->cl_state);
}
@@ -1354,6 +1370,9 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
nfs4_stateid stateid;
unsigned long gen = ++server->delegation_gen;
+ if (!test_and_clear_bit(NFS4SERV_DELEGATION_EXPIRED,
+ &server->delegation_flags))
+ return 0;
restart:
rcu_read_lock();
list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
@@ -1383,6 +1402,9 @@ restart:
goto restart;
}
nfs_inode_mark_test_expired_delegation(server, inode);
+ set_bit(NFS4SERV_DELEGATION_EXPIRED, &server->delegation_flags);
+ set_bit(NFS4CLNT_DELEGATION_EXPIRED,
+ &server->nfs_client->cl_state);
iput(inode);
return -EAGAIN;
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index bc957487f6ec..bd23fc736b39 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -666,6 +666,8 @@ static bool nfs_use_readdirplus(struct inode *dir, struct dir_context *ctx,
{
if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
return false;
+ if (NFS_SERVER(dir)->flags & NFS_MOUNT_FORCE_RDIRPLUS)
+ return true;
if (ctx->pos == 0 ||
cache_hits + cache_misses > NFS_READDIR_CACHE_USAGE_THRESHOLD)
return true;
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index be686b8e0c54..e9c233b6fd20 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -154,5 +154,6 @@ const struct export_operations nfs_export_ops = {
EXPORT_OP_CLOSE_BEFORE_UNLINK |
EXPORT_OP_REMOTE_FS |
EXPORT_OP_NOATOMIC_ATTR |
- EXPORT_OP_FLUSH_ON_CLOSE,
+ EXPORT_OP_FLUSH_ON_CLOSE |
+ EXPORT_OP_NOLOCKS,
};
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index 98b45b636be3..61ad269c825f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1154,10 +1154,14 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
rpc_wake_up(&tbl->slot_tbl_waitq);
goto reset;
/* RPC connection errors */
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
case -ECONNREFUSED:
case -EHOSTDOWN:
case -EHOSTUNREACH:
- case -ENETUNREACH:
case -EIO:
case -ETIMEDOUT:
case -EPIPE:
@@ -1183,6 +1187,7 @@ reset:
/* Retry all errors through either pNFS or MDS except for -EJUKEBOX */
static int ff_layout_async_handle_error_v3(struct rpc_task *task,
+ struct nfs_client *clp,
struct pnfs_layout_segment *lseg,
u32 idx)
{
@@ -1200,6 +1205,11 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
goto out_retry;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags))
+ return -NFS4ERR_FATAL_IOERROR;
+ fallthrough;
default:
dprintk("%s DS connection error %d\n", __func__,
task->tk_status);
@@ -1234,7 +1244,7 @@ static int ff_layout_async_handle_error(struct rpc_task *task,
switch (vers) {
case 3:
- return ff_layout_async_handle_error_v3(task, lseg, idx);
+ return ff_layout_async_handle_error_v3(task, clp, lseg, idx);
case 4:
return ff_layout_async_handle_error_v4(task, state, clp,
lseg, idx);
@@ -1264,6 +1274,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
case -ECONNRESET:
case -EHOSTDOWN:
case -EHOSTUNREACH:
+ case -ENETDOWN:
case -ENETUNREACH:
case -EADDRINUSE:
case -ENOBUFS:
@@ -1337,6 +1348,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
goto out_eagain;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
return 0;
@@ -1507,6 +1521,9 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
return task->tk_status;
case -EAGAIN:
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
if (hdr->res.verf->committed == NFS_FILE_SYNC ||
@@ -1551,6 +1568,9 @@ static int ff_layout_commit_done_cb(struct rpc_task *task,
case -EAGAIN:
rpc_restart_call_prepare(task);
return -EAGAIN;
+ case -NFS4ERR_FATAL_IOERROR:
+ task->tk_status = -EIO;
+ return 0;
}
ff_layout_set_layoutcommit(data->inode, data->lseg, data->lwb);
diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
index b069385eea17..13f71ca8c974 100644
--- a/fs/nfs/fs_context.c
+++ b/fs/nfs/fs_context.c
@@ -50,6 +50,7 @@ enum nfs_param {
Opt_clientaddr,
Opt_cto,
Opt_alignwrite,
+ Opt_fatal_neterrors,
Opt_fg,
Opt_fscache,
Opt_fscache_flag,
@@ -72,6 +73,8 @@ enum nfs_param {
Opt_posix,
Opt_proto,
Opt_rdirplus,
+ Opt_rdirplus_none,
+ Opt_rdirplus_force,
Opt_rdma,
Opt_resvport,
Opt_retrans,
@@ -96,6 +99,20 @@ enum nfs_param {
};
enum {
+ Opt_fatal_neterrors_default,
+ Opt_fatal_neterrors_enetunreach,
+ Opt_fatal_neterrors_none,
+};
+
+static const struct constant_table nfs_param_enums_fatal_neterrors[] = {
+ { "default", Opt_fatal_neterrors_default },
+ { "ENETDOWN:ENETUNREACH", Opt_fatal_neterrors_enetunreach },
+ { "ENETUNREACH:ENETDOWN", Opt_fatal_neterrors_enetunreach },
+ { "none", Opt_fatal_neterrors_none },
+ {}
+};
+
+enum {
Opt_local_lock_all,
Opt_local_lock_flock,
Opt_local_lock_none,
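
The new fatal_neterrors= option is parsed through a constant table, which is why both orderings of the two errno names map to the same enum value. A small userspace sketch of that table-lookup style; the kernel's lookup_constant() from fs_parser behaves like this linear scan, but the types and values below are illustrative stand-ins:

#include <stdio.h>
#include <string.h>

enum { FATAL_NETERRORS_DEFAULT, FATAL_NETERRORS_ENETUNREACH, FATAL_NETERRORS_NONE };

struct constant_table { const char *name; int value; };

static const struct constant_table fatal_neterrors_tbl[] = {
	{ "default",              FATAL_NETERRORS_DEFAULT },
	{ "ENETDOWN:ENETUNREACH", FATAL_NETERRORS_ENETUNREACH },
	{ "ENETUNREACH:ENETDOWN", FATAL_NETERRORS_ENETUNREACH },
	{ "none",                 FATAL_NETERRORS_NONE },
	{}
};

/* Linear scan over a NULL-terminated table, like the kernel helper. */
static int lookup_constant(const struct constant_table *tbl, const char *name,
			   int not_found)
{
	for (; tbl->name; tbl++)
		if (!strcmp(tbl->name, name))
			return tbl->value;
	return not_found;
}

int main(void)
{
	printf("%d\n", lookup_constant(fatal_neterrors_tbl, "ENETDOWN:ENETUNREACH", -1));
	printf("%d\n", lookup_constant(fatal_neterrors_tbl, "bogus", -1));
	return 0;
}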
@@ -151,6 +168,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_string("clientaddr", Opt_clientaddr),
fsparam_flag_no("cto", Opt_cto),
fsparam_flag_no("alignwrite", Opt_alignwrite),
+ fsparam_enum("fatal_neterrors", Opt_fatal_neterrors,
+ nfs_param_enums_fatal_neterrors),
fsparam_flag ("fg", Opt_fg),
fsparam_flag_no("fsc", Opt_fscache_flag),
fsparam_string("fsc", Opt_fscache),
@@ -174,7 +193,8 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
fsparam_u32 ("port", Opt_port),
fsparam_flag_no("posix", Opt_posix),
fsparam_string("proto", Opt_proto),
- fsparam_flag_no("rdirplus", Opt_rdirplus),
+ fsparam_flag_no("rdirplus", Opt_rdirplus), // rdirplus|nordirplus
+ fsparam_string("rdirplus", Opt_rdirplus), // rdirplus=...
fsparam_flag ("rdma", Opt_rdma),
fsparam_flag_no("resvport", Opt_resvport),
fsparam_u32 ("retrans", Opt_retrans),
@@ -288,6 +308,12 @@ static const struct constant_table nfs_xprtsec_policies[] = {
{}
};
+static const struct constant_table nfs_rdirplus_tokens[] = {
+ { "none", Opt_rdirplus_none },
+ { "force", Opt_rdirplus_force },
+ {}
+};
+
/*
* Sanity-check a server address provided by the mount command.
*
@@ -636,10 +662,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
ctx->flags &= ~NFS_MOUNT_NOACL;
break;
case Opt_rdirplus:
- if (result.negated)
+ if (result.negated) {
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
ctx->flags |= NFS_MOUNT_NORDIRPLUS;
- else
- ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ } else if (!param->string) {
+ ctx->flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS);
+ } else {
+ switch (lookup_constant(nfs_rdirplus_tokens, param->string, -1)) {
+ case Opt_rdirplus_none:
+ ctx->flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
+ ctx->flags |= NFS_MOUNT_NORDIRPLUS;
+ break;
+ case Opt_rdirplus_force:
+ ctx->flags &= ~NFS_MOUNT_NORDIRPLUS;
+ ctx->flags |= NFS_MOUNT_FORCE_RDIRPLUS;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ }
break;
case Opt_sharecache:
if (result.negated)
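
After this hunk the rdirplus option is tri-state: bare "rdirplus", "nordirplus", and the new "rdirplus=none"/"rdirplus=force" spellings each leave the two mount flags in a distinct combination. A hedged userspace model of that flag algebra; the flag values are arbitrary stand-ins for the real NFS_MOUNT_* bits:

#include <assert.h>
#include <stdio.h>

#define NFS_MOUNT_NORDIRPLUS	0x1
#define NFS_MOUNT_FORCE_RDIRPLUS	0x2

static unsigned parse_rdirplus(unsigned flags, int negated, const char *arg)
{
	if (negated) {				/* "nordirplus" */
		flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
		flags |= NFS_MOUNT_NORDIRPLUS;
	} else if (!arg) {			/* bare "rdirplus" */
		flags &= ~(NFS_MOUNT_NORDIRPLUS | NFS_MOUNT_FORCE_RDIRPLUS);
	} else if (arg[0] == 'f') {		/* "rdirplus=force" */
		flags &= ~NFS_MOUNT_NORDIRPLUS;
		flags |= NFS_MOUNT_FORCE_RDIRPLUS;
	} else {				/* "rdirplus=none" */
		flags &= ~NFS_MOUNT_FORCE_RDIRPLUS;
		flags |= NFS_MOUNT_NORDIRPLUS;
	}
	return flags;
}

int main(void)
{
	assert(parse_rdirplus(0, 1, 0) == NFS_MOUNT_NORDIRPLUS);
	assert(parse_rdirplus(0, 0, "force") == NFS_MOUNT_FORCE_RDIRPLUS);
	assert(parse_rdirplus(NFS_MOUNT_FORCE_RDIRPLUS, 0, 0) == 0);
	printf("rdirplus flag combinations ok\n");
	return 0;
}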
@@ -872,6 +913,25 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
goto out_of_bounds;
ctx->nfs_server.max_connect = result.uint_32;
break;
+ case Opt_fatal_neterrors:
+ trace_nfs_mount_assign(param->key, param->string);
+ switch (result.uint_32) {
+ case Opt_fatal_neterrors_default:
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ else
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_enetunreach:
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ case Opt_fatal_neterrors_none:
+ ctx->flags &= ~NFS_MOUNT_NETUNREACH_FATAL;
+ break;
+ default:
+ goto out_invalid_value;
+ }
+ break;
case Opt_lookupcache:
trace_nfs_mount_assign(param->key, param->string);
switch (result.uint_32) {
@@ -1651,6 +1711,9 @@ static int nfs_init_fs_context(struct fs_context *fc)
ctx->xprtsec.cert_serial = TLS_NO_CERT;
ctx->xprtsec.privkey_serial = TLS_NO_PRIVKEY;
+ if (fc->net_ns != &init_net)
+ ctx->flags |= NFS_MOUNT_NETUNREACH_FATAL;
+
fc->s_iflags |= SB_I_STABLE_WRITES;
}
fc->fs_private = ctx;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1aa67fca69b2..119e447758b9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -74,6 +74,8 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
{
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
schedule();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 1ac1d3eec517..ec8d32d0e2e9 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -912,6 +912,11 @@ static inline u32 nfs_stateid_hash(nfs4_stateid *stateid)
}
#endif
+static inline bool nfs_current_task_exiting(void)
+{
+ return (current->flags & PF_EXITING) != 0;
+}
+
static inline bool nfs_error_is_fatal(int err)
{
switch (err) {
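
nfs_current_task_exiting() is just a PF_EXITING test on current, used throughout this series to stop exiting tasks from re-entering long waits. A trivial userspace model; PF_EXITING keeps its kernel value, but the task struct is a stand-in:

#include <stdbool.h>
#include <stdio.h>

#define PF_EXITING 0x00000004

struct task { unsigned int flags; };

static bool task_exiting(const struct task *t)
{
	return (t->flags & PF_EXITING) != 0;
}

int main(void)
{
	struct task t = { .flags = PF_EXITING };

	/* A wait loop would return -EINTR here instead of sleeping again. */
	printf("exiting: %d\n", task_exiting(&t));
	return 0;
}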
diff --git a/fs/nfs/nfs3client.c b/fs/nfs/nfs3client.c
index b0c8a39c2bbd..0d7310c1ee0c 100644
--- a/fs/nfs/nfs3client.c
+++ b/fs/nfs/nfs3client.c
@@ -120,6 +120,8 @@ struct nfs_client *nfs3_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
__set_bit(NFS_CS_DS, &cl_init.init_flags);
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 755ed3c37051..a4cb67573aa7 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -39,7 +39,7 @@ nfs3_rpc_wrapper(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(NFS_JUKEBOX_RETRY_TIME);
res = -ERESTARTSYS;
- } while (!fatal_signal_pending(current));
+ } while (!fatal_signal_pending(current) && !nfs_current_task_exiting());
return res;
}
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 1924c4a2077b..5cf52ece96ac 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -21,6 +21,8 @@
#define NFSDBG_FACILITY NFSDBG_PROC
static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std);
+static int nfs42_proc_offload_status(struct file *file, nfs4_stateid *stateid,
+ u64 *copied);
static void nfs42_set_netaddr(struct file *filep, struct nfs42_netaddr *naddr)
{
@@ -173,6 +175,20 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
return err;
}
+static void nfs4_copy_dequeue_callback(struct nfs_server *dst_server,
+ struct nfs_server *src_server,
+ struct nfs4_copy_state *copy)
+{
+ spin_lock(&dst_server->nfs_client->cl_lock);
+ list_del_init(&copy->copies);
+ spin_unlock(&dst_server->nfs_client->cl_lock);
+ if (dst_server != src_server) {
+ spin_lock(&src_server->nfs_client->cl_lock);
+ list_del_init(&copy->src_copies);
+ spin_unlock(&src_server->nfs_client->cl_lock);
+ }
+}
+
static int handle_async_copy(struct nfs42_copy_res *res,
struct nfs_server *dst_server,
struct nfs_server *src_server,
@@ -182,9 +198,12 @@ static int handle_async_copy(struct nfs42_copy_res *res,
bool *restart)
{
struct nfs4_copy_state *copy, *tmp_copy = NULL, *iter;
- int status = NFS4_OK;
struct nfs_open_context *dst_ctx = nfs_file_open_context(dst);
struct nfs_open_context *src_ctx = nfs_file_open_context(src);
+ struct nfs_client *clp = dst_server->nfs_client;
+ unsigned long timeout = 3 * HZ;
+ int status = NFS4_OK;
+ u64 copied;
copy = kzalloc(sizeof(struct nfs4_copy_state), GFP_KERNEL);
if (!copy)
@@ -222,15 +241,12 @@ static int handle_async_copy(struct nfs42_copy_res *res,
spin_unlock(&src_server->nfs_client->cl_lock);
}
- status = wait_for_completion_interruptible(&copy->completion);
- spin_lock(&dst_server->nfs_client->cl_lock);
- list_del_init(&copy->copies);
- spin_unlock(&dst_server->nfs_client->cl_lock);
- if (dst_server != src_server) {
- spin_lock(&src_server->nfs_client->cl_lock);
- list_del_init(&copy->src_copies);
- spin_unlock(&src_server->nfs_client->cl_lock);
- }
+wait:
+ status = wait_for_completion_interruptible_timeout(&copy->completion,
+ timeout);
+ if (!status)
+ goto timeout;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
if (status == -ERESTARTSYS) {
goto out_cancel;
} else if (copy->flags || copy->error == NFS4ERR_PARTNER_NO_AUTH) {
@@ -240,6 +256,7 @@ static int handle_async_copy(struct nfs42_copy_res *res,
}
out:
res->write_res.count = copy->count;
+ /* Copy out the updated write verifier provided by CB_OFFLOAD. */
memcpy(&res->write_res.verifier, &copy->verf, sizeof(copy->verf));
status = -copy->error;
@@ -251,6 +268,39 @@ out_cancel:
if (!nfs42_files_from_same_server(src, dst))
nfs42_do_offload_cancel_async(src, src_stateid);
goto out_free;
+timeout:
+ timeout <<= 1;
+ if (timeout > (clp->cl_lease_time >> 1))
+ timeout = clp->cl_lease_time >> 1;
+ status = nfs42_proc_offload_status(dst, &copy->stateid, &copied);
+ if (status == -EINPROGRESS)
+ goto wait;
+ nfs4_copy_dequeue_callback(dst_server, src_server, copy);
+ switch (status) {
+ case 0:
+ /* The server recognized the copy stateid, so it hasn't
+ * rebooted. Don't overwrite the verifier returned in the
+ * COPY result. */
+ res->write_res.count = copied;
+ goto out_free;
+ case -EREMOTEIO:
+ /* COPY operation failed on the server. */
+ status = -EOPNOTSUPP;
+ res->write_res.count = copied;
+ goto out_free;
+ case -EBADF:
+ /* Server did not recognize the copy stateid. It has
+ * probably restarted and lost the plot. */
+ res->write_res.count = 0;
+ status = -EOPNOTSUPP;
+ break;
+ case -EOPNOTSUPP:
+ /* RFC 7862 REQUIREs server to support OFFLOAD_STATUS when
+ * it has signed up for an async COPY, so server is not
+ * spec-compliant. */
+ res->write_res.count = 0;
+ }
+ goto out_free;
}
static int process_copy_commit(struct file *dst, loff_t pos_dst,
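
The wait/timeout hunk above polls OFFLOAD_STATUS on a doubling interval, capped at half the lease time, so a lost CB_OFFLOAD no longer blocks the copy forever. A sketch of just that cadence; HZ and the lease value are modeled, not taken from a real client:

#include <stdio.h>

int main(void)
{
	unsigned long hz = 1;			/* model HZ as 1 tick per second */
	unsigned long lease_time = 90 * hz;	/* a typical NFSv4 lease */
	unsigned long timeout = 3 * hz;		/* initial CB_OFFLOAD wait */

	for (int poll = 1; poll <= 8; poll++) {
		printf("poll %d after %lus\n", poll, timeout / hz);
		timeout <<= 1;			/* exponential backoff ... */
		if (timeout > lease_time / 2)	/* ... capped at lease/2 */
			timeout = lease_time / 2;
	}
	return 0;
}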
@@ -582,6 +632,108 @@ static int nfs42_do_offload_cancel_async(struct file *dst,
return status;
}
+static int
+_nfs42_proc_offload_status(struct nfs_server *server, struct file *file,
+ struct nfs42_offload_data *data)
+{
+ struct nfs_open_context *ctx = nfs_file_open_context(file);
+ struct rpc_message msg = {
+ .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OFFLOAD_STATUS],
+ .rpc_argp = &data->args,
+ .rpc_resp = &data->res,
+ .rpc_cred = ctx->cred,
+ };
+ int status;
+
+ status = nfs4_call_sync(server->client, server, &msg,
+ &data->args.osa_seq_args,
+ &data->res.osr_seq_res, 1);
+ trace_nfs4_offload_status(&data->args, status);
+ switch (status) {
+ case 0:
+ break;
+
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ /*
+ * Server does not recognize the COPY stateid. CB_OFFLOAD
+ * could have purged it, or server might have rebooted.
+ * Since COPY stateids don't have an associated inode,
+ * avoid triggering state recovery.
+ */
+ status = -EBADF;
+ break;
+ case -NFS4ERR_NOTSUPP:
+ case -ENOTSUPP:
+ case -EOPNOTSUPP:
+ server->caps &= ~NFS_CAP_OFFLOAD_STATUS;
+ status = -EOPNOTSUPP;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * nfs42_proc_offload_status - Poll completion status of an async copy operation
+ * @dst: handle of file being copied into
+ * @stateid: copy stateid (from async COPY result)
+ * @copied: OUT: number of bytes copied so far
+ *
+ * Return values:
+ * %0: Server returned an NFS4_OK completion status
+ * %-EINPROGRESS: Server returned no completion status
+ * %-EREMOTEIO: Server returned an error completion status
+ * %-EBADF: Server did not recognize the copy stateid
+ * %-EOPNOTSUPP: Server does not support OFFLOAD_STATUS
+ * %-ERESTARTSYS: Wait interrupted by signal
+ *
+ * Other negative errnos indicate the client could not complete the
+ * request.
+ */
+static int
+nfs42_proc_offload_status(struct file *dst, nfs4_stateid *stateid, u64 *copied)
+{
+ struct inode *inode = file_inode(dst);
+ struct nfs_server *server = NFS_SERVER(inode);
+ struct nfs4_exception exception = {
+ .inode = inode,
+ };
+ struct nfs42_offload_data *data;
+ int status;
+
+ if (!(server->caps & NFS_CAP_OFFLOAD_STATUS))
+ return -EOPNOTSUPP;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ data->seq_server = server;
+ data->args.osa_src_fh = NFS_FH(inode);
+ memcpy(&data->args.osa_stateid, stateid,
+ sizeof(data->args.osa_stateid));
+ exception.stateid = &data->args.osa_stateid;
+ do {
+ status = _nfs42_proc_offload_status(server, dst, data);
+ if (status == -EOPNOTSUPP)
+ goto out;
+ status = nfs4_handle_exception(server, status, &exception);
+ } while (exception.retry);
+ if (status)
+ goto out;
+
+ *copied = data->res.osr_count;
+ if (!data->res.complete_count)
+ status = -EINPROGRESS;
+ else if (data->res.osr_complete != NFS_OK)
+ status = -EREMOTEIO;
+
+out:
+ kfree(data);
+ return status;
+}
+
static int _nfs42_proc_copy_notify(struct file *src, struct file *dst,
struct nfs42_copy_notify_args *args,
struct nfs42_copy_notify_res *res)
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index 5072d7ea72e9..b1b663468249 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -35,6 +35,11 @@
#define encode_offload_cancel_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE))
#define decode_offload_cancel_maxsz (op_decode_hdr_maxsz)
+#define encode_offload_status_maxsz (op_encode_hdr_maxsz + \
+ XDR_QUADLEN(NFS4_STATEID_SIZE))
+#define decode_offload_status_maxsz (op_decode_hdr_maxsz + \
+ 2 /* osr_count */ + \
+ 2 /* osr_complete */)
#define encode_copy_notify_maxsz (op_encode_hdr_maxsz + \
XDR_QUADLEN(NFS4_STATEID_SIZE) + \
1 + /* nl4_type */ \
@@ -143,6 +148,14 @@
decode_sequence_maxsz + \
decode_putfh_maxsz + \
decode_offload_cancel_maxsz)
+#define NFS4_enc_offload_status_sz (compound_encode_hdr_maxsz + \
+ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_offload_status_maxsz)
+#define NFS4_dec_offload_status_sz (compound_decode_hdr_maxsz + \
+ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_offload_status_maxsz)
#define NFS4_enc_copy_notify_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -345,6 +358,14 @@ static void encode_offload_cancel(struct xdr_stream *xdr,
encode_nfs4_stateid(xdr, &args->osa_stateid);
}
+static void encode_offload_status(struct xdr_stream *xdr,
+ const struct nfs42_offload_status_args *args,
+ struct compound_hdr *hdr)
+{
+ encode_op_hdr(xdr, OP_OFFLOAD_STATUS, decode_offload_status_maxsz, hdr);
+ encode_nfs4_stateid(xdr, &args->osa_stateid);
+}
+
static void encode_copy_notify(struct xdr_stream *xdr,
const struct nfs42_copy_notify_args *args,
struct compound_hdr *hdr)
@@ -570,6 +591,25 @@ static void nfs4_xdr_enc_offload_cancel(struct rpc_rqst *req,
}
/*
+ * Encode OFFLOAD_STATUS request
+ */
+static void nfs4_xdr_enc_offload_status(struct rpc_rqst *req,
+ struct xdr_stream *xdr,
+ const void *data)
+{
+ const struct nfs42_offload_status_args *args = data;
+ struct compound_hdr hdr = {
+ .minorversion = nfs4_xdr_minorversion(&args->osa_seq_args),
+ };
+
+ encode_compound_hdr(xdr, req, &hdr);
+ encode_sequence(xdr, &args->osa_seq_args, &hdr);
+ encode_putfh(xdr, args->osa_src_fh, &hdr);
+ encode_offload_status(xdr, args, &hdr);
+ encode_nops(&hdr);
+}
+
+/*
* Encode COPY_NOTIFY request
*/
static void nfs4_xdr_enc_copy_notify(struct rpc_rqst *req,
@@ -921,6 +961,26 @@ static int decode_offload_cancel(struct xdr_stream *xdr,
return decode_op_hdr(xdr, OP_OFFLOAD_CANCEL);
}
+static int decode_offload_status(struct xdr_stream *xdr,
+ struct nfs42_offload_status_res *res)
+{
+ ssize_t result;
+ int status;
+
+ status = decode_op_hdr(xdr, OP_OFFLOAD_STATUS);
+ if (status)
+ return status;
+ /* osr_count */
+ if (xdr_stream_decode_u64(xdr, &res->osr_count) < 0)
+ return -EIO;
+ /* osr_complete<1> */
+ result = xdr_stream_decode_uint32_array(xdr, &res->osr_complete, 1);
+ if (result < 0)
+ return -EIO;
+ res->complete_count = result;
+ return 0;
+}
+
static int decode_copy_notify(struct xdr_stream *xdr,
struct nfs42_copy_notify_res *res)
{
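
decode_offload_status() treats osr_complete<1> as an XDR counted array of at most one word: a count of zero means the copy is still running. A self-contained C model of that decode, with the xdr_stream helpers replaced by a toy buffer:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct xdr_buf { const uint32_t *p; size_t words; };

static int decode_u32(struct xdr_buf *x, uint32_t *val)
{
	if (!x->words)
		return -1;
	*val = ntohl(*x->p++);
	x->words--;
	return 0;
}

/* Returns the number of elements decoded (0 or 1), or -1 if malformed. */
static int decode_osr_complete(struct xdr_buf *x, uint32_t *complete)
{
	uint32_t count;

	if (decode_u32(x, &count) || count > 1)
		return -1;
	if (count && decode_u32(x, complete))
		return -1;
	return (int)count;
}

int main(void)
{
	uint32_t wire[] = { htonl(1), htonl(0) };	/* count=1, NFS4_OK */
	struct xdr_buf x = { wire, 2 };
	uint32_t complete = ~0u;
	int n = decode_osr_complete(&x, &complete);

	printf("complete_count=%d osr_complete=%u\n", n, complete);
	return 0;
}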
@@ -1371,6 +1431,32 @@ out:
}
/*
+ * Decode OFFLOAD_STATUS response
+ */
+static int nfs4_xdr_dec_offload_status(struct rpc_rqst *rqstp,
+ struct xdr_stream *xdr,
+ void *data)
+{
+ struct nfs42_offload_status_res *res = data;
+ struct compound_hdr hdr;
+ int status;
+
+ status = decode_compound_hdr(xdr, &hdr);
+ if (status)
+ goto out;
+ status = decode_sequence(xdr, &res->osr_seq_res, rqstp);
+ if (status)
+ goto out;
+ status = decode_putfh(xdr);
+ if (status)
+ goto out;
+ status = decode_offload_status(xdr, res);
+
+out:
+ return status;
+}
+
+/*
* Decode COPY_NOTIFY response
*/
static int nfs4_xdr_dec_copy_notify(struct rpc_rqst *rqstp,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 83378f69b35e..162c85a83a14 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -233,6 +233,8 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
__set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags);
if (test_bit(NFS_CS_PNFS, &cl_init->init_flags))
__set_bit(NFS_CS_PNFS, &clp->cl_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &cl_init->init_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &clp->cl_flags);
/*
* Set up the connection to the server before we add it to the
* global list.
@@ -937,6 +939,9 @@ static int nfs4_set_client(struct nfs_server *server,
__set_bit(NFS_CS_TSM_POSSIBLE, &cl_init.init_flags);
server->port = rpc_get_port((struct sockaddr *)addr);
+ if (server->flags & NFS_MOUNT_NETUNREACH_FATAL)
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
+
/* Allocate or find a client reference we can use */
clp = nfs_get_client(&cl_init);
if (IS_ERR(clp))
@@ -1011,6 +1016,8 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv,
if (mds_srv->flags & NFS_MOUNT_NORESVPORT)
__set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
+ if (test_bit(NFS_CS_NETUNREACH_FATAL, &mds_clp->cl_flags))
+ __set_bit(NFS_CS_NETUNREACH_FATAL, &cl_init.init_flags);
__set_bit(NFS_CS_PNFS, &cl_init.init_flags);
cl_init.max_connect = NFS_MAX_TRANSPORTS;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 70c8ea943019..970f28dbf253 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -195,6 +195,9 @@ static int nfs4_map_errors(int err)
return -EBUSY;
case -NFS4ERR_NOT_SAME:
return -ENOTSYNC;
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ break;
default:
dprintk("%s could not handle NFSv4 error %d\n",
__func__, -err);
@@ -443,6 +446,8 @@ static int nfs4_delay_killable(long *timeout)
{
might_sleep();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
__set_current_state(TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(nfs4_update_delay(timeout));
if (!__fatal_signal_pending(current))
@@ -454,6 +459,8 @@ static int nfs4_delay_interruptible(long *timeout)
{
might_sleep();
+ if (unlikely(nfs_current_task_exiting()))
+ return -EINTR;
__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE_UNSAFE);
schedule_timeout(nfs4_update_delay(timeout));
if (!signal_pending(current))
@@ -1774,7 +1781,8 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
rcu_read_unlock();
trace_nfs4_open_stateid_update_wait(state->inode, stateid, 0);
- if (!fatal_signal_pending(current)) {
+ if (!fatal_signal_pending(current) &&
+ !nfs_current_task_exiting()) {
if (schedule_timeout(5*HZ) == 0)
status = -EAGAIN;
else
@@ -3576,7 +3584,7 @@ static bool nfs4_refresh_open_old_stateid(nfs4_stateid *dst,
write_sequnlock(&state->seqlock);
trace_nfs4_close_stateid_update_wait(state->inode, dst, 0);
- if (fatal_signal_pending(current))
+ if (fatal_signal_pending(current) || nfs_current_task_exiting())
status = -EINTR;
else
if (schedule_timeout(5*HZ) != 0)
@@ -9594,7 +9602,7 @@ static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
return;
trace_nfs4_sequence(clp, task->tk_status);
- if (task->tk_status < 0 && !task->tk_client->cl_shutdown) {
+ if (task->tk_status < 0 && clp->cl_cons_state >= 0) {
dprintk("%s ERROR %d\n", __func__, task->tk_status);
if (refcount_read(&clp->cl_count) == 1)
return;
@@ -10798,7 +10806,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
| NFS_CAP_CLONE
| NFS_CAP_LAYOUTERROR
| NFS_CAP_READ_PLUS
- | NFS_CAP_MOVEABLE,
+ | NFS_CAP_MOVEABLE
+ | NFS_CAP_OFFLOAD_STATUS,
.init_client = nfs41_init_client,
.shutdown_client = nfs41_shutdown_client,
.match_stateid = nfs41_match_stateid,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 542cdf71229f..7612e977e80b 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1198,7 +1198,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
struct rpc_clnt *clnt = clp->cl_rpcclient;
bool swapon = false;
- if (clnt->cl_shutdown)
+ if (clp->cl_cons_state < 0)
return;
set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
@@ -1403,7 +1403,7 @@ int nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_
dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
clp->cl_hostname);
nfs4_schedule_state_manager(clp);
- return 0;
+ return clp->cl_cons_state < 0 ? clp->cl_cons_state : 0;
}
EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -2739,7 +2739,15 @@ out_error:
pr_warn_ratelimited("NFS: state manager%s%s failed on NFSv4 server %s"
" with error %d\n", section_sep, section,
clp->cl_hostname, -status);
- ssleep(1);
+ switch (status) {
+ case -ENETDOWN:
+ case -ENETUNREACH:
+ nfs_mark_client_ready(clp, -EIO);
+ break;
+ default:
+ ssleep(1);
+ break;
+ }
out_drain:
memalloc_nofs_restore(memflags);
nfs4_end_drain_session(clp);
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 22c973316f0b..bc67fe6801b1 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -2608,7 +2608,7 @@ TRACE_EVENT(nfs4_copy_notify,
)
);
-TRACE_EVENT(nfs4_offload_cancel,
+DECLARE_EVENT_CLASS(nfs4_offload_class,
TP_PROTO(
const struct nfs42_offload_status_args *args,
int error
@@ -2640,6 +2640,15 @@ TRACE_EVENT(nfs4_offload_cancel,
__entry->stateid_seq, __entry->stateid_hash
)
);
+#define DEFINE_NFS4_OFFLOAD_EVENT(name) \
+ DEFINE_EVENT(nfs4_offload_class, name, \
+ TP_PROTO( \
+ const struct nfs42_offload_status_args *args, \
+ int error \
+ ), \
+ TP_ARGS(args, error))
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_cancel);
+DEFINE_NFS4_OFFLOAD_EVENT(nfs4_offload_status);
DECLARE_EVENT_CLASS(nfs4_xattr_event,
TP_PROTO(
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e8ac3f615f93..55bef5fbfa47 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -82,9 +82,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
* we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2)
*/
#define pagepad_maxsz (1)
-#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2)
-#define lock_owner_id_maxsz (1 + 1 + 4)
-#define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
+#define open_owner_id_maxsz (2 + 1 + 2 + 2)
+#define lock_owner_id_maxsz (2 + 1 + 2)
#define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define compound_decode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2))
#define op_encode_hdr_maxsz (1)
@@ -185,7 +184,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_claim_null_maxsz (1 + nfs4_name_maxsz)
#define encode_open_maxsz (op_encode_hdr_maxsz + \
2 + encode_share_access_maxsz + 2 + \
- open_owner_id_maxsz + \
+ 1 + open_owner_id_maxsz + \
encode_opentype_maxsz + \
encode_claim_null_maxsz)
#define decode_space_limit_maxsz (3)
@@ -255,13 +254,14 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
#define encode_link_maxsz (op_encode_hdr_maxsz + \
nfs4_name_maxsz)
#define decode_link_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz)
-#define encode_lockowner_maxsz (7)
+#define encode_lockowner_maxsz (2 + 1 + lock_owner_id_maxsz)
+
#define encode_lock_maxsz (op_encode_hdr_maxsz + \
7 + \
1 + encode_stateid_maxsz + 1 + \
encode_lockowner_maxsz)
#define decode_lock_denied_maxsz \
- (8 + decode_lockowner_maxsz)
+ (2 + 2 + 1 + 2 + 1 + lock_owner_id_maxsz)
#define decode_lock_maxsz (op_decode_hdr_maxsz + \
decode_lock_denied_maxsz)
#define encode_lockt_maxsz (op_encode_hdr_maxsz + 5 + \
@@ -617,7 +617,7 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req,
encode_lockowner_maxsz)
#define NFS4_dec_release_lockowner_sz \
(compound_decode_hdr_maxsz + \
- decode_lockowner_maxsz)
+ decode_release_lockowner_maxsz)
#define NFS4_enc_access_sz (compound_encode_hdr_maxsz + \
encode_sequence_maxsz + \
encode_putfh_maxsz + \
@@ -1412,7 +1412,7 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena
__be32 *p;
/*
* opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
- * owner 4 = 32
+ * owner 28
*/
encode_nfs4_seqid(xdr, arg->seqid);
encode_share_access(xdr, arg->share_access);
@@ -5077,7 +5077,7 @@ static int decode_link(struct xdr_stream *xdr, struct nfs4_change_info *cinfo)
/*
* We create the owner, so we know a proper owner.id length is 4.
*/
-static int decode_lock_denied (struct xdr_stream *xdr, struct file_lock *fl)
+static int decode_lock_denied(struct xdr_stream *xdr, struct file_lock *fl)
{
uint64_t offset, length, clientid;
__be32 *p;
@@ -7702,6 +7702,7 @@ const struct rpc_procinfo nfs4_procedures[] = {
PROC42(CLONE, enc_clone, dec_clone),
PROC42(COPY, enc_copy, dec_copy),
PROC42(OFFLOAD_CANCEL, enc_offload_cancel, dec_offload_cancel),
+ PROC42(OFFLOAD_STATUS, enc_offload_status, dec_offload_status),
PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify),
PROC(LOOKUPP, enc_lookupp, dec_lookupp),
PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror),
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index aeb715b4a690..9eea9e62afc9 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -454,8 +454,12 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
{ NFS_MOUNT_NONLM, ",nolock", "" },
{ NFS_MOUNT_NOACL, ",noacl", "" },
{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
+ { NFS_MOUNT_FORCE_RDIRPLUS, ",rdirplus=force", "" },
{ NFS_MOUNT_UNSHARED, ",nosharecache", "" },
{ NFS_MOUNT_NORESVPORT, ",noresvport", "" },
+ { NFS_MOUNT_NETUNREACH_FATAL,
+ ",fatal_neterrors=ENETDOWN:ENETUNREACH",
+ ",fatal_neterrors=none" },
{ 0, NULL, NULL }
};
const struct proc_nfs_info *nfs_infop;
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index 7b59a40d40c0..37cb2b776435 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -14,6 +14,7 @@
#include <linux/rcupdate.h>
#include <linux/lockd/lockd.h>
+#include "internal.h"
#include "nfs4_fs.h"
#include "netns.h"
#include "sysfs.h"
@@ -228,6 +229,25 @@ static void shutdown_client(struct rpc_clnt *clnt)
rpc_cancel_tasks(clnt, -EIO, shutdown_match_client, NULL);
}
+/*
+ * Shut down the nfs_client only once all the superblocks
+ * have been shut down.
+ */
+static void shutdown_nfs_client(struct nfs_client *clp)
+{
+ struct nfs_server *server;
+ rcu_read_lock();
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ if (!(server->flags & NFS_MOUNT_SHUTDOWN)) {
+ rcu_read_unlock();
+ return;
+ }
+ }
+ rcu_read_unlock();
+ nfs_mark_client_ready(clp, -EIO);
+ shutdown_client(clp->cl_rpcclient);
+}
+
static ssize_t
shutdown_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
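
shutdown_nfs_client() above only tears down the shared nfs_client once every superblock using it carries NFS_MOUNT_SHUTDOWN. A compact userspace model of that all-marked check; the structures and flag value are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define MOUNT_SHUTDOWN 0x1

struct server { unsigned flags; };

static bool all_marked(const struct server *srv, int n)
{
	for (int i = 0; i < n; i++)
		if (!(srv[i].flags & MOUNT_SHUTDOWN))
			return false;	/* another mount is still live */
	return true;
}

int main(void)
{
	struct server mounts[2] = { { MOUNT_SHUTDOWN }, { 0 } };

	printf("shutdown client? %s\n", all_marked(mounts, 2) ? "yes" : "no");
	mounts[1].flags |= MOUNT_SHUTDOWN;
	printf("shutdown client? %s\n", all_marked(mounts, 2) ? "yes" : "no");
	return 0;
}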
@@ -259,7 +279,6 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
server->flags |= NFS_MOUNT_SHUTDOWN;
shutdown_client(server->client);
- shutdown_client(server->nfs_client->cl_rpcclient);
if (!IS_ERR(server->client_acl))
shutdown_client(server->client_acl);
@@ -267,11 +286,44 @@ shutdown_store(struct kobject *kobj, struct kobj_attribute *attr,
if (server->nlm_host)
shutdown_client(server->nlm_host->h_rpcclnt);
out:
+ shutdown_nfs_client(server->nfs_client);
return count;
}
static struct kobj_attribute nfs_sysfs_attr_shutdown = __ATTR_RW(shutdown);
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static ssize_t
+implid_domain_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->domain) == 0)
+ return 0; //sysfs_emit(buf, "");
+ return sysfs_emit(buf, "%s\n", impl_id->domain);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_domain = __ATTR_RO(implid_domain);
+
+
+static ssize_t
+implid_name_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct nfs_server *server = container_of(kobj, struct nfs_server, kobj);
+ struct nfs41_impl_id *impl_id = server->nfs_client->cl_implid;
+
+ if (!impl_id || strlen(impl_id->name) == 0)
+ return 0; //sysfs_emit(buf, "");
+ return sysfs_emit(buf, "%s\n", impl_id->name);
+}
+
+static struct kobj_attribute nfs_sysfs_attr_implid_name = __ATTR_RO(implid_name);
+
+#endif /* IS_ENABLED(CONFIG_NFS_V4_1) */
+
#define RPC_CLIENT_NAME_SIZE 64
void nfs_sysfs_link_rpc_client(struct nfs_server *server,
@@ -309,6 +361,32 @@ static struct kobj_type nfs_sb_ktype = {
.child_ns_type = nfs_netns_object_child_ns_type,
};
+#if IS_ENABLED(CONFIG_NFS_V4_1)
+static void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+ int ret;
+
+ if (!server->nfs_client->cl_implid)
+ return;
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_domain.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+
+ ret = sysfs_create_file_ns(&server->kobj, &nfs_sysfs_attr_implid_name.attr,
+ nfs_netns_server_namespace(&server->kobj));
+ if (ret < 0)
+ pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
+ server->s_sysfs_id, ret);
+}
+#else /* CONFIG_NFS_V4_1 */
+static inline void nfs_sysfs_add_nfsv41_server(struct nfs_server *server)
+{
+}
+#endif /* CONFIG_NFS_V4_1 */
+
void nfs_sysfs_add_server(struct nfs_server *server)
{
int ret;
@@ -325,6 +403,8 @@ void nfs_sysfs_add_server(struct nfs_server *server)
if (ret < 0)
pr_warn("NFS: sysfs_create_file_ns for server-%d failed (%d)\n",
server->s_sysfs_id, ret);
+
+ nfs_sysfs_add_nfsv41_server(server);
}
EXPORT_SYMBOL_GPL(nfs_sysfs_add_server);
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index aa3d8bea3ec0..23df8b214474 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -579,8 +579,10 @@ retry:
while (!nfs_lock_request(head)) {
ret = nfs_wait_on_request(head);
- if (ret < 0)
+ if (ret < 0) {
+ nfs_release_request(head);
return ERR_PTR(ret);
+ }
}
/* Ensure that nobody removed the request before we locked it */
diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c
index ea382b75b26c..e2eaac14fd8e 100644
--- a/fs/nfs_common/nfsacl.c
+++ b/fs/nfs_common/nfsacl.c
@@ -42,7 +42,7 @@ struct nfsacl_encode_desc {
};
struct nfsacl_simple_acl {
- struct posix_acl acl;
+ struct posix_acl_hdr acl;
struct posix_acl_entry ace[4];
};
@@ -112,7 +112,8 @@ int nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
xdr_encode_word(buf, base, entries))
return -EINVAL;
if (encode_entries && acl && acl->a_count == 3) {
- struct posix_acl *acl2 = &aclbuf.acl;
+ struct posix_acl *acl2 =
+ container_of(&aclbuf.acl, struct posix_acl, hdr);
/* Avoid the use of posix_acl_alloc(). nfsacl_encode() is
* invoked in contexts where a memory allocation failure is
@@ -177,7 +178,8 @@ bool nfs_stream_encode_acl(struct xdr_stream *xdr, struct inode *inode,
return false;
if (encode_entries && acl && acl->a_count == 3) {
- struct posix_acl *acl2 = &aclbuf.acl;
+ struct posix_acl *acl2 =
+ container_of(&aclbuf.acl, struct posix_acl, hdr);
/* Avoid the use of posix_acl_alloc(). nfsacl_encode() is
* invoked in contexts where a memory allocation failure is
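
The nfsacl fix swaps the embedded struct posix_acl for struct posix_acl_hdr and recovers the flexible-array view with container_of(), so a fixed-size on-stack buffer can stand in for an allocated ACL. A hedged userspace sketch of that header-plus-storage idiom, with simplified stand-in types:

#include <stddef.h>
#include <stdio.h>

struct acl_hdr { int a_count; };
struct acl_entry { int tag, id; };
struct acl { struct acl_hdr hdr; struct acl_entry a_entries[]; };

struct simple_acl {
	struct acl_hdr acl;		/* header only: sized for the stack */
	struct acl_entry ace[4];	/* storage the flexible array maps onto */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct simple_acl buf = { .acl = { .a_count = 4 } };
	struct acl *acl = container_of(&buf.acl, struct acl, hdr);

	acl->a_entries[0].tag = 1;	/* lands in buf.ace[0] */
	printf("count=%d tag=%d\n", acl->hdr.a_count, buf.ace[0].tag);
	return 0;
}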
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index c0bd1509ccd4..792d3fed1b45 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -172,6 +172,16 @@ config NFSD_LEGACY_CLIENT_TRACKING
recoverydir, or spawn a process directly using a usermodehelper
upcall.
- These legacy client tracking methods have proven to be probelmatic
+ These legacy client tracking methods have proven to be problematic
and will be removed in the future. Say Y here if you need support
for them in the interim.
+
+config NFSD_V4_DELEG_TIMESTAMPS
+ bool "Support delegated timestamps"
+ depends on NFSD_V4
+ default n
+ help
+ NFSD implements delegated timestamps according to
+ draft-ietf-nfsv4-delstid-08 "Extending the Opening of Files". This
+ is currently an experimental feature and is therefore left disabled
+ by default.
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index fb9b1656a287..ab85e6a2454f 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -319,15 +319,14 @@ nfsd_file_check_writeback(struct nfsd_file *nf)
mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}
-
-static bool nfsd_file_lru_add(struct nfsd_file *nf)
+static void nfsd_file_lru_add(struct nfsd_file *nf)
{
- set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
- if (list_lru_add_obj(&nfsd_file_lru, &nf->nf_lru)) {
+ refcount_inc(&nf->nf_ref);
+ if (list_lru_add_obj(&nfsd_file_lru, &nf->nf_lru))
trace_nfsd_file_lru_add(nf);
- return true;
- }
- return false;
+ else
+ WARN_ON(1);
+ nfsd_file_schedule_laundrette();
}
static bool nfsd_file_lru_remove(struct nfsd_file *nf)
@@ -363,30 +362,10 @@ nfsd_file_put(struct nfsd_file *nf)
if (test_bit(NFSD_FILE_GC, &nf->nf_flags) &&
test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
- /*
- * If this is the last reference (nf_ref == 1), then try to
- * transfer it to the LRU.
- */
- if (refcount_dec_not_one(&nf->nf_ref))
- return;
-
- /* Try to add it to the LRU. If that fails, decrement. */
- if (nfsd_file_lru_add(nf)) {
- /* If it's still hashed, we're done */
- if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
- nfsd_file_schedule_laundrette();
- return;
- }
-
- /*
- * We're racing with unhashing, so try to remove it from
- * the LRU. If removal fails, then someone else already
- * has our reference.
- */
- if (!nfsd_file_lru_remove(nf))
- return;
- }
+ set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ set_bit(NFSD_FILE_RECENT, &nf->nf_flags);
}
+
if (refcount_dec_and_test(&nf->nf_ref))
nfsd_file_free(nf);
}
@@ -530,13 +509,12 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
}
/*
- * Put the reference held on behalf of the LRU. If it wasn't the last
- * one, then just remove it from the LRU and ignore it.
+ * Put the reference held on behalf of the LRU if it is the last
+ * reference, else rotate.
*/
- if (!refcount_dec_and_test(&nf->nf_ref)) {
+ if (!refcount_dec_if_one(&nf->nf_ref)) {
trace_nfsd_file_gc_in_use(nf);
- list_lru_isolate(lru, &nf->nf_lru);
- return LRU_REMOVED;
+ return LRU_ROTATE;
}
/* Refcount went to zero. Unhash it and queue it to the dispose list */
@@ -548,14 +526,54 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
return LRU_REMOVED;
}
+static enum lru_status
+nfsd_file_gc_cb(struct list_head *item, struct list_lru_one *lru,
+ void *arg)
+{
+ struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
+
+ if (test_and_clear_bit(NFSD_FILE_RECENT, &nf->nf_flags)) {
+ /*
+ * "REFERENCED" really means "should be at the end of the
+ * LRU". As we are putting it there we can clear the flag.
+ */
+ clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
+ trace_nfsd_file_gc_aged(nf);
+ return LRU_ROTATE;
+ }
+ return nfsd_file_lru_cb(item, lru, arg);
+}
+
+/* If the shrinker runs between calls to list_lru_walk_node() in
+ * nfsd_file_gc(), the "remaining" count will be wrong. This could
+ * result in premature freeing of some files. This may not matter much
+ * but is easy to fix with this spinlock which temporarily disables
+ * the shrinker.
+ */
+static DEFINE_SPINLOCK(nfsd_gc_lock);
static void
nfsd_file_gc(void)
{
+ unsigned long ret = 0;
LIST_HEAD(dispose);
- unsigned long ret;
+ int nid;
+
+ spin_lock(&nfsd_gc_lock);
+ for_each_node_state(nid, N_NORMAL_MEMORY) {
+ unsigned long remaining = list_lru_count_node(&nfsd_file_lru, nid);
+
+ while (remaining > 0) {
+ unsigned long nr = min(remaining, NFSD_FILE_GC_BATCH);
- ret = list_lru_walk(&nfsd_file_lru, nfsd_file_lru_cb,
- &dispose, list_lru_count(&nfsd_file_lru));
+ remaining -= nr;
+ ret += list_lru_walk_node(&nfsd_file_lru, nid, nfsd_file_gc_cb,
+ &dispose, &nr);
+ if (nr)
+ /* walk aborted early */
+ remaining = 0;
+ }
+ }
+ spin_unlock(&nfsd_gc_lock);
trace_nfsd_file_gc_removed(ret, list_lru_count(&nfsd_file_lru));
nfsd_file_dispose_list_delayed(&dispose);
}
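
Per the comment above, nfsd_file_gc() now walks each node's LRU in NFSD_FILE_GC_BATCH-sized chunks under nfsd_gc_lock, stopping a node early when a walk aborts. A userspace approximation of the batching loop, with the walk callback faked:

#include <stdio.h>

#define GC_BATCH 16UL

/* Pretend to scan 'nr' entries; leaves in *nr how many were not visited. */
static unsigned long walk_node(unsigned long *nr)
{
	unsigned long visited = *nr;

	*nr = 0;		/* 0 left means the walk completed */
	return visited;
}

int main(void)
{
	unsigned long remaining = 53, freed = 0;

	while (remaining > 0) {
		unsigned long nr = remaining < GC_BATCH ? remaining : GC_BATCH;

		remaining -= nr;
		freed += walk_node(&nr);
		if (nr)		/* walk aborted early: stop this node */
			remaining = 0;
	}
	printf("freed %lu\n", freed);
	return 0;
}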
@@ -563,9 +581,9 @@ nfsd_file_gc(void)
static void
nfsd_file_gc_worker(struct work_struct *work)
{
- nfsd_file_gc();
if (list_lru_count(&nfsd_file_lru))
- nfsd_file_schedule_laundrette();
+ nfsd_file_gc();
+ nfsd_file_schedule_laundrette();
}
static unsigned long
@@ -580,8 +598,12 @@ nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
LIST_HEAD(dispose);
unsigned long ret;
+ if (!spin_trylock(&nfsd_gc_lock))
+ return SHRINK_STOP;
+
ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
nfsd_file_lru_cb, &dispose);
+ spin_unlock(&nfsd_gc_lock);
trace_nfsd_file_shrinker_removed(ret, list_lru_count(&nfsd_file_lru));
nfsd_file_dispose_list_delayed(&dispose);
return ret;
@@ -686,17 +708,12 @@ nfsd_file_close_inode(struct inode *inode)
void
nfsd_file_close_inode_sync(struct inode *inode)
{
- struct nfsd_file *nf;
LIST_HEAD(dispose);
trace_nfsd_file_close(inode);
nfsd_file_queue_for_close(inode, &dispose);
- while (!list_empty(&dispose)) {
- nf = list_first_entry(&dispose, struct nfsd_file, nf_gc);
- list_del_init(&nf->nf_gc);
- nfsd_file_free(nf);
- }
+ nfsd_file_dispose_list(&dispose);
}
static int
@@ -1058,16 +1075,8 @@ retry:
nf = nfsd_file_lookup_locked(net, current_cred(), inode, need, want_gc);
rcu_read_unlock();
- if (nf) {
- /*
- * If the nf is on the LRU then it holds an extra reference
- * that must be put if it's removed. It had better not be
- * the last one however, since we should hold another.
- */
- if (nfsd_file_lru_remove(nf))
- refcount_dec(&nf->nf_ref);
+ if (nf)
goto wait_for_construction;
- }
new = nfsd_file_alloc(net, inode, need, want_gc);
if (!new) {
@@ -1161,6 +1170,9 @@ open_file:
*/
if (status != nfs_ok || inode->i_nlink == 0)
nfsd_file_unhash(nf);
+ else if (want_gc)
+ nfsd_file_lru_add(nf);
+
clear_and_wake_up_bit(NFSD_FILE_PENDING, &nf->nf_flags);
if (status == nfs_ok)
goto out;
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index d5db6b34ba30..5865f9c72712 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -4,6 +4,12 @@
#include <linux/fsnotify_backend.h>
/*
+ * Limit the time that the list_lru_one lock is held during
+ * an LRU scan.
+ */
+#define NFSD_FILE_GC_BATCH (16UL)
+
+/*
* This is the fsnotify_mark container that nfsd attaches to the files that it
* is holding open. Note that we have a separate refcount here aside from the
* one in the fsnotify_mark. We only want a single fsnotify_mark attached to
@@ -38,6 +44,7 @@ struct nfsd_file {
#define NFSD_FILE_PENDING (1)
#define NFSD_FILE_REFERENCED (2)
#define NFSD_FILE_GC (3)
+#define NFSD_FILE_RECENT (4)
unsigned long nf_flags;
refcount_t nf_ref;
unsigned char nf_may;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 484077200c5d..ec6539cec0fe 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -46,8 +46,6 @@
#define NFSDDBG_FACILITY NFSDDBG_PROC
-static void nfsd4_mark_cb_fault(struct nfs4_client *clp);
-
#define NFSPROC4_CB_NULL 0
#define NFSPROC4_CB_COMPOUND 1
@@ -101,15 +99,15 @@ static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
if (bitmap[0] & FATTR4_WORD0_CHANGE)
if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_change) < 0)
- return -NFSERR_BAD_XDR;
+ return -EIO;
if (bitmap[0] & FATTR4_WORD0_SIZE)
if (xdr_stream_decode_u64(xdr, &fattr->ncf_cb_fsize) < 0)
- return -NFSERR_BAD_XDR;
+ return -EIO;
if (bitmap[2] & FATTR4_WORD2_TIME_DELEG_ACCESS) {
fattr4_time_deleg_access access;
if (!xdrgen_decode_fattr4_time_deleg_access(xdr, &access))
- return -NFSERR_BAD_XDR;
+ return -EIO;
fattr->ncf_cb_atime.tv_sec = access.seconds;
fattr->ncf_cb_atime.tv_nsec = access.nseconds;
@@ -118,7 +116,7 @@ static int decode_cb_fattr4(struct xdr_stream *xdr, uint32_t *bitmap,
fattr4_time_deleg_modify modify;
if (!xdrgen_decode_fattr4_time_deleg_modify(xdr, &modify))
- return -NFSERR_BAD_XDR;
+ return -EIO;
fattr->ncf_cb_mtime.tv_sec = modify.seconds;
fattr->ncf_cb_mtime.tv_nsec = modify.nseconds;
@@ -682,15 +680,15 @@ static int nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp,
if (unlikely(status || cb->cb_status))
return status;
if (xdr_stream_decode_uint32_array(xdr, bitmap, 3) < 0)
- return -NFSERR_BAD_XDR;
+ return -EIO;
if (xdr_stream_decode_u32(xdr, &attrlen) < 0)
- return -NFSERR_BAD_XDR;
+ return -EIO;
maxlen = sizeof(ncf->ncf_cb_change) + sizeof(ncf->ncf_cb_fsize);
if (bitmap[2] != 0)
maxlen += (sizeof(ncf->ncf_cb_mtime.tv_sec) +
sizeof(ncf->ncf_cb_mtime.tv_nsec)) * 2;
if (attrlen > maxlen)
- return -NFSERR_BAD_XDR;
+ return -EIO;
status = decode_cb_fattr4(xdr, bitmap, ncf);
return status;
}
@@ -1064,6 +1062,17 @@ static bool nfsd4_queue_cb(struct nfsd4_callback *cb)
return queue_work(clp->cl_callback_wq, &cb->cb_work);
}
+static void nfsd4_requeue_cb(struct rpc_task *task, struct nfsd4_callback *cb)
+{
+ struct nfs4_client *clp = cb->cb_clp;
+
+ if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
+ trace_nfsd_cb_restart(clp, cb);
+ task->tk_status = 0;
+ set_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
+ }
+}
+
static void nfsd41_cb_inflight_begin(struct nfs4_client *clp)
{
atomic_inc(&clp->cl_cb_inflight);
@@ -1301,6 +1310,11 @@ static void nfsd41_destroy_cb(struct nfsd4_callback *cb)
trace_nfsd_cb_destroy(clp, cb);
nfsd41_cb_release_slot(cb);
+ if (test_bit(NFSD4_CALLBACK_WAKE, &cb->cb_flags))
+ clear_and_wake_up_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags);
+ else
+ clear_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags);
+
if (cb->cb_ops && cb->cb_ops->release)
cb->cb_ops->release(cb);
nfsd41_cb_inflight_end(clp);
@@ -1328,30 +1342,14 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
rpc_call_start(task);
}
+/* Returns true if CB_COMPOUND processing should continue */
static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback *cb)
{
- struct nfs4_client *clp = cb->cb_clp;
- struct nfsd4_session *session = clp->cl_cb_session;
- bool ret = true;
-
- if (!clp->cl_minorversion) {
- /*
- * If the backchannel connection was shut down while this
- * task was queued, we need to resubmit it after setting up
- * a new backchannel connection.
- *
- * Note that if we lost our callback connection permanently
- * the submission code will error out, so we don't need to
- * handle that case here.
- */
- if (RPC_SIGNALLED(task))
- goto need_restart;
-
- return true;
- }
+ struct nfsd4_session *session = cb->cb_clp->cl_cb_session;
+ bool ret = false;
if (cb->cb_held_slot < 0)
- goto need_restart;
+ goto requeue;
/* This is the operation status code for CB_SEQUENCE */
trace_nfsd_cb_seq_status(task, cb);
@@ -1365,11 +1363,16 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
* (sequence ID, cached reply) MUST NOT change.
*/
++session->se_cb_seq_nr[cb->cb_held_slot];
+ ret = true;
break;
case -ESERVERFAULT:
- ++session->se_cb_seq_nr[cb->cb_held_slot];
+ /*
+ * Call succeeded, but the session, slot index, or slot
+ * sequence number in the response do not match those in
+ * the server's call. The sequence information is thus
+ * untrustworthy.
+ */
nfsd4_mark_cb_fault(cb->cb_clp);
- ret = false;
break;
case 1:
/*
@@ -1381,43 +1384,42 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback
fallthrough;
case -NFS4ERR_BADSESSION:
nfsd4_mark_cb_fault(cb->cb_clp);
- ret = false;
- goto need_restart;
+ goto requeue;
case -NFS4ERR_DELAY:
cb->cb_seq_status = 1;
- if (!rpc_restart_call(task))
- goto out;
-
+ if (RPC_SIGNALLED(task) || !rpc_restart_call(task))
+ goto requeue;
rpc_delay(task, 2 * HZ);
return false;
+ case -NFS4ERR_SEQ_MISORDERED:
case -NFS4ERR_BADSLOT:
+ /*
+ * A SEQ_MISORDERED or BADSLOT error means that the client and
+ * server are out of sync as to the backchannel parameters. Mark
+ * the backchannel faulty and restart the RPC, but leak the slot
+ * so that it's no longer used.
+ */
+ nfsd4_mark_cb_fault(cb->cb_clp);
+ cb->cb_held_slot = -1;
goto retry_nowait;
- case -NFS4ERR_SEQ_MISORDERED:
- if (session->se_cb_seq_nr[cb->cb_held_slot] != 1) {
- session->se_cb_seq_nr[cb->cb_held_slot] = 1;
- goto retry_nowait;
- }
- break;
default:
nfsd4_mark_cb_fault(cb->cb_clp);
}
trace_nfsd_cb_free_slot(task, cb);
nfsd41_cb_release_slot(cb);
-
- if (RPC_SIGNALLED(task))
- goto need_restart;
-out:
return ret;
retry_nowait:
- if (rpc_restart_call_prepare(task))
- ret = false;
- goto out;
-need_restart:
- if (!test_bit(NFSD4_CLIENT_CB_KILL, &clp->cl_flags)) {
- trace_nfsd_cb_restart(clp, cb);
- task->tk_status = 0;
- cb->cb_need_restart = true;
+ /*
+ * RPC_SIGNALLED() means that the rpc_client is being torn down and
+ * (possibly) recreated. Requeue the call in that case.
+ */
+ if (!RPC_SIGNALLED(task)) {
+ if (rpc_restart_call_prepare(task))
+ return false;
}
+requeue:
+ nfsd41_cb_release_slot(cb);
+ nfsd4_requeue_cb(task, cb);
return false;
}
@@ -1428,8 +1430,21 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
trace_nfsd_cb_rpc_done(clp);
- if (!nfsd4_cb_sequence_done(task, cb))
+ if (!clp->cl_minorversion) {
+ /*
+ * If the backchannel connection was shut down while this
+ * task was queued, we need to resubmit it after setting up
+ * a new backchannel connection.
+ *
+ * Note that if we lost our callback connection permanently
+ * the submission code will error out, so we don't need to
+ * handle that case here.
+ */
+ if (RPC_SIGNALLED(task))
+ nfsd4_requeue_cb(task, cb);
+ } else if (!nfsd4_cb_sequence_done(task, cb)) {
return;
+ }
if (cb->cb_status) {
WARN_ONCE(task->tk_status,
@@ -1462,7 +1477,7 @@ static void nfsd4_cb_release(void *calldata)
trace_nfsd_cb_rpc_release(cb->cb_clp);
- if (cb->cb_need_restart)
+ if (test_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags))
nfsd4_queue_cb(cb);
else
nfsd41_destroy_cb(cb);
@@ -1575,7 +1590,7 @@ nfsd4_run_cb_work(struct work_struct *work)
container_of(work, struct nfsd4_callback, cb_work);
struct nfs4_client *clp = cb->cb_clp;
struct rpc_clnt *clnt;
- int flags;
+ int flags, ret;
trace_nfsd_cb_start(clp);
@@ -1601,16 +1616,19 @@ nfsd4_run_cb_work(struct work_struct *work)
return;
}
- if (cb->cb_need_restart) {
- cb->cb_need_restart = false;
- } else {
+ if (!test_and_clear_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags)) {
if (cb->cb_ops && cb->cb_ops->prepare)
cb->cb_ops->prepare(cb);
}
+
cb->cb_msg.rpc_cred = clp->cl_cb_cred;
flags = clp->cl_minorversion ? RPC_TASK_NOCONNECT : RPC_TASK_SOFTCONN;
- rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
- cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
+ ret = rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | flags,
+ cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
+ if (ret != 0) {
+ set_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
+ nfsd4_queue_cb(cb);
+ }
}
void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
@@ -1620,10 +1638,10 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
cb->cb_msg.rpc_proc = &nfs4_cb_procedures[op];
cb->cb_msg.rpc_argp = cb;
cb->cb_msg.rpc_resp = cb;
+ cb->cb_flags = 0;
cb->cb_ops = ops;
INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
cb->cb_status = 0;
- cb->cb_need_restart = false;
cb->cb_held_slot = -1;
}
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index fbfddd3c4c94..290271ac4245 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -344,9 +344,10 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);
- refcount_inc(&ls->ls_stid.sc_count);
- nfsd4_run_cb(&ls->ls_recall);
-
+ if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ls->ls_recall.cb_flags)) {
+ refcount_inc(&ls->ls_stid.sc_count);
+ nfsd4_run_cb(&ls->ls_recall);
+ }
out_unlock:
spin_unlock(&ls->ls_lock);
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index f6e06c779d09..b397246dae7b 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1847,7 +1847,7 @@ static void nfsd4_send_cb_offload(struct nfsd4_copy *copy)
NFSPROC4_CLNT_CB_OFFLOAD);
trace_nfsd_cb_offload(copy->cp_clp, &cbo->co_res.cb_stateid,
&cbo->co_fh, copy->cp_count, copy->nfserr);
- nfsd4_run_cb(&cbo->co_cb);
+ nfsd4_try_run_cb(&cbo->co_cb);
}
/**
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 153eeea2c7c9..2041268b398a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -946,15 +946,6 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
spin_lock_init(&stid->sc_lock);
INIT_LIST_HEAD(&stid->sc_cp_list);
- /*
- * It shouldn't be a problem to reuse an opaque stateid value.
- * I don't think it is for 4.1. But with 4.0 I worry that, for
- * example, a stray write retransmission could be accepted by
- * the server when it should have been rejected. Therefore,
- * adopt a trick from the sctp code to attempt to maximize the
- * amount of time until an id is reused, by ensuring they always
- * "increase" (mod INT_MAX):
- */
return stid;
out_free:
kmem_cache_free(slab, stid);
@@ -1050,6 +1041,12 @@ static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
return openlockstateid(stid);
}
+/*
+ * This is the sc_free callback for delegation stateids, so it may be
+ * called from nfs4_put_stid() in nfsd_break_one_deleg().
+ * Because nfsd_break_one_deleg() runs with the flc->flc_lock held,
+ * this function must never sleep.
+ */
static void nfs4_free_deleg(struct nfs4_stid *stid)
{
struct nfs4_delegation *dp = delegstateid(stid);
@@ -1378,7 +1375,8 @@ static void revoke_delegation(struct nfs4_delegation *dp)
struct nfs4_client *clp = dp->dl_stid.sc_client;
WARN_ON(!list_empty(&dp->dl_recall_lru));
- WARN_ON_ONCE(!(dp->dl_stid.sc_status &
+ WARN_ON_ONCE(dp->dl_stid.sc_client->cl_minorversion > 0 &&
+ !(dp->dl_stid.sc_status &
(SC_STATUS_REVOKED | SC_STATUS_ADMIN_REVOKED)));
trace_nfsd_stid_revoke(&dp->dl_stid);
@@ -3168,7 +3166,6 @@ nfsd4_cb_recall_any_release(struct nfsd4_callback *cb)
{
struct nfs4_client *clp = cb->cb_clp;
- clear_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
drop_client(clp);
}
@@ -3199,7 +3196,6 @@ nfsd4_cb_getattr_release(struct nfsd4_callback *cb)
struct nfs4_delegation *dp =
container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
- clear_and_wake_up_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags);
nfs4_put_stid(&dp->dl_stid);
}
@@ -3220,11 +3216,15 @@ static void nfs4_cb_getattr(struct nfs4_cb_fattr *ncf)
struct nfs4_delegation *dp =
container_of(ncf, struct nfs4_delegation, dl_cb_fattr);
- if (test_and_set_bit(CB_GETATTR_BUSY, &ncf->ncf_cb_flags))
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &ncf->ncf_getattr.cb_flags))
return;
+
/* set to proper status when nfsd4_cb_getattr_done runs */
ncf->ncf_cb_status = NFS4ERR_IO;
+ /* ensure that wake_bit is done when RUNNING is cleared */
+ set_bit(NFSD4_CALLBACK_WAKE, &ncf->ncf_getattr.cb_flags);
+
refcount_inc(&dp->dl_stid.sc_count);
nfsd4_run_cb(&ncf->ncf_getattr);
}
@@ -4815,8 +4815,8 @@ out:
static unsigned long
nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
{
- int count;
struct nfsd_net *nn = shrink->private_data;
+ long count;
count = atomic_read(&nn->nfsd_courtesy_clients);
if (!count)
@@ -5414,6 +5414,11 @@ static const struct nfsd4_callback_ops nfsd4_cb_recall_ops = {
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
+ bool queued;
+
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &dp->dl_recall.cb_flags))
+ return;
+
/*
* We're assuming the state code never drops its reference
* without first removing the lease. Since we're in this lease
@@ -5422,7 +5427,10 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
* we know it's safe to take a reference.
*/
refcount_inc(&dp->dl_stid.sc_count);
- WARN_ON_ONCE(!nfsd4_run_cb(&dp->dl_recall));
+ queued = nfsd4_run_cb(&dp->dl_recall);
+ WARN_ON_ONCE(!queued);
+ if (!queued)
+ nfs4_put_stid(&dp->dl_stid);
}
/* Called from break_lease() with flc_lock held. */
@@ -5948,11 +5956,23 @@ nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
return 0;
}
+#ifdef CONFIG_NFSD_V4_DELEG_TIMESTAMPS
+static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open)
+{
+ return open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS;
+}
+#else /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */
+static bool nfsd4_want_deleg_timestamps(const struct nfsd4_open *open)
+{
+ return false;
+}
+#endif /* CONFIG_NFSD_V4_DELEG_TIMESTAMPS */
+
static struct nfs4_delegation *
nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *parent)
{
- bool deleg_ts = open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS;
+ bool deleg_ts = nfsd4_want_deleg_timestamps(open);
struct nfs4_client *clp = stp->st_stid.sc_client;
struct nfs4_file *fp = stp->st_stid.sc_file;
struct nfs4_clnt_odstate *odstate = stp->st_clnt_odstate;
@@ -5999,6 +6019,15 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (!nf)
return ERR_PTR(-EAGAIN);
+ /*
+ * File delegations and associated locks cannot be recovered if the
+ * export is from an NFS proxy server.
+ */
+ if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) {
+ nfsd_file_put(nf);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
if (nfs4_delegation_exists(clp, fp))
@@ -6151,8 +6180,8 @@ static void
nfs4_open_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *currentfh)
{
- bool deleg_ts = open->op_deleg_want & OPEN4_SHARE_ACCESS_WANT_DELEG_TIMESTAMPS;
struct nfs4_openowner *oo = openowner(stp->st_stateowner);
+ bool deleg_ts = nfsd4_want_deleg_timestamps(open);
struct nfs4_client *clp = stp->st_stid.sc_client;
struct svc_fh *parent = NULL;
struct nfs4_delegation *dp;
@@ -6855,38 +6884,34 @@ deleg_reaper(struct nfsd_net *nn)
{
struct list_head *pos, *next;
struct nfs4_client *clp;
- LIST_HEAD(cblist);
spin_lock(&nn->client_lock);
list_for_each_safe(pos, next, &nn->client_lru) {
clp = list_entry(pos, struct nfs4_client, cl_lru);
- if (clp->cl_state != NFSD4_ACTIVE ||
- list_empty(&clp->cl_delegations) ||
- atomic_read(&clp->cl_delegs_in_recall) ||
- test_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags) ||
- (ktime_get_boottime_seconds() -
- clp->cl_ra_time < 5)) {
+
+ if (clp->cl_state != NFSD4_ACTIVE)
+ continue;
+ if (list_empty(&clp->cl_delegations))
+ continue;
+ if (atomic_read(&clp->cl_delegs_in_recall))
+ continue;
+ if (test_and_set_bit(NFSD4_CALLBACK_RUNNING, &clp->cl_ra->ra_cb.cb_flags))
+ continue;
+ if (ktime_get_boottime_seconds() - clp->cl_ra_time < 5)
+ continue;
+ if (clp->cl_cb_state != NFSD4_CB_UP)
continue;
- }
- list_add(&clp->cl_ra_cblist, &cblist);
/* release in nfsd4_cb_recall_any_release */
kref_get(&clp->cl_nfsdfs.cl_ref);
- set_bit(NFSD4_CLIENT_CB_RECALL_ANY, &clp->cl_flags);
clp->cl_ra_time = ktime_get_boottime_seconds();
- }
- spin_unlock(&nn->client_lock);
-
- while (!list_empty(&cblist)) {
- clp = list_first_entry(&cblist, struct nfs4_client,
- cl_ra_cblist);
- list_del_init(&clp->cl_ra_cblist);
clp->cl_ra->ra_keep = 0;
clp->cl_ra->ra_bmval[0] = BIT(RCA4_TYPE_MASK_RDATA_DLG) |
BIT(RCA4_TYPE_MASK_WDATA_DLG);
trace_nfsd_cb_recall_any(clp->cl_ra);
nfsd4_run_cb(&clp->cl_ra->ra_cb);
}
+ spin_unlock(&nn->client_lock);
}
static void
@@ -7051,7 +7076,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
*/
statusmask |= SC_STATUS_REVOKED;
- statusmask |= SC_STATUS_ADMIN_REVOKED;
+ statusmask |= SC_STATUS_ADMIN_REVOKED | SC_STATUS_FREEABLE;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
@@ -7706,9 +7731,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
return status;
- status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG,
- SC_STATUS_REVOKED | SC_STATUS_FREEABLE,
- &s, nn);
+ status = nfsd4_lookup_stateid(cstate, stateid, SC_TYPE_DELEG, SC_STATUS_REVOKED, &s, nn);
if (status)
goto out;
dp = delegstateid(s);
@@ -7816,7 +7839,7 @@ nfsd4_lm_notify(struct file_lock *fl)
if (queue) {
trace_nfsd_cb_notify_lock(lo, nbl);
- nfsd4_run_cb(&nbl->nbl_cb);
+ nfsd4_try_run_cb(&nbl->nbl_cb);
}
}
@@ -8134,6 +8157,10 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
if (status != nfs_ok)
return status;
+ if (exportfs_cannot_lock(cstate->current_fh.fh_dentry->d_sb->s_export_op)) {
+ status = nfserr_notsupp;
+ goto out;
+ }
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
@@ -8473,6 +8500,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_lock_range;
goto put_stateid;
}
+ if (exportfs_cannot_lock(nf->nf_file->f_path.mnt->mnt_sb->s_export_op)) {
+ status = nfserr_notsupp;
+ goto put_file;
+ }
+
file_lock = locks_alloc_lock();
if (!file_lock) {
dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
@@ -9182,8 +9214,8 @@ nfsd4_deleg_getattr_conflict(struct svc_rqst *rqstp, struct dentry *dentry,
nfs4_cb_getattr(&dp->dl_cb_fattr);
spin_unlock(&ctx->flc_lock);
- wait_on_bit_timeout(&ncf->ncf_cb_flags, CB_GETATTR_BUSY,
- TASK_INTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
+ wait_on_bit_timeout(&ncf->ncf_getattr.cb_flags, NFSD4_CALLBACK_RUNNING,
+ TASK_UNINTERRUPTIBLE, NFSD_CB_GETATTR_TIMEOUT);
if (ncf->ncf_cb_status) {
/* Recall delegation only if client didn't respond */
status = nfserrno(nfsd_open_break_lease(inode, NFSD_MAY_READ));
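
nfs4_cb_getattr() claims NFSD4_CALLBACK_RUNNING with test_and_set_bit() and sets NFSD4_CALLBACK_WAKE so that completion wakes the waiter, while nfsd4_deleg_getattr_conflict() now sleeps uninterruptibly until RUNNING clears. A userspace analogue of that claim/wait/wake shape, with C11 atomics and a condition variable standing in for the kernel's wait_on_bit machinery (illustrative only, not the kernel implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CB_RUNNING (1u << 0)    /* like NFSD4_CALLBACK_RUNNING */
#define CB_WAKE    (1u << 1)    /* like NFSD4_CALLBACK_WAKE */

static atomic_uint cb_flags;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Issuer: start the callback only if it is not already running. */
static bool cb_try_start(void)
{
    if (atomic_fetch_or(&cb_flags, CB_RUNNING) & CB_RUNNING)
        return false;                     /* already in flight */
    atomic_fetch_or(&cb_flags, CB_WAKE);  /* request a wake-up on completion */
    return true;
}

/* Completion: clear RUNNING and wake waiters iff a wake-up was requested. */
static void cb_finish(void)
{
    unsigned int old = atomic_fetch_and(&cb_flags, ~(CB_RUNNING | CB_WAKE));

    if (old & CB_WAKE) {
        pthread_mutex_lock(&lock);
        pthread_cond_broadcast(&cond);
        pthread_mutex_unlock(&lock);
    }
}

/* Waiter: block until RUNNING is cleared (timeout omitted for brevity). */
static void cb_wait(void)
{
    pthread_mutex_lock(&lock);
    while (atomic_load(&cb_flags) & CB_RUNNING)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
    (void)arg;
    cb_finish();
    return NULL;
}

int main(void)
{
    pthread_t t;

    if (cb_try_start()) {
        pthread_create(&t, NULL, worker, NULL);
        cb_wait();
        pthread_join(t, NULL);
    }
    puts("callback completed");
    return 0;
}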
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index ce2a71e4904c..ac265d6fde35 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1917,6 +1917,7 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
struct svc_serv *serv;
LIST_HEAD(permsocks);
struct nfsd_net *nn;
+ bool delete = false;
int err, rem;
mutex_lock(&nfsd_mutex);
@@ -1977,34 +1978,28 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
}
}
- /* For now, no removing old sockets while server is running */
- if (serv->sv_nrthreads && !list_empty(&permsocks)) {
+ /*
+ * If there are listener transports remaining on the permsocks list,
+ * it means we were asked to remove a listener.
+ */
+ if (!list_empty(&permsocks)) {
list_splice_init(&permsocks, &serv->sv_permsocks);
- spin_unlock_bh(&serv->sv_lock);
- err = -EBUSY;
- goto out_unlock_mtx;
+ delete = true;
}
+ spin_unlock_bh(&serv->sv_lock);
- /* Close the remaining sockets on the permsocks list */
- while (!list_empty(&permsocks)) {
- xprt = list_first_entry(&permsocks, struct svc_xprt, xpt_list);
- list_move(&xprt->xpt_list, &serv->sv_permsocks);
-
- /*
- * Newly-created sockets are born with the BUSY bit set. Clear
- * it if there are no threads, since nothing can pick it up
- * in that case.
- */
- if (!serv->sv_nrthreads)
- clear_bit(XPT_BUSY, &xprt->xpt_flags);
-
- set_bit(XPT_CLOSE, &xprt->xpt_flags);
- spin_unlock_bh(&serv->sv_lock);
- svc_xprt_close(xprt);
- spin_lock_bh(&serv->sv_lock);
+ /* Do not remove listeners while there are active threads. */
+ if (serv->sv_nrthreads) {
+ err = -EBUSY;
+ goto out_unlock_mtx;
}
- spin_unlock_bh(&serv->sv_lock);
+ /*
+ * Since we can't delete an arbitrary llist entry, destroy the
+ * remaining listeners and recreate the list.
+ */
+ if (delete)
+ svc_xprt_destroy_all(serv, net);
/* walk list of addrs again, open any that still don't exist */
nlmsg_for_each_attr(attr, info->nlhdr, GENL_HDRLEN, rem) {
@@ -2031,6 +2026,9 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
xprt = svc_find_listener(serv, xcl_name, net, sa);
if (xprt) {
+ if (delete)
+ WARN_ONCE(1, "Transport type=%s already exists\n",
+ xcl_name);
svc_xprt_put(xprt);
continue;
}
@@ -2204,8 +2202,14 @@ static __net_init int nfsd_net_init(struct net *net)
NFSD_STATS_COUNTERS_NUM);
if (retval)
goto out_repcache_error;
+
memset(&nn->nfsd_svcstats, 0, sizeof(nn->nfsd_svcstats));
nn->nfsd_svcstats.program = &nfsd_programs[0];
+ if (!nfsd_proc_stat_init(net)) {
+ retval = -ENOMEM;
+ goto out_proc_error;
+ }
+
for (i = 0; i < sizeof(nn->nfsd_versions); i++)
nn->nfsd_versions[i] = nfsd_support_version(i);
for (i = 0; i < sizeof(nn->nfsd4_minorversions); i++)
@@ -2215,13 +2219,14 @@ static __net_init int nfsd_net_init(struct net *net)
nfsd4_init_leases_net(nn);
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
- nfsd_proc_stat_init(net);
#if IS_ENABLED(CONFIG_NFS_LOCALIO)
spin_lock_init(&nn->local_clients_lock);
INIT_LIST_HEAD(&nn->local_clients);
#endif
return 0;
+out_proc_error:
+ percpu_counter_destroy_many(nn->counter, NFSD_STATS_COUNTERS_NUM);
out_repcache_error:
nfsd_idmap_shutdown(net);
out_idmap_error:
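
nfsd_proc_stat_init() now returns the proc entry so nfsd_net_init() can detect a failed registration, and the new out_proc_error label unwinds the per-cpu counters allocated just before it; each failure exits through the label that undoes everything set up so far, in reverse order. A toy standalone version of that ladder (the setup/teardown names are stand-ins, not the real functions):

#include <errno.h>
#include <stdio.h>

static int setup_counters(void) { puts("counters up"); return 0; }
static void teardown_counters(void) { puts("counters down"); }
static int setup_proc(void) { return -1; }  /* simulate registration failing */
static void teardown_idmap(void) { puts("idmap down"); }

static int net_init(void)
{
    int retval = setup_counters();

    if (retval)
        goto out_idmap_error;
    if (setup_proc()) {       /* failure past this point must undo the counters */
        retval = -ENOMEM;
        goto out_proc_error;
    }
    return 0;

out_proc_error:
    teardown_counters();      /* unwind strictly in reverse order of setup */
out_idmap_error:
    teardown_idmap();
    return retval;
}

int main(void)
{
    printf("net_init() = %d\n", net_init());
    return 0;
}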
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 74d2d7b42676..290e29dd43eb 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -67,12 +67,15 @@ typedef struct {
struct nfsd4_callback {
struct nfs4_client *cb_clp;
struct rpc_message cb_msg;
+#define NFSD4_CALLBACK_RUNNING (0)
+#define NFSD4_CALLBACK_WAKE (1)
+#define NFSD4_CALLBACK_REQUEUE (2)
+ unsigned long cb_flags;
const struct nfsd4_callback_ops *cb_ops;
struct work_struct cb_work;
int cb_seq_status;
int cb_status;
int cb_held_slot;
- bool cb_need_restart;
};
struct nfsd4_callback_ops {
@@ -162,15 +165,11 @@ struct nfs4_cb_fattr {
struct timespec64 ncf_cb_mtime;
struct timespec64 ncf_cb_atime;
- unsigned long ncf_cb_flags;
bool ncf_file_modified;
u64 ncf_initial_cinfo;
u64 ncf_cur_fsize;
};
-/* bits for ncf_cb_flags */
-#define CB_GETATTR_BUSY 0
-
/*
* Represents a delegation stateid. The nfs4_client holds references to these
* and they are put when it is being destroyed or when the delegation is
@@ -198,8 +197,8 @@ struct nfs4_delegation {
struct list_head dl_perclnt;
struct list_head dl_recall_lru; /* delegation recalled */
struct nfs4_clnt_odstate *dl_clnt_odstate;
- u32 dl_type;
time64_t dl_time;
+ u32 dl_type;
/* For recall: */
int dl_retries;
struct nfsd4_callback dl_recall;
@@ -452,7 +451,6 @@ struct nfs4_client {
#define NFSD4_CLIENT_UPCALL_LOCK (5) /* upcall serialization */
#define NFSD4_CLIENT_CB_FLAG_MASK (1 << NFSD4_CLIENT_CB_UPDATE | \
1 << NFSD4_CLIENT_CB_KILL)
-#define NFSD4_CLIENT_CB_RECALL_ANY (6)
unsigned long cl_flags;
struct workqueue_struct *cl_callback_wq;
@@ -498,7 +496,6 @@ struct nfs4_client {
struct nfsd4_cb_recall_any *cl_ra;
time64_t cl_ra_time;
- struct list_head cl_ra_cblist;
};
/* struct nfs4_client_reset
@@ -780,6 +777,13 @@ extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *
extern void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
const struct nfsd4_callback_ops *ops, enum nfsd4_cb_op op);
extern bool nfsd4_run_cb(struct nfsd4_callback *cb);
+
+static inline void nfsd4_try_run_cb(struct nfsd4_callback *cb)
+{
+ if (!test_and_set_bit(NFSD4_CALLBACK_RUNNING, &cb->cb_flags))
+ WARN_ON_ONCE(!nfsd4_run_cb(cb));
+}
+
extern void nfsd4_shutdown_callback(struct nfs4_client *);
extern void nfsd4_shutdown_copy(struct nfs4_client *clp);
void nfsd4_async_copy_reaper(struct nfsd_net *nn);
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index bb22893f1157..f7eaf95e20fc 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -73,11 +73,11 @@ static int nfsd_show(struct seq_file *seq, void *v)
DEFINE_PROC_SHOW_ATTRIBUTE(nfsd);
-void nfsd_proc_stat_init(struct net *net)
+struct proc_dir_entry *nfsd_proc_stat_init(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
+ return svc_proc_register(net, &nn->nfsd_svcstats, &nfsd_proc_ops);
}
void nfsd_proc_stat_shutdown(struct net *net)
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index 04aacb6c36e2..e4efb0e4e56d 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -10,7 +10,7 @@
#include <uapi/linux/nfsd/stats.h>
#include <linux/percpu_counter.h>
-void nfsd_proc_stat_init(struct net *net);
+struct proc_dir_entry *nfsd_proc_stat_init(struct net *net);
void nfsd_proc_stat_shutdown(struct net *net);
static inline void nfsd_stats_rc_hits_inc(struct nfsd_net *nn)
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index ad2c0c432d08..a7630e9f6577 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -803,6 +803,14 @@ DEFINE_EVENT(nfsd_cs_slot_class, nfsd_##name, \
DEFINE_CS_SLOT_EVENT(slot_seqid_conf);
DEFINE_CS_SLOT_EVENT(slot_seqid_unconf);
+#define show_nfs_slot_flags(val) \
+ __print_flags(val, "|", \
+ { NFSD4_SLOT_INUSE, "INUSE" }, \
+ { NFSD4_SLOT_CACHETHIS, "CACHETHIS" }, \
+ { NFSD4_SLOT_INITIALIZED, "INITIALIZED" }, \
+ { NFSD4_SLOT_CACHED, "CACHED" }, \
+ { NFSD4_SLOT_REUSED, "REUSED" })
+
TRACE_EVENT(nfsd_slot_seqid_sequence,
TP_PROTO(
const struct nfs4_client *clp,
@@ -813,10 +821,11 @@ TRACE_EVENT(nfsd_slot_seqid_sequence,
TP_STRUCT__entry(
__field(u32, seqid)
__field(u32, slot_seqid)
+ __field(u32, slot_index)
+ __field(unsigned long, slot_flags)
__field(u32, cl_boot)
__field(u32, cl_id)
__sockaddr(addr, clp->cl_cb_conn.cb_addrlen)
- __field(bool, in_use)
),
TP_fast_assign(
__entry->cl_boot = clp->cl_clientid.cl_boot;
@@ -825,11 +834,13 @@ TRACE_EVENT(nfsd_slot_seqid_sequence,
clp->cl_cb_conn.cb_addrlen);
__entry->seqid = seq->seqid;
__entry->slot_seqid = slot->sl_seqid;
+ __entry->slot_index = seq->slotid;
+ __entry->slot_flags = slot->sl_flags;
),
- TP_printk("addr=%pISpc client %08x:%08x seqid=%u slot_seqid=%u (%sin use)",
+ TP_printk("addr=%pISpc client %08x:%08x idx=%u seqid=%u slot_seqid=%u flags=%s",
__get_sockaddr(addr), __entry->cl_boot, __entry->cl_id,
- __entry->seqid, __entry->slot_seqid,
- __entry->in_use ? "" : "not "
+ __entry->slot_index, __entry->seqid, __entry->slot_seqid,
+ show_nfs_slot_flags(__entry->slot_flags)
)
);
@@ -1039,6 +1050,7 @@ DEFINE_CLID_EVENT(confirmed_r);
{ 1 << NFSD_FILE_HASHED, "HASHED" }, \
{ 1 << NFSD_FILE_PENDING, "PENDING" }, \
{ 1 << NFSD_FILE_REFERENCED, "REFERENCED" }, \
+ { 1 << NFSD_FILE_RECENT, "RECENT" }, \
{ 1 << NFSD_FILE_GC, "GC" })
DECLARE_EVENT_CLASS(nfsd_file_class,
@@ -1317,6 +1329,7 @@ DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_lru_del_disposed);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_in_use);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_writeback);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_referenced);
+DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_aged);
DEFINE_NFSD_FILE_GC_EVENT(nfsd_file_gc_disposed);
DECLARE_EVENT_CLASS(nfsd_file_lruwalk_class,
@@ -1346,6 +1359,7 @@ DEFINE_EVENT(nfsd_file_lruwalk_class, name, \
TP_ARGS(removed, remaining))
DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_gc_removed);
+DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_gc_recent);
DEFINE_NFSD_FILE_LRUWALK_EVENT(nfsd_file_shrinker_removed);
TRACE_EVENT(nfsd_file_close,
@@ -1602,7 +1616,7 @@ DECLARE_EVENT_CLASS(nfsd_cb_lifetime_class,
__entry->cl_id = clp->cl_clientid.cl_id;
__entry->cb = cb;
__entry->opcode = cb->cb_ops ? cb->cb_ops->opcode : _CB_NULL;
- __entry->need_restart = cb->cb_need_restart;
+ __entry->need_restart = test_bit(NFSD4_CALLBACK_REQUEUE, &cb->cb_flags);
__assign_sockaddr(addr, &clp->cl_cb_conn.cb_addr,
clp->cl_cb_conn.cb_addrlen)
),
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 34d7aa531662..9abdc4b75813 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -71,7 +71,6 @@ nfserrno (int errno)
{ nfserr_acces, -EACCES },
{ nfserr_exist, -EEXIST },
{ nfserr_xdev, -EXDEV },
- { nfserr_mlink, -EMLINK },
{ nfserr_nodev, -ENODEV },
{ nfserr_notdir, -ENOTDIR },
{ nfserr_isdir, -EISDIR },
@@ -1687,9 +1686,17 @@ out:
return err;
}
-/*
- * Create a hardlink
- * N.B. After this call _both_ ffhp and tfhp need an fh_put
+/**
+ * nfsd_link - create a link
+ * @rqstp: RPC transaction context
+ * @ffhp: the file handle of the directory where the new link is to be created
+ * @name: the filename of the new link
+ * @len: the length of @name in octets
+ * @tfhp: the file handle of an existing file object
+ *
+ * After this call _both_ ffhp and tfhp need an fh_put.
+ *
+ * Returns a generic NFS status code in network byte-order.
*/
__be32
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
@@ -1697,6 +1704,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
{
struct dentry *ddir, *dnew, *dold;
struct inode *dirp;
+ int type;
__be32 err;
int host_err;
@@ -1716,11 +1724,11 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
if (isdotent(name, len))
goto out;
+ err = nfs_ok;
+ type = d_inode(tfhp->fh_dentry)->i_mode & S_IFMT;
host_err = fh_want_write(tfhp);
- if (host_err) {
- err = nfserrno(host_err);
+ if (host_err)
goto out;
- }
ddir = ffhp->fh_dentry;
dirp = d_inode(ddir);
@@ -1728,7 +1736,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
dnew = lookup_one_len(name, ddir, len);
if (IS_ERR(dnew)) {
- err = nfserrno(PTR_ERR(dnew));
+ host_err = PTR_ERR(dnew);
goto out_unlock;
}
@@ -1744,17 +1752,26 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
fh_fill_post_attrs(ffhp);
inode_unlock(dirp);
if (!host_err) {
- err = nfserrno(commit_metadata(ffhp));
- if (!err)
- err = nfserrno(commit_metadata(tfhp));
- } else {
- err = nfserrno(host_err);
+ host_err = commit_metadata(ffhp);
+ if (!host_err)
+ host_err = commit_metadata(tfhp);
}
+
dput(dnew);
out_drop_write:
fh_drop_write(tfhp);
+ if (host_err == -EBUSY) {
+ /*
+ * See RFC 8881 Section 18.9.4 para 1-2: NFSv4 LINK
+ * wants a status unique to the object type.
+ */
+ if (type != S_IFDIR)
+ err = nfserr_file_open;
+ else
+ err = nfserr_acces;
+ }
out:
- return err;
+ return err != nfs_ok ? err : nfserrno(host_err);
out_dput:
dput(dnew);
@@ -1783,9 +1800,19 @@ nfsd_has_cached_files(struct dentry *dentry)
return ret;
}
-/*
- * Rename a file
- * N.B. After this call _both_ ffhp and tfhp need an fh_put
+/**
+ * nfsd_rename - rename a directory entry
+ * @rqstp: RPC transaction context
+ * @ffhp: the file handle of the parent directory containing the entry to be renamed
+ * @fname: the filename of the directory entry to be renamed
+ * @flen: the length of @fname in octets
+ * @tfhp: the file handle of the parent directory to contain the renamed entry
+ * @tname: the filename of the new entry
+ * @tlen: the length of @tname in octets
+ *
+ * After this call _both_ ffhp and tfhp need an fh_put.
+ *
+ * Returns a generic NFS status code in network byte-order.
*/
__be32
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
@@ -1793,6 +1820,7 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
{
struct dentry *fdentry, *tdentry, *odentry, *ndentry, *trap;
struct inode *fdir, *tdir;
+ int type = S_IFDIR;
__be32 err;
int host_err;
bool close_cached = false;
@@ -1850,11 +1878,14 @@ retry:
host_err = -EINVAL;
if (odentry == trap)
goto out_dput_old;
+ type = d_inode(odentry)->i_mode & S_IFMT;
ndentry = lookup_one_len(tname, tdentry, tlen);
host_err = PTR_ERR(ndentry);
if (IS_ERR(ndentry))
goto out_dput_old;
+ if (d_inode(ndentry))
+ type = d_inode(ndentry)->i_mode & S_IFMT;
host_err = -ENOTEMPTY;
if (ndentry == trap)
goto out_dput_new;
@@ -1892,7 +1923,18 @@ retry:
out_dput_old:
dput(odentry);
out_nfserr:
- err = nfserrno(host_err);
+ if (host_err == -EBUSY) {
+ /*
+ * See RFC 8881 Section 18.26.4 para 1-3: NFSv4 RENAME
+ * wants a status unique to the object type.
+ */
+ if (type != S_IFDIR)
+ err = nfserr_file_open;
+ else
+ err = nfserr_acces;
+ } else {
+ err = nfserrno(host_err);
+ }
if (!close_cached) {
fh_fill_post_attrs(ffhp);
@@ -1919,9 +1961,17 @@ out:
return err;
}
-/*
- * Unlink a file or directory
- * N.B. After this call fhp needs an fh_put
+/**
+ * nfsd_unlink - remove a directory entry
+ * @rqstp: RPC transaction context
+ * @fhp: the file handle of the parent directory to be modified
+ * @type: enforced file type of the object to be removed
+ * @fname: the name of the directory entry to be removed
+ * @flen: length of @fname in octets
+ *
+ * After this call fhp needs an fh_put.
+ *
+ * Returns a generic NFS status code in network byte-order.
*/
__be32
nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
@@ -1995,15 +2045,17 @@ out_drop_write:
fh_drop_write(fhp);
out_nfserr:
if (host_err == -EBUSY) {
- /* name is mounted-on. There is no perfect
- * error status.
+ /*
+ * See RFC 8881 Section 18.25.4 para 4: NFSv4 REMOVE
+ * wants a status unique to the object type.
*/
- err = nfserr_file_open;
- } else {
- err = nfserrno(host_err);
+ if (type != S_IFDIR)
+ err = nfserr_file_open;
+ else
+ err = nfserr_acces;
}
out:
- return err;
+ return err != nfs_ok ? err : nfserrno(host_err);
out_unlock:
inode_unlock(dirp);
goto out_drop_write;
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3a202e51b360..83970d97840b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2424,7 +2424,7 @@ static void nilfs_segctor_accept(struct nilfs_sc_info *sci)
* the area protected by sc_state_lock.
*/
if (thread_is_alive)
- del_timer_sync(&sci->sc_timer);
+ timer_delete_sync(&sci->sc_timer);
}
/**
diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
index af94e3737470..e946f75eb540 100644
--- a/fs/ntfs3/attrib.c
+++ b/fs/ntfs3/attrib.c
@@ -2664,8 +2664,9 @@ int attr_set_compress(struct ntfs_inode *ni, bool compr)
attr->nres.run_off = cpu_to_le16(run_off);
}
- /* Update data attribute flags. */
+ /* Update attribute flags. */
if (compr) {
+ attr->flags &= ~ATTR_FLAG_SPARSED;
attr->flags |= ATTR_FLAG_COMPRESSED;
attr->nres.c_unit = NTFS_LZNT_CUNIT;
} else {
diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c
index 3f96a11804c9..9b6a3f8d2e7c 100644
--- a/fs/ntfs3/file.c
+++ b/fs/ntfs3/file.c
@@ -101,8 +101,26 @@ int ntfs_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry,
/* Allowed to change compression for empty files and for directories only. */
if (!is_dedup(ni) && !is_encrypted(ni) &&
(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
- /* Change compress state. */
- int err = ni_set_compress(inode, flags & FS_COMPR_FL);
+ int err = 0;
+ struct address_space *mapping = inode->i_mapping;
+
+		/* Write out all data and wait. */
+ filemap_invalidate_lock(mapping);
+ err = filemap_write_and_wait(mapping);
+
+ if (err >= 0) {
+ /* Change compress state. */
+ bool compr = flags & FS_COMPR_FL;
+ err = ni_set_compress(inode, compr);
+
+ /* For files change a_ops too. */
+ if (!err)
+ mapping->a_ops = compr ? &ntfs_aops_cmpr :
+ &ntfs_aops;
+ }
+
+ filemap_invalidate_unlock(mapping);
+
if (err)
return err;
}
@@ -412,6 +430,7 @@ static int ntfs_extend(struct inode *inode, loff_t pos, size_t count,
}
if (extend_init && !is_compressed(ni)) {
+ WARN_ON(ni->i_valid >= pos);
err = ntfs_extend_initialized_size(file, ni, ni->i_valid, pos);
if (err)
goto out;
@@ -1228,21 +1247,22 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
int err;
- err = check_write_restriction(inode);
- if (err)
- return err;
-
- if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
- ntfs_inode_warn(inode, "direct i/o + compressed not supported");
- return -EOPNOTSUPP;
- }
-
if (!inode_trylock(inode)) {
if (iocb->ki_flags & IOCB_NOWAIT)
return -EAGAIN;
inode_lock(inode);
}
+ ret = check_write_restriction(inode);
+ if (ret)
+ goto out;
+
+ if (is_compressed(ni) && (iocb->ki_flags & IOCB_DIRECT)) {
+ ntfs_inode_warn(inode, "direct i/o + compressed not supported");
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
ret = generic_write_checks(iocb, from);
if (ret <= 0)
goto out;
diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
index 5df6a0b5add9..b7a83200f2cc 100644
--- a/fs/ntfs3/frecord.c
+++ b/fs/ntfs3/frecord.c
@@ -281,63 +281,6 @@ struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
}
/*
- * ni_load_attr - Load attribute that contains given VCN.
- */
-struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, CLST vcn,
- struct mft_inode **pmi)
-{
- struct ATTR_LIST_ENTRY *le;
- struct ATTRIB *attr;
- struct mft_inode *mi;
- struct ATTR_LIST_ENTRY *next;
-
- if (!ni->attr_list.size) {
- if (pmi)
- *pmi = &ni->mi;
- return mi_find_attr(ni, &ni->mi, NULL, type, name, name_len,
- NULL);
- }
-
- le = al_find_ex(ni, NULL, type, name, name_len, NULL);
- if (!le)
- return NULL;
-
- /*
- * Unfortunately ATTR_LIST_ENTRY contains only start VCN.
- * So to find the ATTRIB segment that contains 'vcn' we should
- * enumerate some entries.
- */
- if (vcn) {
- for (;; le = next) {
- next = al_find_ex(ni, le, type, name, name_len, NULL);
- if (!next || le64_to_cpu(next->vcn) > vcn)
- break;
- }
- }
-
- if (ni_load_mi(ni, le, &mi))
- return NULL;
-
- if (pmi)
- *pmi = mi;
-
- attr = mi_find_attr(ni, mi, NULL, type, name, name_len, &le->id);
- if (!attr)
- return NULL;
-
- if (!attr->non_res)
- return attr;
-
- if (le64_to_cpu(attr->nres.svcn) <= vcn &&
- vcn <= le64_to_cpu(attr->nres.evcn))
- return attr;
-
- _ntfs_bad_inode(&ni->vfs_inode);
- return NULL;
-}
-
-/*
* ni_load_all_mi - Load all subrecords.
*/
int ni_load_all_mi(struct ntfs_inode *ni)
@@ -3434,10 +3377,12 @@ int ni_set_compress(struct inode *inode, bool compr)
}
ni->std_fa = std->fa;
- if (compr)
+ if (compr) {
+ std->fa &= ~FILE_ATTRIBUTE_SPARSE_FILE;
std->fa |= FILE_ATTRIBUTE_COMPRESSED;
- else
+ } else {
std->fa &= ~FILE_ATTRIBUTE_COMPRESSED;
+ }
if (ni->std_fa != std->fa) {
ni->std_fa = std->fa;
diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
index 938d351ebac7..df81f1f7330c 100644
--- a/fs/ntfs3/fsntfs.c
+++ b/fs/ntfs3/fsntfs.c
@@ -1035,34 +1035,6 @@ struct buffer_head *ntfs_bread(struct super_block *sb, sector_t block)
return NULL;
}
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer)
-{
- struct block_device *bdev = sb->s_bdev;
- u32 blocksize = sb->s_blocksize;
- u64 block = lbo >> sb->s_blocksize_bits;
- u32 off = lbo & (blocksize - 1);
- u32 op = blocksize - off;
-
- for (; bytes; block += 1, off = 0, op = blocksize) {
- struct buffer_head *bh = __bread(bdev, block, blocksize);
-
- if (!bh)
- return -EIO;
-
- if (op > bytes)
- op = bytes;
-
- memcpy(buffer, bh->b_data + off, op);
-
- put_bh(bh);
-
- bytes -= op;
- buffer = Add2Ptr(buffer, op);
- }
-
- return 0;
-}
-
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buf, int wait)
{
diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
index 7eb9fae22f8d..78d20e4baa2c 100644
--- a/fs/ntfs3/index.c
+++ b/fs/ntfs3/index.c
@@ -618,7 +618,7 @@ static bool index_hdr_check(const struct INDEX_HDR *hdr, u32 bytes)
u32 off = le32_to_cpu(hdr->de_off);
if (!IS_ALIGNED(off, 8) || tot > bytes || end > tot ||
- off + sizeof(struct NTFS_DE) > end) {
+ size_add(off, sizeof(struct NTFS_DE)) > end) {
/* incorrect index buffer. */
return false;
}
@@ -736,7 +736,7 @@ fill_table:
if (end > total)
return NULL;
- if (off + sizeof(struct NTFS_DE) > end)
+ if (size_add(off, sizeof(struct NTFS_DE)) > end)
return NULL;
e = Add2Ptr(hdr, off);
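
Both checks above replace the open-coded off + sizeof(struct NTFS_DE) with size_add(), which saturates at SIZE_MAX on overflow so that an untrusted on-disk offset cannot wrap the sum back below the bound. The hazard in miniature (size_add here is a local stand-in for the kernel helper from linux/overflow.h):

#include <stdint.h>
#include <stdio.h>

/* Saturating add: on overflow return SIZE_MAX instead of wrapping, so a
 * subsequent "> end" bounds check always fails safely. */
static size_t size_add(size_t a, size_t b)
{
    size_t sum = a + b;

    return sum < a ? SIZE_MAX : sum;
}

int main(void)
{
    size_t end = 4096;            /* size of the index buffer */
    size_t off = SIZE_MAX - 4;    /* hostile offset from disk */
    size_t need = 16;             /* sizeof(struct NTFS_DE), say */

    /* Wrapping comparison: off + need overflows to 11 and passes the check. */
    printf("unsafe: %s\n", off + need > end ? "rejected" : "ACCEPTED (bug)");
    /* Saturating comparison: rejected as intended. */
    printf("safe:   %s\n", size_add(off, need) > end ? "rejected" : "accepted");
    return 0;
}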
diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
index a1e11228dafd..3e2957a1e360 100644
--- a/fs/ntfs3/inode.c
+++ b/fs/ntfs3/inode.c
@@ -1025,46 +1025,6 @@ int ntfs_sync_inode(struct inode *inode)
}
/*
- * writeback_inode - Helper function for ntfs_flush_inodes().
- *
- * This writes both the inode and the file data blocks, waiting
- * for in flight data blocks before the start of the call. It
- * does not wait for any io started during the call.
- */
-static int writeback_inode(struct inode *inode)
-{
- int ret = sync_inode_metadata(inode, 0);
-
- if (!ret)
- ret = filemap_fdatawrite(inode->i_mapping);
- return ret;
-}
-
-/*
- * ntfs_flush_inodes
- *
- * Write data and metadata corresponding to i1 and i2. The io is
- * started but we do not wait for any of it to finish.
- *
- * filemap_flush() is used for the block device, so if there is a dirty
- * page for a block already in flight, we will not wait and start the
- * io over again.
- */
-int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2)
-{
- int ret = 0;
-
- if (i1)
- ret = writeback_inode(i1);
- if (!ret && i2)
- ret = writeback_inode(i2);
- if (!ret)
- ret = filemap_flush(sb->s_bdev_file->f_mapping);
- return ret;
-}
-
-/*
* Helper function to read file.
*/
int inode_read_data(struct inode *inode, void *data, size_t bytes)
diff --git a/fs/ntfs3/ntfs.h b/fs/ntfs3/ntfs.h
index 241f2ffdd920..1ff13b6f9613 100644
--- a/fs/ntfs3/ntfs.h
+++ b/fs/ntfs3/ntfs.h
@@ -717,7 +717,7 @@ static inline struct NTFS_DE *hdr_first_de(const struct INDEX_HDR *hdr)
struct NTFS_DE *e;
u16 esize;
- if (de_off >= used || de_off + sizeof(struct NTFS_DE) > used )
+ if (de_off >= used || size_add(de_off, sizeof(struct NTFS_DE)) > used)
return NULL;
e = Add2Ptr(hdr, de_off);
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 382820464dee..d628977e2556 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -530,9 +530,6 @@ struct ATTRIB *ni_find_attr(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTRIB *ni_enum_attr_ex(struct ntfs_inode *ni, struct ATTRIB *attr,
struct ATTR_LIST_ENTRY **le,
struct mft_inode **mi);
-struct ATTRIB *ni_load_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
- const __le16 *name, u8 name_len, CLST vcn,
- struct mft_inode **pmi);
int ni_load_all_mi(struct ntfs_inode *ni);
bool ni_add_subrecord(struct ntfs_inode *ni, CLST rno, struct mft_inode **mi);
int ni_remove_attr(struct ntfs_inode *ni, enum ATTR_TYPE type,
@@ -619,7 +616,6 @@ enum NTFS_DIRTY_FLAGS {
NTFS_DIRTY_ERROR = 2,
};
int ntfs_set_state(struct ntfs_sb_info *sbi, enum NTFS_DIRTY_FLAGS dirty);
-int ntfs_sb_read(struct super_block *sb, u64 lbo, size_t bytes, void *buffer);
int ntfs_sb_write(struct super_block *sb, u64 lbo, size_t bytes,
const void *buffer, int wait);
int ntfs_sb_write_run(struct ntfs_sb_info *sbi, const struct runs_tree *run,
@@ -717,8 +713,6 @@ int ntfs_write_end(struct file *file, struct address_space *mapping, loff_t pos,
u32 len, u32 copied, struct folio *folio, void *fsdata);
int ntfs3_write_inode(struct inode *inode, struct writeback_control *wbc);
int ntfs_sync_inode(struct inode *inode);
-int ntfs_flush_inodes(struct super_block *sb, struct inode *i1,
- struct inode *i2);
int inode_read_data(struct inode *inode, void *data, size_t bytes);
int ntfs_create_inode(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const struct cpu_str *uni,
diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
index 6a0f6b0a3ab2..920a1ab47b63 100644
--- a/fs/ntfs3/super.c
+++ b/fs/ntfs3/super.c
@@ -555,6 +555,55 @@ static const struct proc_ops ntfs3_label_fops = {
.proc_write = ntfs3_label_write,
};
+static void ntfs_create_procdir(struct super_block *sb)
+{
+ struct proc_dir_entry *e;
+
+ if (!proc_info_root)
+ return;
+
+ e = proc_mkdir(sb->s_id, proc_info_root);
+ if (e) {
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ proc_create_data("volinfo", 0444, e,
+ &ntfs3_volinfo_fops, sb);
+ proc_create_data("label", 0644, e,
+ &ntfs3_label_fops, sb);
+ sbi->procdir = e;
+ }
+}
+
+static void ntfs_remove_procdir(struct super_block *sb)
+{
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+
+ if (!sbi->procdir)
+ return;
+
+ remove_proc_entry("label", sbi->procdir);
+ remove_proc_entry("volinfo", sbi->procdir);
+ remove_proc_entry(sb->s_id, proc_info_root);
+ sbi->procdir = NULL;
+}
+
+static void ntfs_create_proc_root(void)
+{
+ proc_info_root = proc_mkdir("fs/ntfs3", NULL);
+}
+
+static void ntfs_remove_proc_root(void)
+{
+ if (proc_info_root) {
+ remove_proc_entry("fs/ntfs3", NULL);
+ proc_info_root = NULL;
+ }
+}
+#else
+static void ntfs_create_procdir(struct super_block *sb) {}
+static void ntfs_remove_procdir(struct super_block *sb) {}
+static void ntfs_create_proc_root(void) {}
+static void ntfs_remove_proc_root(void) {}
#endif
static struct kmem_cache *ntfs_inode_cachep;
@@ -644,15 +693,7 @@ static void ntfs_put_super(struct super_block *sb)
{
struct ntfs_sb_info *sbi = sb->s_fs_info;
-#ifdef CONFIG_PROC_FS
- // Remove /proc/fs/ntfs3/..
- if (sbi->procdir) {
- remove_proc_entry("label", sbi->procdir);
- remove_proc_entry("volinfo", sbi->procdir);
- remove_proc_entry(sb->s_id, proc_info_root);
- sbi->procdir = NULL;
- }
-#endif
+ ntfs_remove_procdir(sb);
/* Mark rw ntfs as clear, if possible. */
ntfs_set_state(sbi, NTFS_DIRTY_CLEAR);
@@ -1590,20 +1631,7 @@ load_root:
kfree(boot2);
}
-#ifdef CONFIG_PROC_FS
- /* Create /proc/fs/ntfs3/.. */
- if (proc_info_root) {
- struct proc_dir_entry *e = proc_mkdir(sb->s_id, proc_info_root);
- static_assert((S_IRUGO | S_IWUSR) == 0644);
- if (e) {
- proc_create_data("volinfo", S_IRUGO, e,
- &ntfs3_volinfo_fops, sb);
- proc_create_data("label", S_IRUGO | S_IWUSR, e,
- &ntfs3_label_fops, sb);
- sbi->procdir = e;
- }
- }
-#endif
+ ntfs_create_procdir(sb);
if (is_legacy_ntfs(sb))
sb->s_flags |= SB_RDONLY;
@@ -1853,14 +1881,11 @@ static int __init init_ntfs_fs(void)
if (IS_ENABLED(CONFIG_NTFS3_LZX_XPRESS))
pr_info("ntfs3: Read-only LZX/Xpress compression included\n");
-#ifdef CONFIG_PROC_FS
- /* Create "/proc/fs/ntfs3" */
- proc_info_root = proc_mkdir("fs/ntfs3", NULL);
-#endif
+ ntfs_create_proc_root();
err = ntfs3_init_bitmap();
if (err)
- return err;
+ goto out2;
ntfs_inode_cachep = kmem_cache_create(
"ntfs_inode_cache", sizeof(struct ntfs_inode), 0,
@@ -1880,6 +1905,8 @@ out:
kmem_cache_destroy(ntfs_inode_cachep);
out1:
ntfs3_exit_bitmap();
+out2:
+ ntfs_remove_proc_root();
return err;
}
@@ -1890,11 +1917,7 @@ static void __exit exit_ntfs_fs(void)
unregister_filesystem(&ntfs_fs_type);
unregister_as_ntfs_legacy();
ntfs3_exit_bitmap();
-
-#ifdef CONFIG_PROC_FS
- if (proc_info_root)
- remove_proc_entry("fs/ntfs3", NULL);
-#endif
+ ntfs_remove_proc_root();
}
MODULE_LICENSE("GPL");
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 4414743b638e..b8ac85b548c7 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -1803,6 +1803,14 @@ static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
el = root_el;
while (el->l_tree_depth) {
+ if (unlikely(le16_to_cpu(el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH)) {
+ ocfs2_error(ocfs2_metadata_cache_get_super(ci),
+ "Owner %llu has invalid tree depth %u in extent list\n",
+ (unsigned long long)ocfs2_metadata_cache_owner(ci),
+ le16_to_cpu(el->l_tree_depth));
+ ret = -EROFS;
+ goto out;
+ }
if (le16_to_cpu(el->l_next_free_rec) == 0) {
ocfs2_error(ocfs2_metadata_cache_get_super(ci),
"Owner %llu has empty extent list at depth %u\n",
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 5bbeb6fbb1ac..40b6bce12951 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -46,7 +46,6 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh = NULL;
struct buffer_head *buffer_cache_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- void *kaddr;
trace_ocfs2_symlink_get_block(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -91,17 +90,11 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
* could've happened. Since we've got a reference on
* the bh, even if it commits while we're doing the
* copy, the data is still good. */
- if (buffer_jbd(buffer_cache_bh)
- && ocfs2_inode_is_new(inode)) {
- kaddr = kmap_atomic(bh_result->b_page);
- if (!kaddr) {
- mlog(ML_ERROR, "couldn't kmap!\n");
- goto bail;
- }
- memcpy(kaddr + (bh_result->b_size * iblock),
- buffer_cache_bh->b_data,
- bh_result->b_size);
- kunmap_atomic(kaddr);
+ if (buffer_jbd(buffer_cache_bh) && ocfs2_inode_is_new(inode)) {
+ memcpy_to_folio(bh_result->b_folio,
+ bh_result->b_size * iblock,
+ buffer_cache_bh->b_data,
+ bh_result->b_size);
set_buffer_uptodate(bh_result);
}
brelse(buffer_cache_bh);
@@ -920,7 +913,7 @@ static void ocfs2_write_failure(struct inode *inode,
ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
user_pos, user_len);
- block_commit_write(&folio->page, from, to);
+ block_commit_write(folio, from, to);
}
}
}
@@ -2012,7 +2005,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos,
ocfs2_jbd2_inode_add_write(handle, inode,
start_byte, length);
}
- block_commit_write(&folio->page, from, to);
+ block_commit_write(folio, from, to);
}
}
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 0f46b22561d6..fce9beb214f0 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -724,7 +724,7 @@ static void o2net_shutdown_sc(struct work_struct *work)
if (o2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
/* we shouldn't flush as we're in the thread, the
* races with pending sc work structs are harmless */
- del_timer_sync(&sc->sc_idle_timeout);
+ timer_delete_sync(&sc->sc_idle_timeout);
o2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
sc_put(sc);
kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index e54f2c4b5a90..2056cf08ac1e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -813,7 +813,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
/* must not update i_size! */
- block_commit_write(&folio->page, block_start + 1, block_start + 1);
+ block_commit_write(folio, block_start + 1, block_start + 1);
}
/*
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 15d9acd456ec..e85b1ccf81be 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -273,7 +273,7 @@ ssize_t ocfs2_quota_write(struct super_block *sb, int type,
if (new)
memset(bh->b_data, 0, sb->s_blocksize);
memcpy(bh->b_data + offset, data, len);
- flush_dcache_page(bh->b_page);
+ flush_dcache_folio(bh->b_folio);
set_buffer_uptodate(bh);
unlock_buffer(bh);
ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 5538c4aee8fa..b0d4e1908b22 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -416,7 +416,7 @@ static const struct file_operations proc_pid_cmdline_ops = {
#ifdef CONFIG_KALLSYMS
/*
* Provides a wchan file via kallsyms in a proper one-value-per-file format.
- * Returns the resolved symbol. If that fails, simply return the address.
+ * Returns the resolved symbol to user space.
*/
static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 77a517f91821..96122e91c645 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -157,6 +157,7 @@ unsigned name_to_int(const struct qstr *qstr);
/* Worst case buffer size needed for holding an integer. */
#define PROC_NUMBUF 13
+#ifdef CONFIG_PAGE_MAPCOUNT
/**
* folio_precise_page_mapcount() - Number of mappings of this folio page.
* @folio: The folio.
@@ -187,7 +188,49 @@ static inline int folio_precise_page_mapcount(struct folio *folio,
return mapcount;
}
+#else /* !CONFIG_PAGE_MAPCOUNT */
+static inline int folio_precise_page_mapcount(struct folio *folio,
+ struct page *page)
+{
+ BUILD_BUG();
+}
+#endif /* CONFIG_PAGE_MAPCOUNT */
+/**
+ * folio_average_page_mapcount() - Average number of mappings per page in this
+ * folio
+ * @folio: The folio.
+ *
+ * The average number of user page table entries that reference each page in
+ * this folio as tracked via the RMAP: either referenced directly (PTE) or
+ * as part of a larger area that covers this page (e.g., PMD).
+ *
+ * The average is calculated by rounding to the nearest integer; however,
+ * to avoid duplicated code in current callers, the average is at least
+ * 1 if any page of the folio is mapped.
+ *
+ * Returns: The average number of mappings per page in this folio.
+ */
+static inline int folio_average_page_mapcount(struct folio *folio)
+{
+ int mapcount, entire_mapcount, avg;
+
+ if (!folio_test_large(folio))
+ return atomic_read(&folio->_mapcount) + 1;
+
+ mapcount = folio_large_mapcount(folio);
+ if (unlikely(mapcount <= 0))
+ return 0;
+ entire_mapcount = folio_entire_mapcount(folio);
+ if (mapcount <= entire_mapcount)
+ return entire_mapcount;
+ mapcount -= entire_mapcount;
+
+ /* Round to closest integer ... */
+ avg = ((unsigned int)mapcount + folio_large_nr_pages(folio) / 2) >> folio_large_order(folio);
+ /* ... but return at least 1. */
+ return max_t(int, avg + entire_mapcount, 1);
+}
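
The rounding step above computes round-to-nearest of mapcount over the folio's page count by adding half the page count before the right shift by the folio order. A quick standalone check of that arithmetic, leaving aside the entire-mapcount handling the real helper performs first:

#include <stdio.h>

/* Round-to-nearest per-page average: (mapcount + nr/2) >> order,
 * for a folio of nr = 1 << order pages. */
static int avg_mapcount(unsigned int mapcount, unsigned int order)
{
    unsigned int nr = 1u << order;
    int avg;

    if (mapcount == 0)
        return 0;
    avg = (int)((mapcount + nr / 2) >> order);
    return avg < 1 ? 1 : avg;   /* ... but at least 1 if anything is mapped */
}

int main(void)
{
    /* Order-9 folio (512 pages) with 700 PTE mappings: 1.37 -> 1 */
    printf("%d\n", avg_mapcount(700, 9));
    /* 800 mappings: 1.56 -> rounds up to 2 */
    printf("%d\n", avg_mapcount(800, 9));
    /* 3 mappings: far below one per page, clamped to 1 */
    printf("%d\n", avg_mapcount(3, 9));
    return 0;
}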
/*
* array.c
*/
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8ba9b1472390..83be312159c9 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -162,6 +162,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "Unaccepted: ",
global_zone_page_state(NR_UNACCEPTED));
#endif
+ show_val_kb(m, "Balloon: ",
+ global_node_page_state(NR_BALLOON_PAGES));
hugetlb_report_meminfo(m);
diff --git a/fs/proc/page.c b/fs/proc/page.c
index a55f5acefa97..23fc771100ae 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -67,9 +67,14 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
* memmaps that were actually initialized.
*/
page = pfn_to_online_page(pfn);
- if (page)
- mapcount = folio_precise_page_mapcount(page_folio(page),
- page);
+ if (page) {
+ struct folio *folio = page_folio(page);
+
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ mapcount = folio_precise_page_mapcount(folio, page);
+ else
+ mapcount = folio_average_page_mapcount(folio);
+ }
if (put_user(mapcount, out)) {
ret = -EFAULT;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index f02cd362309a..994cde10e3f4 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -707,6 +707,8 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
struct folio *folio = page_folio(page);
int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
+ bool exclusive;
+ int mapcount;
/*
* First accumulate quantities that depend only on |size| and the type
@@ -747,18 +749,29 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
dirty, locked, present);
return;
}
+
+ if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT)) {
+ mapcount = folio_average_page_mapcount(folio);
+ exclusive = !folio_maybe_mapped_shared(folio);
+ }
+
/*
* We obtain a snapshot of the mapcount. Without holding the folio lock
* this snapshot can be slightly wrong as we cannot always read the
* mapcount atomically.
*/
for (i = 0; i < nr; i++, page++) {
- int mapcount = folio_precise_page_mapcount(folio, page);
unsigned long pss = PAGE_SIZE << PSS_SHIFT;
+
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
+ mapcount = folio_precise_page_mapcount(folio, page);
+ exclusive = mapcount < 2;
+ }
+
if (mapcount >= 2)
pss /= mapcount;
smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
- dirty, locked, mapcount < 2);
+ dirty, locked, exclusive);
}
}
@@ -1023,7 +1036,7 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
if (folio) {
/* We treat non-present entries as "maybe shared". */
- if (!present || folio_likely_mapped_shared(folio) ||
+ if (!present || folio_maybe_mapped_shared(folio) ||
hugetlb_pmd_shared(pte))
mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
else
@@ -1632,6 +1645,7 @@ struct pagemapread {
#define PM_SOFT_DIRTY BIT_ULL(55)
#define PM_MMAP_EXCLUSIVE BIT_ULL(56)
#define PM_UFFD_WP BIT_ULL(57)
+#define PM_GUARD_REGION BIT_ULL(58)
#define PM_FILE BIT_ULL(61)
#define PM_SWAP BIT_ULL(62)
#define PM_PRESENT BIT_ULL(63)
@@ -1651,6 +1665,13 @@ static int add_to_pagemap(pagemap_entry_t *pme, struct pagemapread *pm)
return 0;
}
+static bool __folio_page_mapped_exclusively(struct folio *folio, struct page *page)
+{
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ return folio_precise_page_mapcount(folio, page) == 1;
+ return !folio_maybe_mapped_shared(folio);
+}
+
static int pagemap_pte_hole(unsigned long start, unsigned long end,
__always_unused int depth, struct mm_walk *walk)
{
@@ -1732,6 +1753,8 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
page = pfn_swap_entry_to_page(entry);
if (pte_marker_entry_uffd_wp(entry))
flags |= PM_UFFD_WP;
+ if (is_guard_swp_entry(entry))
+ flags |= PM_GUARD_REGION;
}
if (page) {
@@ -1739,7 +1762,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
if (!folio_test_anon(folio))
flags |= PM_FILE;
if ((flags & PM_PRESENT) &&
- folio_precise_page_mapcount(folio, page) == 1)
+ __folio_page_mapped_exclusively(folio, page))
flags |= PM_MMAP_EXCLUSIVE;
}
if (vma->vm_flags & VM_SOFTDIRTY)
@@ -1814,7 +1837,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
pagemap_entry_t pme;
if (folio && (flags & PM_PRESENT) &&
- folio_precise_page_mapcount(folio, page + idx) == 1)
+ __folio_page_mapped_exclusively(folio, page))
cur_flags |= PM_MMAP_EXCLUSIVE;
pme = make_pme(frame, cur_flags);
@@ -1879,7 +1902,7 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
if (!folio_test_anon(folio))
flags |= PM_FILE;
- if (!folio_likely_mapped_shared(folio) &&
+ if (!folio_maybe_mapped_shared(folio) &&
!hugetlb_pmd_shared(ptep))
flags |= PM_MMAP_EXCLUSIVE;
@@ -1931,7 +1954,8 @@ static const struct mm_walk_ops pagemap_ops = {
* Bit 55 pte is soft-dirty (see Documentation/admin-guide/mm/soft-dirty.rst)
* Bit 56 page exclusively mapped
* Bit 57 pte is uffd-wp write-protected
- * Bits 58-60 zero
+ * Bit 58 pte is a guard region
+ * Bits 59-60 zero
* Bit 61 page is file-page or shared-anon
* Bit 62 page swapped
* Bit 63 page present
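
With bit 58 assigned to guard regions, a userspace consumer tests it like the other flag bits after reading the 64-bit entry for a virtual page from /proc/pid/pagemap. A minimal sketch (error handling trimmed; bit positions follow the table above):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define PM_MMAP_EXCLUSIVE (1ULL << 56)
#define PM_GUARD_REGION   (1ULL << 58)
#define PM_SWAP           (1ULL << 62)
#define PM_PRESENT        (1ULL << 63)

int main(void)
{
    uint64_t pme = 0;
    unsigned long vaddr = (unsigned long)&pme;  /* any mapped address */
    long psz = sysconf(_SC_PAGESIZE);
    int fd = open("/proc/self/pagemap", O_RDONLY);

    if (fd < 0 || psz <= 0)
        return 1;
    /* One u64 per page, indexed by virtual page number. */
    if (pread(fd, &pme, sizeof(pme), (off_t)(vaddr / psz) * sizeof(pme))
        != sizeof(pme))
        return 1;
    printf("present=%d swap=%d exclusive=%d guard=%d\n",
           !!(pme & PM_PRESENT), !!(pme & PM_SWAP),
           !!(pme & PM_MMAP_EXCLUSIVE), !!(pme & PM_GUARD_REGION));
    close(fd);
    return 0;
}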
@@ -2455,22 +2479,19 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
spinlock_t *ptl;
int ret;
- arch_enter_lazy_mmu_mode();
-
ret = pagemap_scan_thp_entry(pmd, start, end, walk);
- if (ret != -ENOENT) {
- arch_leave_lazy_mmu_mode();
+ if (ret != -ENOENT)
return ret;
- }
ret = 0;
start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
if (!pte) {
- arch_leave_lazy_mmu_mode();
walk->action = ACTION_AGAIN;
return 0;
}
+ arch_enter_lazy_mmu_mode();
+
if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
/* Fast path for performing exclusive WP */
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
@@ -2539,8 +2560,8 @@ flush_and_return:
if (flush_end)
flush_tlb_range(vma, start, addr);
- pte_unmap_unlock(start_pte, ptl);
arch_leave_lazy_mmu_mode();
+ pte_unmap_unlock(start_pte, ptl);
cond_resched();
return ret;
@@ -2855,7 +2876,12 @@ static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
unsigned long nr_pages)
{
struct folio *folio = page_folio(page);
- int count = folio_precise_page_mapcount(folio, page);
+ int count;
+
+ if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
+ count = folio_precise_page_mapcount(folio, page);
+ else
+ count = folio_average_page_mapcount(folio);
md->pages += nr_pages;
if (pte_dirty || folio_test_dirty(folio))
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 557cf9d40177..f8b9c9c73997 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -563,7 +563,7 @@ void pstore_unregister(struct pstore_info *psi)
pstore_unregister_kmsg();
/* Stop timer and make sure all work has finished. */
- del_timer_sync(&pstore_timer);
+ timer_delete_sync(&pstore_timer);
flush_work(&pstore_work);
/* Remove all backend records from filesystem tree. */
diff --git a/fs/smb/client/cifs_fs_sb.h b/fs/smb/client/cifs_fs_sb.h
index 651759192280..5e8d163cb5f8 100644
--- a/fs/smb/client/cifs_fs_sb.h
+++ b/fs/smb/client/cifs_fs_sb.h
@@ -49,6 +49,7 @@
struct cifs_sb_info {
struct rb_root tlink_tree;
+ struct list_head tcon_sb_link;
spinlock_t tlink_tree_lock;
struct tcon_link *master_tlink;
struct nls_table *local_nls;
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
index 64bd68f750f8..63b3b1290bed 100644
--- a/fs/smb/client/cifsacl.c
+++ b/fs/smb/client/cifsacl.c
@@ -811,7 +811,23 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
return;
for (i = 0; i < num_aces; ++i) {
+ if (end_of_acl - acl_base < acl_size)
+ break;
+
ppace[i] = (struct smb_ace *) (acl_base + acl_size);
+ acl_base = (char *)ppace[i];
+ acl_size = offsetof(struct smb_ace, sid) +
+ offsetof(struct smb_sid, sub_auth);
+
+ if (end_of_acl - acl_base < acl_size ||
+ ppace[i]->sid.num_subauth == 0 ||
+ ppace[i]->sid.num_subauth > SID_MAX_SUB_AUTHORITIES ||
+ (end_of_acl - acl_base <
+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth) ||
+ (le16_to_cpu(ppace[i]->size) <
+ acl_size + sizeof(__le32) * ppace[i]->sid.num_subauth))
+ break;
+
#ifdef CONFIG_CIFS_DEBUG2
dump_ace(ppace[i], end_of_acl);
#endif
@@ -855,7 +871,6 @@ static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
(void *)ppace[i],
sizeof(struct smb_ace)); */
- acl_base = (char *)ppace[i];
acl_size = le16_to_cpu(ppace[i]->size);
}
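
The new checks in parse_dacl() verify, for each ACE, that the fixed header fits in the remaining buffer, that the SID sub-authority count is in range, and that the ACE's self-declared size covers what will be read, before anything is dereferenced or the cursor advances. The same walk-variable-length-records pattern in standalone form (the record layout is simplified, not the SMB ACE wire format):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct rec {              /* simplified variable-length record */
    uint16_t size;        /* total size of this record, including header */
    uint8_t  payload[];
};

static void walk(const uint8_t *buf, size_t len)
{
    const uint8_t *p = buf, *end = buf + len;

    while ((size_t)(end - p) >= sizeof(struct rec)) {  /* header fits? */
        const struct rec *r = (const struct rec *)p;
        uint16_t sz = r->size;   /* host-endian for brevity */

        /* Reject records whose declared size is impossible: smaller than
         * the header or overrunning the remaining buffer. */
        if (sz < sizeof(struct rec) || sz > (size_t)(end - p))
            break;
        printf("record of %u bytes\n", sz);
        p += sz;          /* advance by the validated, declared size */
    }
}

int main(void)
{
    /* A 4-byte record followed by a 3-byte record. */
    uint8_t buf[8] = { 4, 0, 'a', 'b', 3, 0, 'c', 0 };

    walk(buf, sizeof(buf));
    return 0;
}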
@@ -1550,7 +1565,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
int rc = 0;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
struct smb_version_operations *ops;
- const u32 info = 0;
+ const u32 info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO;
cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
@@ -1604,7 +1619,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
struct tcon_link *tlink;
struct smb_version_operations *ops;
bool mode_from_sid, id_from_sid;
- const u32 info = 0;
+ const u32 info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO;
bool posix;
tlink = cifs_sb_tlink(cifs_sb);
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index e69968e88fe7..35892df7335c 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -704,18 +704,12 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
cifs_free_hash(&server->secmech.md5);
cifs_free_hash(&server->secmech.sha512);
- if (!SERVER_IS_CHAN(server)) {
- if (server->secmech.enc) {
- crypto_free_aead(server->secmech.enc);
- server->secmech.enc = NULL;
- }
-
- if (server->secmech.dec) {
- crypto_free_aead(server->secmech.dec);
- server->secmech.dec = NULL;
- }
- } else {
+ if (server->secmech.enc) {
+ crypto_free_aead(server->secmech.enc);
server->secmech.enc = NULL;
+ }
+ if (server->secmech.dec) {
+ crypto_free_aead(server->secmech.dec);
server->secmech.dec = NULL;
}
}
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index 6a3bd652d251..a08c42363ffc 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -637,6 +637,10 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
cifs_sb->ctx->dir_mode);
if (cifs_sb->ctx->iocharset)
seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
+ if (tcon->ses->unicode == 0)
+ seq_puts(s, ",nounicode");
+ else if (tcon->ses->unicode == 1)
+ seq_puts(s, ",unicode");
if (tcon->seal)
seq_puts(s, ",seal");
else if (tcon->ses->server->ignore_signature)
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 8dea0cf3a8de..ca435a3841b8 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -135,7 +135,6 @@ extern ssize_t cifs_file_copychunk_range(unsigned int xid,
extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern void cifs_setsize(struct inode *inode, loff_t offset);
-extern int cifs_truncate_page(struct address_space *mapping, loff_t from);
struct smb3_fs_context;
extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
@@ -146,6 +145,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 53
-#define CIFS_VERSION "2.53"
+#define SMB3_PRODUCT_BUILD 54
+#define CIFS_VERSION "2.54"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index cddeb2adbf4a..3b32116b0b49 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -625,10 +625,8 @@ struct smb_version_operations {
bool (*is_status_io_timeout)(char *buf);
/* Check for STATUS_NETWORK_NAME_DELETED */
bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
- int (*parse_reparse_point)(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data);
+ struct reparse_data_buffer * (*get_reparse_point_buffer)(const struct kvec *rsp_iov,
+ u32 *plen);
int (*create_reparse_symlink)(const unsigned int xid,
struct inode *inode,
struct dentry *dentry,
@@ -653,6 +651,7 @@ struct smb_version_values {
unsigned int cap_unix;
unsigned int cap_nt_find;
unsigned int cap_large_files;
+ unsigned int cap_unicode;
__u16 signing_enabled;
__u16 signing_required;
size_t create_lease_size;
@@ -713,6 +712,8 @@ struct TCP_Server_Info {
spinlock_t srv_lock; /* protect anything here that is not protected */
__u64 conn_id; /* connection identifier (useful for debugging) */
int srv_count; /* reference counter */
+	int rfc1001_sessinit; /* whether to establish a netbios session */
+	bool with_rfc1001; /* whether a netbios session is used */
/* 15 character server name + 0x20 16th byte indicating type = srv */
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
struct smb_version_operations *ops;
@@ -1120,6 +1121,7 @@ struct cifs_ses {
bool sign; /* is signing required? */
bool domainAuto:1;
bool expired_pwd; /* track if access denied or expired pwd so can know if need to update */
+ int unicode;
unsigned int flags;
__u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
@@ -1319,7 +1321,8 @@ struct cifs_tcon {
#endif
struct list_head pending_opens; /* list of incomplete opens */
struct cached_fids *cfids;
- /* BB add field for back pointer to sb struct(s)? */
+ struct list_head cifs_sb_list;
+ spinlock_t sb_list_lock;
#ifdef CONFIG_CIFS_DFS_UPCALL
struct delayed_work dfs_cache_work;
struct list_head dfs_ses_list;
@@ -1716,6 +1719,7 @@ struct mid_q_entry {
void *resp_buf; /* pointer to received SMB header */
unsigned int resp_buf_size;
int mid_state; /* wish this were enum but can not pass to wait_event */
+ int mid_rc; /* rc for MID_RC */
unsigned int mid_flags;
__le16 command; /* smb command code */
unsigned int optype; /* operation type */
@@ -1878,6 +1882,7 @@ static inline bool is_replayable_error(int error)
#define MID_RESPONSE_MALFORMED 0x10
#define MID_SHUTDOWN 0x20
#define MID_RESPONSE_READY 0x40 /* ready for other process handle the rsp */
+#define MID_RC 0x80 /* mid_rc contains custom rc */
/* Flags */
#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index 48d0d6f439cf..18d67ab113f0 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -2256,6 +2256,8 @@ typedef struct {
#define FILE_SUPPORTS_ENCRYPTION 0x00020000
#define FILE_SUPPORTS_OBJECT_IDS 0x00010000
#define FILE_VOLUME_IS_COMPRESSED 0x00008000
+#define FILE_SUPPORTS_POSIX_UNLINK_RENAME 0x00000400
+#define FILE_RETURNS_CLEANUP_RESULT_INFO 0x00000200
#define FILE_SUPPORTS_REMOTE_STORAGE 0x00000100
#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080
#define FILE_SUPPORTS_SPARSE_FILES 0x00000040
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 81680001944d..cfcc07905bdf 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -31,6 +31,9 @@ extern void cifs_small_buf_release(void *);
extern void free_rsp_buf(int, void *);
extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
unsigned int /* length */);
+extern int smb_send_kvec(struct TCP_Server_Info *server,
+ struct msghdr *msg,
+ size_t *sent);
extern unsigned int _get_xid(void);
extern void _free_xid(unsigned int);
#define get_xid() \
@@ -592,7 +595,6 @@ int cifs_async_readv(struct cifs_io_subrequest *rdata);
int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
void cifs_async_writev(struct cifs_io_subrequest *wdata);
-void cifs_writev_complete(struct work_struct *work);
int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 4fc9485c5d91..60cb264a01e5 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -437,7 +437,10 @@ CIFSSMBNegotiate(const unsigned int xid,
return rc;
pSMB->hdr.Mid = get_next_mid(server);
- pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
+ pSMB->hdr.Flags2 |= SMBFLG2_ERR_STATUS;
+
+ if (ses->unicode != 0)
+ pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
if (should_set_ext_sec_flag(ses->sectype)) {
cifs_dbg(FYI, "Requesting extended security\n");
@@ -1038,15 +1041,31 @@ static __u16 convert_disposition(int disposition)
static int
access_flags_to_smbopen_mode(const int access_flags)
{
- int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE);
-
- if (masked_flags == GENERIC_READ)
- return SMBOPEN_READ;
- else if (masked_flags == GENERIC_WRITE)
+ /*
+	 * SYSTEM_SECURITY grants both read and write access to the SACL, so treat it as read/write.
+	 * MAXIMUM_ALLOWED grants as much access as possible, so treat it as read/write too.
+	 * SYNCHRONIZE by itself does not grant any specific access, so do not check its mask.
+	 * If only the SYNCHRONIZE bit is specified then fall back to read access.
+ */
+ bool with_write_flags = access_flags & (FILE_WRITE_DATA | FILE_APPEND_DATA | FILE_WRITE_EA |
+ FILE_DELETE_CHILD | FILE_WRITE_ATTRIBUTES | DELETE |
+ WRITE_DAC | WRITE_OWNER | SYSTEM_SECURITY |
+ MAXIMUM_ALLOWED | GENERIC_WRITE | GENERIC_ALL);
+ bool with_read_flags = access_flags & (FILE_READ_DATA | FILE_READ_EA | FILE_EXECUTE |
+ FILE_READ_ATTRIBUTES | READ_CONTROL |
+ SYSTEM_SECURITY | MAXIMUM_ALLOWED | GENERIC_ALL |
+ GENERIC_EXECUTE | GENERIC_READ);
+ bool with_execute_flags = access_flags & (FILE_EXECUTE | MAXIMUM_ALLOWED | GENERIC_ALL |
+ GENERIC_EXECUTE);
+
+ if (with_write_flags && with_read_flags)
+ return SMBOPEN_READWRITE;
+ else if (with_write_flags)
return SMBOPEN_WRITE;
-
- /* just go for read/write */
- return SMBOPEN_READWRITE;
+ else if (with_execute_flags)
+ return SMBOPEN_EXECUTE;
+ else
+ return SMBOPEN_READ;
}
int
@@ -2709,6 +2728,9 @@ int cifs_query_reparse_point(const unsigned int xid,
if (cap_unix(tcon->ses))
return -EOPNOTSUPP;
+ if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS))
+ return -EOPNOTSUPP;
+
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
@@ -3400,8 +3422,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
/* BB TEST with big acls that might need to be e.g. larger than 16K */
pSMB->MaxSetupCount = 0;
pSMB->Fid = fid; /* file handle always le */
- pSMB->AclFlags = cpu_to_le32(CIFS_ACL_OWNER | CIFS_ACL_GROUP |
- CIFS_ACL_DACL | info);
+ pSMB->AclFlags = cpu_to_le32(info);
pSMB->ByteCount = cpu_to_le16(11); /* 3 bytes pad + 8 bytes parm */
inc_rfc1001_len(pSMB, 11);
iov[0].iov_base = (char *)pSMB;
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index 73f93a35eedd..4a0b2d220fe8 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -300,6 +300,7 @@ cifs_abort_connection(struct TCP_Server_Info *server)
server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
+ put_net(cifs_net_ns(server));
}
server->sequence_number = 0;
server->session_estab = false;
@@ -370,7 +371,7 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num
*
*/
static int __cifs_reconnect(struct TCP_Server_Info *server,
- bool mark_smb_session)
+ bool mark_smb_session, bool once)
{
int rc = 0;
@@ -398,6 +399,9 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
if (rc) {
cifs_server_unlock(server);
cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
+			/* If asked to reconnect only once, do not retry further */
+ if (once)
+ break;
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
@@ -563,19 +567,33 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
return rc;
}
-int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
+static int
+_cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session, bool once)
{
if (!server->leaf_fullpath)
- return __cifs_reconnect(server, mark_smb_session);
+ return __cifs_reconnect(server, mark_smb_session, once);
return reconnect_dfs_server(server);
}
#else
-int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
+static int
+_cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session, bool once)
{
- return __cifs_reconnect(server, mark_smb_session);
+ return __cifs_reconnect(server, mark_smb_session, once);
}
#endif
+int
+cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
+{
+ return _cifs_reconnect(server, mark_smb_session, false);
+}
+
+static int
+cifs_reconnect_once(struct TCP_Server_Info *server)
+{
+ return _cifs_reconnect(server, true, true);
+}
+
static void
cifs_echo_request(struct work_struct *work)
{
@@ -802,26 +820,110 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
/* Regular SMB response */
return true;
case RFC1002_SESSION_KEEP_ALIVE:
+ /*
+ * An RFC 1002 session keep alive can be sent by the server only
+ * after an RFC 1002 session was established. But Samba servers
+ * send RFC 1002 session keep alives also over port 445, where no
+ * RFC 1002 session is established.
+ */
cifs_dbg(FYI, "RFC 1002 session keep alive\n");
break;
case RFC1002_POSITIVE_SESSION_RESPONSE:
- cifs_dbg(FYI, "RFC 1002 positive session response\n");
+ /*
+ * An RFC 1002 positive session response cannot be returned
+ * for an SMB request. RFC 1002 session responses are handled
+ * exclusively in the ip_rfc1001_connect() function.
+ */
+ cifs_server_dbg(VFS, "RFC 1002 positive session response (unexpected)\n");
+ cifs_reconnect(server, true);
break;
case RFC1002_NEGATIVE_SESSION_RESPONSE:
/*
* We get this from Windows 98 instead of an error on
- * SMB negprot response.
- */
- cifs_dbg(FYI, "RFC 1002 negative session response\n");
- /* give server a second to clean up */
- msleep(1000);
- /*
- * Always try 445 first on reconnect since we get NACK
- * on some if we ever connected to port 139 (the NACK
- * is since we do not begin with RFC1001 session
- * initialize frame).
+ * SMB negprot response, when we have not established an
+ * RFC 1002 session (which means ip_rfc1001_connect()
+ * was skipped). Note that the same still happens with
+ * Windows Server 2022 when connecting via port 139.
+ * So in this case, when the mount option -o nonbsessinit
+ * was not specified, try to reconnect and establish an
+ * RFC 1002 session. If the new socket establishment with
+ * an RFC 1002 session was successful then return -EAGAIN
+ * to the mid's caller, so it can retry the request.
*/
- cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
+ if (!cifs_rdma_enabled(server) &&
+ server->tcpStatus == CifsInNegotiate &&
+ !server->with_rfc1001 &&
+ server->rfc1001_sessinit != 0) {
+ int rc, mid_rc;
+ struct mid_q_entry *mid, *nmid;
+ LIST_HEAD(dispose_list);
+
+ cifs_dbg(FYI, "RFC 1002 negative session response during SMB Negotiate, retrying with NetBIOS session\n");
+
+ /*
+ * Before reconnecting, delete all pending mids for this
+ * server, so that reconnect does not signal a connection
+ * aborted error to the mids' callbacks. Note that for this
+ * server there should be exactly one pending mid,
+ * corresponding to the SMB1/SMB2 Negotiate packet.
+ */
+ spin_lock(&server->mid_lock);
+ list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
+ kref_get(&mid->refcount);
+ list_move(&mid->qhead, &dispose_list);
+ mid->mid_flags |= MID_DELETED;
+ }
+ spin_unlock(&server->mid_lock);
+
+ /* Now try to reconnect once with NetBIOS session. */
+ server->with_rfc1001 = true;
+ rc = cifs_reconnect_once(server);
+
+ /*
+ * If the reconnect was successful then indicate -EAGAIN
+ * to the mid's caller. If the reconnect failed with -EAGAIN
+ * then mask it as -EHOSTDOWN, so the mid's caller knows
+ * that it failed.
+ */
+ if (rc == 0)
+ mid_rc = -EAGAIN;
+ else if (rc == -EAGAIN)
+ mid_rc = -EHOSTDOWN;
+ else
+ mid_rc = rc;
+
+ /*
+ * After reconnect (either successful or unsuccessful)
+ * deliver the reconnect status to the mid's caller via the
+ * mid's callback. Use the MID_RC state, which indicates that
+ * the return code should be read from the mid_rc member.
+ */
+ list_for_each_entry_safe(mid, nmid, &dispose_list, qhead) {
+ list_del_init(&mid->qhead);
+ mid->mid_rc = mid_rc;
+ mid->mid_state = MID_RC;
+ mid->callback(mid);
+ release_mid(mid);
+ }
+
+ /*
+ * If the reconnect failed then wait two seconds. In most
+ * cases we were called from the mount context, and the
+ * failure delivered to the mid's callback will stop this
+ * receiver task thread and fail the mount process.
+ * So wait two seconds to prevent another reconnect
+ * in this task thread, which would be useless as the
+ * mount context is going to fail anyway.
+ */
+ if (rc != 0)
+ msleep(2000);
+ } else {
+ cifs_server_dbg(VFS, "RFC 1002 negative session response (unexpected)\n");
+ cifs_reconnect(server, true);
+ }
+ break;
+ case RFC1002_RETARGET_SESSION_RESPONSE:
+ cifs_server_dbg(VFS, "RFC 1002 retarget session response (unexpected)\n");
cifs_reconnect(server, true);
break;
default:
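The mid tear-down above follows a common two-phase pattern: detach the entries from the shared queue while holding the spinlock, then complete them with no lock held so the callbacks cannot deadlock on mid_lock. A minimal user-space analogue of that pattern (illustrative names, and pthreads in place of kernel primitives):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct mid {
	struct mid *next;
	int rc;
};

static pthread_mutex_t mid_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mid *pending_q;

static void fail_all_pending(int rc)
{
	struct mid *dispose, *m;

	pthread_mutex_lock(&mid_lock);
	dispose = pending_q;		/* detach the whole queue under the lock */
	pending_q = NULL;
	pthread_mutex_unlock(&mid_lock);

	while ((m = dispose) != NULL) {	/* complete entries with no lock held */
		dispose = m->next;
		m->rc = rc;		/* analogous to setting mid_rc + MID_RC */
		printf("mid completed with rc=%d\n", m->rc);
		free(m);
	}
}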
@@ -1676,6 +1778,7 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
/* Grab netns reference for this server. */
cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
+ tcp_ses->sign = ctx->sign;
tcp_ses->conn_id = atomic_inc_return(&tcpSesNextId);
tcp_ses->noblockcnt = ctx->rootfs;
tcp_ses->noblocksnd = ctx->noblocksnd || ctx->rootfs;
@@ -1699,6 +1802,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
ctx->source_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
memcpy(tcp_ses->server_RFC1001_name,
ctx->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
+ tcp_ses->rfc1001_sessinit = ctx->rfc1001_sessinit;
+ tcp_ses->with_rfc1001 = false;
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
tcp_ses->channel_sequence_num = 0; /* only tracked for primary channel */
@@ -1729,12 +1834,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
*/
tcp_ses->tcpStatus = CifsNew;
++tcp_ses->srv_count;
+ tcp_ses->echo_interval = ctx->echo_interval * HZ;
- if (ctx->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
- ctx->echo_interval <= SMB_ECHO_INTERVAL_MAX)
- tcp_ses->echo_interval = ctx->echo_interval * HZ;
- else
- tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
if (tcp_ses->rdma) {
#ifndef CONFIG_CIFS_SMB_DIRECT
cifs_dbg(VFS, "CONFIG_CIFS_SMB_DIRECT is not enabled\n");
@@ -2349,6 +2450,7 @@ retry_old_session:
ses->cred_uid = ctx->cred_uid;
ses->linux_uid = ctx->linux_uid;
+ ses->unicode = ctx->unicode;
ses->sectype = ctx->sectype;
ses->sign = ctx->sign;
@@ -2454,6 +2556,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
return 0;
if (tcon->nodelete != ctx->nodelete)
return 0;
+ if (tcon->posix_extensions != ctx->linux_ext)
+ return 0;
return 1;
}
@@ -3026,6 +3130,44 @@ bind_socket(struct TCP_Server_Info *server)
}
static int
+smb_recv_kvec(struct TCP_Server_Info *server, struct msghdr *msg, size_t *recv)
+{
+ int rc = 0;
+ int retries = 0;
+ int msg_flags = server->noblocksnd ? MSG_DONTWAIT : 0;
+
+ *recv = 0;
+
+ while (msg_data_left(msg)) {
+ rc = sock_recvmsg(server->ssocket, msg, msg_flags);
+ if (rc == -EAGAIN) {
+ retries++;
+ if (retries >= 14 ||
+ (!server->noblocksnd && (retries > 2))) {
+ cifs_server_dbg(VFS, "sends on sock %p stuck for 15 seconds\n",
+ server->ssocket);
+ return -EAGAIN;
+ }
+ msleep(1 << retries);
+ continue;
+ }
+
+ if (rc < 0)
+ return rc;
+
+ if (rc == 0) {
+ cifs_dbg(FYI, "Received no data (TCP RST)\n");
+ return -ECONNABORTED;
+ }
+
+ /* recv was at least partially successful */
+ *recv += rc;
+		retries = 0; /* reset the retry counter after a successful receive */
+ }
+ return 0;
+}
+
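The -EAGAIN path above sleeps msleep(1 << retries) milliseconds per attempt and gives up once retries reaches 14, so the worst-case wait is the sum of 2^1 through 2^13 ms. A stand-alone check of that arithmetic, confirming it matches the "stuck for 15 seconds" message:

#include <stdio.h>

int main(void)
{
	unsigned int retries, total_ms = 0;

	for (retries = 1; retries < 14; retries++)
		total_ms += 1U << retries;	/* mirrors msleep(1 << retries) */
	printf("worst-case backoff: %u ms\n", total_ms);	/* 16382 ms, ~15 s */
	return 0;
}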
+static int
ip_rfc1001_connect(struct TCP_Server_Info *server)
{
int rc = 0;
@@ -3035,8 +3177,12 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
* sessinit is sent but no second negprot
*/
struct rfc1002_session_packet req = {};
- struct smb_hdr *smb_buf = (struct smb_hdr *)&req;
+ struct rfc1002_session_packet resp = {};
+ struct msghdr msg = {};
+ struct kvec iov = {};
unsigned int len;
+ size_t sent;
+ size_t recv;
req.trailer.session_req.called_len = sizeof(req.trailer.session_req.called_name);
@@ -3065,19 +3211,119 @@ ip_rfc1001_connect(struct TCP_Server_Info *server)
* As per rfc1002, @len must be the number of bytes that follows the
* length field of a rfc1002 session request payload.
*/
- len = sizeof(req) - offsetof(struct rfc1002_session_packet, trailer.session_req);
+ len = sizeof(req.trailer.session_req);
+ req.type = RFC1002_SESSION_REQUEST;
+ req.flags = 0;
+ req.length = cpu_to_be16(len);
+ len += offsetof(typeof(req), trailer.session_req);
+ iov.iov_base = &req;
+ iov.iov_len = len;
+ iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, len);
+ rc = smb_send_kvec(server, &msg, &sent);
+ if (rc < 0 || len != sent)
+ return (rc == -EINTR || rc == -EAGAIN) ? rc : -ECONNABORTED;
- smb_buf->smb_buf_length = cpu_to_be32((RFC1002_SESSION_REQUEST << 24) | len);
- rc = smb_send(server, smb_buf, len);
/*
* RFC1001 layer in at least one server requires very short break before
* negprot presumably because not expecting negprot to follow so fast.
- * This is a simple solution that works without complicating the code
- * and causes no significant slowing down on mount for everyone else
+	 * For example, DOS SMB servers cannot process negprot if it is
+	 * received before the server has sent the response to the
+	 * SESSION_REQUEST packet. So wait for the response, read it and
+	 * parse it, as it can contain useful error information (e.g. the
+	 * specified server name was incorrect). For example, even the
+	 * latest Windows Server 2022 SMB1 server over port 139 sends an
+	 * error if the server name in the SESSION_REQUEST packet was
+	 * incorrect. Nowadays usage of port 139 is not common, so waiting
+	 * for the reply here does not slow down mounting in the common
+	 * case (over port 445).
*/
- usleep_range(1000, 2000);
+ len = offsetof(typeof(resp), trailer);
+ iov.iov_base = &resp;
+ iov.iov_len = len;
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
+ rc = smb_recv_kvec(server, &msg, &recv);
+ if (rc < 0 || recv != len)
+ return (rc == -EINTR || rc == -EAGAIN) ? rc : -ECONNABORTED;
+
+ switch (resp.type) {
+ case RFC1002_POSITIVE_SESSION_RESPONSE:
+ if (be16_to_cpu(resp.length) != 0) {
+ cifs_dbg(VFS, "RFC 1002 positive session response but with invalid non-zero length %u\n",
+ be16_to_cpu(resp.length));
+ return -EIO;
+ }
+ cifs_dbg(FYI, "RFC 1002 positive session response");
+ break;
+ case RFC1002_NEGATIVE_SESSION_RESPONSE:
+ /* Read RFC1002 response error code and convert it to errno in rc */
+ len = sizeof(resp.trailer.neg_ses_resp_error_code);
+ iov.iov_base = &resp.trailer.neg_ses_resp_error_code;
+ iov.iov_len = len;
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
+ if (be16_to_cpu(resp.length) == len &&
+ smb_recv_kvec(server, &msg, &recv) == 0 &&
+ recv == len) {
+ cifs_dbg(VFS, "RFC 1002 negative session response with error 0x%x\n",
+ resp.trailer.neg_ses_resp_error_code);
+ switch (resp.trailer.neg_ses_resp_error_code) {
+ case RFC1002_NOT_LISTENING_CALLED:
+ /* server does not listen for specified server name */
+ fallthrough;
+ case RFC1002_NOT_PRESENT:
+ /* server name is incorrect */
+ rc = -ENOENT;
+ cifs_dbg(VFS, "Server rejected NetBIOS servername %.15s\n",
+ server->server_RFC1001_name[0] ?
+ server->server_RFC1001_name :
+ DEFAULT_CIFS_CALLED_NAME);
+ cifs_dbg(VFS, "Specify correct NetBIOS servername in source path or with -o servern= option\n");
+ break;
+ case RFC1002_NOT_LISTENING_CALLING:
+ /* client name was not accepted by server */
+ rc = -EACCES;
+ cifs_dbg(VFS, "Server rejected NetBIOS clientname %.15s\n",
+ server->workstation_RFC1001_name[0] ?
+ server->workstation_RFC1001_name :
+ "LINUX_CIFS_CLNT");
+ cifs_dbg(VFS, "Specify correct NetBIOS clientname with -o netbiosname= option\n");
+ break;
+ case RFC1002_INSUFFICIENT_RESOURCE:
+ /* remote server resource error */
+ rc = -EREMOTEIO;
+ break;
+ case RFC1002_UNSPECIFIED_ERROR:
+ default:
+ /* other/unknown error */
+ rc = -EIO;
+ break;
+ }
+ } else {
+ cifs_dbg(VFS, "RFC 1002 negative session response\n");
+ rc = -EIO;
+ }
+ return rc;
+ case RFC1002_RETARGET_SESSION_RESPONSE:
+ cifs_dbg(VFS, "RFC 1002 retarget session response\n");
+ if (be16_to_cpu(resp.length) == sizeof(resp.trailer.retarget_resp)) {
+ len = sizeof(resp.trailer.retarget_resp);
+ iov.iov_base = &resp.trailer.retarget_resp;
+ iov.iov_len = len;
+ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, len);
+ if (smb_recv_kvec(server, &msg, &recv) == 0 && recv == len) {
+ cifs_dbg(VFS, "Server wants to redirect connection\n");
+ cifs_dbg(VFS, "Remount with options -o ip=%pI4,port=%u\n",
+ &resp.trailer.retarget_resp.retarget_ip_addr,
+ be16_to_cpu(resp.trailer.retarget_resp.port));
+ }
+ }
+ cifs_dbg(VFS, "Closing connection\n");
+ /* FIXME: Should we automatically redirect to new retarget_resp server? */
+ return -EMULTIHOP;
+ default:
+ cifs_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", resp.type);
+ return -EIO;
+ }
- return rc;
+ server->with_rfc1001 = true;
+ return 0;
}
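For reference, the 4-byte RFC 1002 session packet header that this function writes and reads has the layout sketched below (RFC 1002 section 4.3; the struct is illustrative, not the kernel's rfc1002_session_packet definition):

#include <stdint.h>

struct rfc1002_hdr {
	uint8_t  type;		/* 0x81 session request, 0x82 positive response,
				 * 0x83 negative response, 0x84 retarget,
				 * 0x85 keep alive */
	uint8_t  flags;		/* only a length-extension bit is defined */
	uint16_t length;	/* big-endian byte count of the trailer */
} __attribute__((packed));

The trailer is then a pair of encoded NetBIOS names for a session request, a one-byte error code for a negative response, or an IPv4 address plus port for a retarget response, exactly as parsed above.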
static int
@@ -3123,8 +3369,12 @@ generic_ip_connect(struct TCP_Server_Info *server)
/*
* Grab netns reference for the socket.
*
- * It'll be released here, on error, or in clean_demultiplex_info() upon server
- * teardown.
+ * This reference will be released in several situations:
+ * - In the failure path before the cifsd thread is started.
+	 * - In every place where server->ssocket is released; there it
+	 *   is also set to NULL.
+ * - Ultimately in clean_demultiplex_info(), during the final
+ * teardown.
*/
get_net(net);
@@ -3140,10 +3390,8 @@ generic_ip_connect(struct TCP_Server_Info *server)
}
rc = bind_socket(server);
- if (rc < 0) {
- put_net(cifs_net_ns(server));
+ if (rc < 0)
return rc;
- }
/*
* Eventually check for other socket options to change from
@@ -3186,11 +3434,17 @@ generic_ip_connect(struct TCP_Server_Info *server)
return rc;
}
trace_smb3_connect_done(server->hostname, server->conn_id, &server->dstaddr);
- if (sport == htons(RFC1001_PORT))
- rc = ip_rfc1001_connect(server);
- if (rc < 0)
- put_net(cifs_net_ns(server));
+ /*
+	 * Establish an RFC1001 NetBIOS session when it was explicitly
+	 * requested by the mount option -o nbsessinit, or when connecting
+	 * to the default RFC1001 server port (139) and it was not
+	 * explicitly disabled by the mount option -o nonbsessinit.
+ */
+ if (server->with_rfc1001 ||
+ server->rfc1001_sessinit == 1 ||
+ (server->rfc1001_sessinit == -1 && sport == htons(RFC1001_PORT)))
+ rc = ip_rfc1001_connect(server);
return rc;
}
@@ -3338,6 +3592,7 @@ int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
struct smb3_fs_context *ctx = cifs_sb->ctx;
INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
+ INIT_LIST_HEAD(&cifs_sb->tcon_sb_link);
spin_lock_init(&cifs_sb->tlink_tree_lock);
cifs_sb->tlink_tree = RB_ROOT;
@@ -3570,6 +3825,10 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
spin_unlock(&cifs_sb->tlink_tree_lock);
+ spin_lock(&tcon->sb_list_lock);
+ list_add(&cifs_sb->tcon_sb_link, &tcon->cifs_sb_list);
+ spin_unlock(&tcon->sb_list_lock);
+
queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks,
TLINK_IDLE_EXPIRE);
return 0;
@@ -3911,9 +4170,19 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
struct rb_root *root = &cifs_sb->tlink_tree;
struct rb_node *node;
struct tcon_link *tlink;
+ struct cifs_tcon *tcon = NULL;
cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
+ if (cifs_sb->master_tlink) {
+ tcon = cifs_sb->master_tlink->tl_tcon;
+ if (tcon) {
+ spin_lock(&tcon->sb_list_lock);
+ list_del_init(&cifs_sb->tcon_sb_link);
+ spin_unlock(&tcon->sb_list_lock);
+ }
+ }
+
spin_lock(&cifs_sb->tlink_tree_lock);
while ((node = rb_first(root))) {
tlink = rb_entry(node, struct tcon_link, tl_rbnode);
@@ -3935,11 +4204,13 @@ int
cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
struct TCP_Server_Info *server)
{
+ bool in_retry = false;
int rc = 0;
if (!server->ops->need_neg || !server->ops->negotiate)
return -ENOSYS;
+retry:
/* only send once per connect */
spin_lock(&server->srv_lock);
if (server->tcpStatus != CifsGood &&
@@ -3959,6 +4230,14 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
spin_unlock(&server->srv_lock);
rc = server->ops->negotiate(xid, ses, server);
+ if (rc == -EAGAIN) {
+ /* Allow one retry attempt */
+ if (!in_retry) {
+ in_retry = true;
+ goto retry;
+ }
+ rc = -EHOSTDOWN;
+ }
if (rc == 0) {
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsInNegotiate)
@@ -3981,7 +4260,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
struct TCP_Server_Info *server,
struct nls_table *nls_info)
{
- int rc = -ENOSYS;
+ int rc = 0;
struct TCP_Server_Info *pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
@@ -4033,6 +4312,26 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
if (!linuxExtEnabled)
ses->capabilities &= (~server->vals->cap_unix);
+ /*
+	 * Check if the server supports the specified encoding mode.
+	 * A zero value in vals->cap_unicode indicates that the chosen
+	 * protocol dialect does not support non-UNICODE mode.
+ */
+ if (ses->unicode == 1 && server->vals->cap_unicode != 0 &&
+ !(server->capabilities & server->vals->cap_unicode)) {
+ cifs_dbg(VFS, "Server does not support mounting in UNICODE mode\n");
+ rc = -EOPNOTSUPP;
+ } else if (ses->unicode == 0 && server->vals->cap_unicode == 0) {
+ cifs_dbg(VFS, "Server does not support mounting in non-UNICODE mode\n");
+ rc = -EOPNOTSUPP;
+ } else if (ses->unicode == 0) {
+ /*
+		 * When UNICODE mode was explicitly disabled then
+		 * do not announce the client UNICODE capability.
+ */
+ ses->capabilities &= (~server->vals->cap_unicode);
+ }
+
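To summarize the three outcomes of this check (a restatement of the code above): unicode=1 on a dialect that advertises a unicode capability bit the server did not set fails with -EOPNOTSUPP; unicode=0 on a dialect whose vals->cap_unicode is zero (SMB2+, which is always UNICODE) also fails with -EOPNOTSUPP; and unicode=0 on SMB1 simply clears the announced client UNICODE capability. The default unicode=-1 autodetect accepts whatever the server offers.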
if (ses->auth_key.response) {
cifs_dbg(FYI, "Free previous auth_key.response = %p\n",
ses->auth_key.response);
@@ -4045,8 +4344,12 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
server->sec_mode, server->capabilities, server->timeAdj);
- if (server->ops->sess_setup)
- rc = server->ops->sess_setup(xid, ses, server, nls_info);
+ if (!rc) {
+ if (server->ops->sess_setup)
+ rc = server->ops->sess_setup(xid, ses, server, nls_info);
+ else
+ rc = -ENOSYS;
+ }
if (rc) {
cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
@@ -4116,6 +4419,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid)
ctx->seal = master_tcon->seal;
ctx->witness = master_tcon->use_witness;
ctx->dfs_root_ses = master_tcon->ses->dfs_root_ses;
+ ctx->unicode = master_tcon->ses->unicode;
rc = cifs_set_vol_auth(ctx, master_tcon->ses);
if (rc) {
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 9e4f7378f30f..8407fb108664 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -3082,7 +3082,7 @@ void cifs_oplock_break(struct work_struct *work)
cinode->oplock = 0;
}
- if (inode && S_ISREG(inode->i_mode)) {
+ if (S_ISREG(inode->i_mode)) {
if (CIFS_CACHE_READ(cinode))
break_lease(inode, O_RDONLY);
else
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 8c73d4d60d1a..2980941b9667 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -134,6 +134,8 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_flag("compress", Opt_compress),
fsparam_flag("witness", Opt_witness),
fsparam_flag_no("nativesocket", Opt_nativesocket),
+ fsparam_flag_no("unicode", Opt_unicode),
+ fsparam_flag_no("nbsessinit", Opt_nbsessinit),
/* Mount options which take uid or gid */
fsparam_uid("backupuid", Opt_backupuid),
@@ -963,6 +965,14 @@ static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
cifs_errorf(fc, "can not change iocharset during remount\n");
return -EINVAL;
}
+ if (new_ctx->unicode != old_ctx->unicode) {
+ cifs_errorf(fc, "can not change unicode during remount\n");
+ return -EINVAL;
+ }
+ if (new_ctx->rfc1001_sessinit != old_ctx->rfc1001_sessinit) {
+ cifs_errorf(fc, "can not change nbsessinit during remount\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -1118,6 +1128,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
int i, opt;
bool is_smb3 = !strcmp(fc->fs_type->name, "smb3");
bool skip_parsing = false;
+ char *hostname;
cifs_dbg(FYI, "CIFS: parsing cifs mount option '%s'\n", param->key);
@@ -1327,6 +1338,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
case Opt_rsize:
ctx->rsize = result.uint_32;
ctx->got_rsize = true;
+ ctx->vol_rsize = ctx->rsize;
break;
case Opt_wsize:
ctx->wsize = result.uint_32;
@@ -1342,6 +1354,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->wsize, PAGE_SIZE);
}
}
+ ctx->vol_wsize = ctx->wsize;
break;
case Opt_acregmax:
if (result.uint_32 > CIFS_MAX_ACTIMEO / HZ) {
@@ -1377,6 +1390,11 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->closetimeo = HZ * result.uint_32;
break;
case Opt_echo_interval:
+ if (result.uint_32 < SMB_ECHO_INTERVAL_MIN ||
+ result.uint_32 > SMB_ECHO_INTERVAL_MAX) {
+ cifs_errorf(fc, "echo interval is out of bounds\n");
+ goto cifs_parse_mount_err;
+ }
ctx->echo_interval = result.uint_32;
break;
case Opt_snapshot:
@@ -1443,6 +1461,16 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
cifs_errorf(fc, "OOM when copying UNC string\n");
goto cifs_parse_mount_err;
}
+ hostname = extract_hostname(ctx->UNC);
+ if (IS_ERR(hostname)) {
+ cifs_errorf(fc, "Cannot extract hostname from UNC string\n");
+ goto cifs_parse_mount_err;
+ }
+		/* last byte, type, is 0x20 for server type */
+ memset(ctx->target_rfc1001_name, 0x20, RFC1001_NAME_LEN_WITH_NULL);
+ for (i = 0; i < RFC1001_NAME_LEN && hostname[i] != 0; i++)
+ ctx->target_rfc1001_name[i] = toupper(hostname[i]);
+ kfree(hostname);
break;
case Opt_user:
kfree(ctx->username);
@@ -1586,6 +1614,10 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
if (i == RFC1001_NAME_LEN && param->string[i] != 0)
pr_warn("server netbiosname longer than 15 truncated\n");
break;
+ case Opt_nbsessinit:
+ ctx->rfc1001_sessinit = !result.negated;
+ cifs_dbg(FYI, "rfc1001_sessinit set to %d\n", ctx->rfc1001_sessinit);
+ break;
case Opt_ver:
/* version of mount userspace tools, not dialect */
/* If interface changes in mount.cifs bump to new ver */
@@ -1627,6 +1659,10 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->witness = true;
pr_warn_once("Witness protocol support is experimental\n");
break;
+ case Opt_unicode:
+ ctx->unicode = !result.negated;
+ cifs_dbg(FYI, "unicode set to %d\n", ctx->unicode);
+ break;
case Opt_rootfs:
#ifndef CONFIG_CIFS_ROOT
cifs_dbg(VFS, "rootfs support requires CONFIG_CIFS_ROOT config option\n");
@@ -1869,13 +1905,16 @@ int smb3_init_fs_context(struct fs_context *fc)
memset(ctx->source_rfc1001_name, 0x20, RFC1001_NAME_LEN);
for (i = 0; i < strnlen(nodename, RFC1001_NAME_LEN); i++)
ctx->source_rfc1001_name[i] = toupper(nodename[i]);
-
ctx->source_rfc1001_name[RFC1001_NAME_LEN] = 0;
+
/*
* null target name indicates to use *SMBSERVR default called name
* if we end up sending RFC1001 session initialize
*/
ctx->target_rfc1001_name[0] = 0;
+
+ ctx->rfc1001_sessinit = -1; /* autodetect based on port number */
+
ctx->cred_uid = current_uid();
ctx->linux_uid = current_uid();
ctx->linux_gid = current_gid();
@@ -1928,6 +1967,8 @@ int smb3_init_fs_context(struct fs_context *fc)
ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT;
ctx->nonativesocket = 0;
+ ctx->unicode = -1; /* autodetect, but prefer UNICODE mode */
+
/*
* short int override_uid = -1;
* short int override_gid = -1;
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index 881bfc08667e..d1d29249bcdb 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -135,6 +135,7 @@ enum cifs_param {
Opt_witness,
Opt_is_upcall_target_mount,
Opt_is_upcall_target_application,
+ Opt_unicode,
/* Mount options which take numeric value */
Opt_backupuid,
@@ -173,6 +174,7 @@ enum cifs_param {
Opt_iocharset,
Opt_netbiosname,
Opt_servern,
+ Opt_nbsessinit,
Opt_ver,
Opt_vers,
Opt_sec,
@@ -215,6 +217,7 @@ struct smb3_fs_context {
char *iocharset; /* local code page for mapping to and from Unicode */
char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
+ int rfc1001_sessinit;
kuid_t cred_uid;
kuid_t linux_uid;
kgid_t linux_gid;
@@ -279,6 +282,9 @@ struct smb3_fs_context {
bool use_client_guid:1;
/* reuse existing guid for multichannel */
u8 client_guid[SMB2_CLIENT_GUID_SIZE];
+	/* User-specified original rsize/wsize values */
+ unsigned int vol_rsize;
+ unsigned int vol_wsize;
unsigned int bsize;
unsigned int rasize;
unsigned int rsize;
@@ -306,6 +312,7 @@ struct smb3_fs_context {
bool compress; /* enable SMB2 messages (READ/WRITE) de/compression */
bool rootfs:1; /* if it's a SMB root file system */
bool witness:1; /* use witness protocol */
+ int unicode;
char *leaf_fullpath;
struct cifs_ses *dfs_root_ses;
bool dfs_automount:1; /* set for dfs automount only */
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 3bb21aa58474..75be4b46bc6f 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -1203,18 +1203,17 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
goto out;
}
break;
- case IO_REPARSE_TAG_MOUNT_POINT:
- cifs_create_junction_fattr(fattr, sb);
- rc = 0;
- goto out;
default:
/* Check for cached reparse point data */
if (data->symlink_target || data->reparse.buf) {
rc = 0;
- } else if (iov && server->ops->parse_reparse_point) {
- rc = server->ops->parse_reparse_point(cifs_sb,
- full_path,
- iov, data);
+ } else if (iov && server->ops->get_reparse_point_buffer) {
+ struct reparse_data_buffer *reparse_buf;
+ u32 reparse_len;
+
+ reparse_buf = server->ops->get_reparse_point_buffer(iov, &reparse_len);
+ rc = parse_reparse_point(reparse_buf, reparse_len,
+ cifs_sb, full_path, data);
/*
* If the reparse point was not handled but it is the
* name surrogate which points to directory, then treat
@@ -1228,6 +1227,16 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
cifs_create_junction_fattr(fattr, sb);
goto out;
}
+ /*
+			 * If the reparse point is unsupported by the Linux SMB
+			 * client then let it be processed by the SMB server. So
+			 * mask the -EOPNOTSUPP error code. This will allow the
+			 * Linux SMB client to send an SMB OPEN request to the
+			 * server. If the server does not support this reparse
+			 * point either, it will return an error when opening
+			 * the path.
+ */
+ if (rc == -EOPNOTSUPP)
+ rc = 0;
}
if (data->reparse.tag == IO_REPARSE_TAG_SYMLINK && !rc) {
@@ -2901,23 +2910,6 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
return -EOPNOTSUPP;
}
-int cifs_truncate_page(struct address_space *mapping, loff_t from)
-{
- pgoff_t index = from >> PAGE_SHIFT;
- unsigned offset = from & (PAGE_SIZE - 1);
- struct page *page;
- int rc = 0;
-
- page = grab_cache_page(mapping, index);
- if (!page)
- return -ENOMEM;
-
- zero_user_segment(page, offset, PAGE_SIZE);
- unlock_page(page);
- put_page(page);
- return rc;
-}
-
void cifs_setsize(struct inode *inode, loff_t offset)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
@@ -3012,8 +3004,6 @@ set_size_out:
*/
attrs->ia_ctime = attrs->ia_mtime = current_time(inode);
attrs->ia_valid |= ATTR_CTIME | ATTR_MTIME;
-
- cifs_truncate_page(inode->i_mapping, inode->i_size);
}
return rc;
diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
index 6e6c09cc5ce7..769752ad2c5c 100644
--- a/fs/smb/client/link.c
+++ b/fs/smb/client/link.c
@@ -258,7 +258,7 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_io_parms io_parms = {0};
int buf_type = CIFS_NO_BUFFER;
- FILE_ALL_INFO file_info;
+ struct cifs_open_info_data query_data;
oparms = (struct cifs_open_parms) {
.tcon = tcon,
@@ -270,11 +270,11 @@ cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
.fid = &fid,
};
- rc = CIFS_open(xid, &oparms, &oplock, &file_info);
+ rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &query_data);
if (rc)
return rc;
- if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
+ if (query_data.fi.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
rc = -ENOENT;
/* it's not a symlink */
goto out;
@@ -313,7 +313,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
.fid = &fid,
};
- rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, NULL);
if (rc)
return rc;
@@ -643,7 +643,8 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
case CIFS_SYMLINK_TYPE_NATIVE:
case CIFS_SYMLINK_TYPE_NFS:
case CIFS_SYMLINK_TYPE_WSL:
- if (server->ops->create_reparse_symlink) {
+ if (server->ops->create_reparse_symlink &&
+ (le32_to_cpu(pTcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS)) {
rc = server->ops->create_reparse_symlink(xid, inode,
direntry,
pTcon,
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index b328dc5c7988..7b6ed9b23e71 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -137,8 +137,10 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
spin_lock_init(&ret_buf->tc_lock);
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
+ INIT_LIST_HEAD(&ret_buf->cifs_sb_list);
spin_lock_init(&ret_buf->open_file_lock);
spin_lock_init(&ret_buf->stat_lock);
+ spin_lock_init(&ret_buf->sb_list_lock);
atomic_set(&ret_buf->num_local_opens, 0);
atomic_set(&ret_buf->num_remote_opens, 0);
ret_buf->stats_from_time = ktime_get_real_seconds();
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index 2b9e9885dc42..bb25e77c5540 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -542,12 +542,12 @@ static int wsl_set_reparse_buf(struct reparse_data_buffer **buf,
kfree(symname_utf16);
return -ENOMEM;
}
- /* Flag 0x02000000 is unknown, but all wsl symlinks have this value */
- symlink_buf->Flags = cpu_to_le32(0x02000000);
- /* PathBuffer is in UTF-8 but without trailing null-term byte */
+ /* Version field must be set to 2 (MS-FSCC 2.1.2.7) */
+ symlink_buf->Version = cpu_to_le32(2);
+ /* Target for Version 2 is in UTF-8 but without trailing null-term byte */
symname_utf8_len = utf16s_to_utf8s((wchar_t *)symname_utf16, symname_utf16_len/2,
UTF16_LITTLE_ENDIAN,
- symlink_buf->PathBuffer,
+ symlink_buf->Target,
symname_utf8_maxlen);
*buf = (struct reparse_data_buffer *)symlink_buf;
buf_len = sizeof(struct reparse_wsl_symlink_data_buffer) + symname_utf8_len;
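The Version 2 payload built here (and parsed by parse_reparse_wsl_symlink() below) has the on-the-wire shape sketched here; the struct is a hedged mirror of reparse_wsl_symlink_data_buffer, all fields little-endian, per MS-FSCC 2.1.2.7:

#include <stdint.h>

struct wsl_symlink_v2 {			/* illustrative, not the kernel struct */
	uint32_t ReparseTag;		/* IO_REPARSE_TAG_LX_SYMLINK (0xA000001D) */
	uint16_t ReparseDataLength;	/* bytes from Version to end of Target */
	uint16_t Reserved;
	uint32_t Version;		/* must be 2 */
	char     Target[];		/* UTF-8 path, no trailing NUL */
} __attribute__((packed));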
@@ -1016,29 +1016,36 @@ static int parse_reparse_wsl_symlink(struct reparse_wsl_symlink_data_buffer *buf
struct cifs_open_info_data *data)
{
int len = le16_to_cpu(buf->ReparseDataLength);
+ int data_offset = offsetof(typeof(*buf), Target) - offsetof(typeof(*buf), Version);
int symname_utf8_len;
__le16 *symname_utf16;
int symname_utf16_len;
- if (len <= sizeof(buf->Flags)) {
+ if (len <= data_offset) {
cifs_dbg(VFS, "srv returned malformed wsl symlink buffer\n");
return -EIO;
}
- /* PathBuffer is in UTF-8 but without trailing null-term byte */
- symname_utf8_len = len - sizeof(buf->Flags);
+ /* MS-FSCC 2.1.2.7 defines layout of the Target field only for Version 2. */
+ if (le32_to_cpu(buf->Version) != 2) {
+ cifs_dbg(VFS, "srv returned unsupported wsl symlink version %u\n", le32_to_cpu(buf->Version));
+ return -EIO;
+ }
+
+ /* Target for Version 2 is in UTF-8 but without trailing null-term byte */
+ symname_utf8_len = len - data_offset;
/*
* Check that buffer does not contain null byte
* because Linux cannot process symlink with null byte.
*/
- if (strnlen(buf->PathBuffer, symname_utf8_len) != symname_utf8_len) {
+ if (strnlen(buf->Target, symname_utf8_len) != symname_utf8_len) {
cifs_dbg(VFS, "srv returned null byte in wsl symlink target location\n");
return -EIO;
}
symname_utf16 = kzalloc(symname_utf8_len * 2, GFP_KERNEL);
if (!symname_utf16)
return -ENOMEM;
- symname_utf16_len = utf8s_to_utf16s(buf->PathBuffer, symname_utf8_len,
+ symname_utf16_len = utf8s_to_utf16s(buf->Target, symname_utf8_len,
UTF16_LITTLE_ENDIAN,
(wchar_t *) symname_utf16, symname_utf8_len * 2);
if (symname_utf16_len < 0) {
@@ -1062,8 +1069,6 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
const char *full_path,
struct cifs_open_info_data *data)
{
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
-
data->reparse.buf = buf;
/* See MS-FSCC 2.1.2 */
@@ -1090,24 +1095,17 @@ int parse_reparse_point(struct reparse_data_buffer *buf,
}
return 0;
default:
- cifs_tcon_dbg(VFS | ONCE, "unhandled reparse tag: 0x%08x\n",
- le32_to_cpu(buf->ReparseTag));
return -EOPNOTSUPP;
}
}
-int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data)
+struct reparse_data_buffer *smb2_get_reparse_point_buffer(const struct kvec *rsp_iov,
+ u32 *plen)
{
- struct reparse_data_buffer *buf;
struct smb2_ioctl_rsp *io = rsp_iov->iov_base;
- u32 plen = le32_to_cpu(io->OutputCount);
-
- buf = (struct reparse_data_buffer *)((u8 *)io +
- le32_to_cpu(io->OutputOffset));
- return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
+ *plen = le32_to_cpu(io->OutputCount);
+ return (struct reparse_data_buffer *)((u8 *)io +
+ le32_to_cpu(io->OutputOffset));
}
static bool wsl_to_fattr(struct cifs_open_info_data *data,
@@ -1233,16 +1231,6 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
bool ok;
switch (tag) {
- case IO_REPARSE_TAG_INTERNAL:
- if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
- return false;
- fallthrough;
- case IO_REPARSE_TAG_DFS:
- case IO_REPARSE_TAG_DFSR:
- case IO_REPARSE_TAG_MOUNT_POINT:
- /* See cifs_create_junction_fattr() */
- fattr->cf_mode = S_IFDIR | 0711;
- break;
case IO_REPARSE_TAG_LX_SYMLINK:
case IO_REPARSE_TAG_LX_FIFO:
case IO_REPARSE_TAG_AF_UNIX:
@@ -1262,7 +1250,14 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
fattr->cf_mode |= S_IFLNK;
break;
default:
- return false;
+ if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
+ return false;
+ if (!IS_REPARSE_TAG_NAME_SURROGATE(tag) &&
+ tag != IO_REPARSE_TAG_INTERNAL)
+ return false;
+ /* See cifs_create_junction_fattr() */
+ fattr->cf_mode = S_IFDIR | 0711;
+ break;
}
fattr->cf_dtype = S_DT(fattr->cf_mode);
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index c0be5ab45a78..08de853b36a8 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -135,9 +135,6 @@ int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev);
-int smb2_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data);
+struct reparse_data_buffer *smb2_get_reparse_point_buffer(const struct kvec *rsp_iov, u32 *len);
#endif /* _CIFS_REPARSE_H */
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index faa80e7d54a6..b3fa9ee26912 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -242,7 +242,7 @@ int cifs_try_adding_channels(struct cifs_ses *ses)
iface->num_channels++;
iface->weight_fulfilled++;
- cifs_dbg(VFS, "successfully opened new channel on iface:%pIS\n",
+ cifs_info("successfully opened new channel on iface:%pIS\n",
&iface->sockaddr);
break;
}
@@ -501,6 +501,7 @@ cifs_ses_add_channel(struct cifs_ses *ses,
ctx->password = ses->password;
ctx->sectype = ses->sectype;
ctx->sign = ses->sign;
+ ctx->unicode = ses->unicode;
/* UNC and paths */
/* XXX: Use ses->server->hostname? */
@@ -522,6 +523,13 @@ cifs_ses_add_channel(struct cifs_ses *ses,
ctx->sockopt_tcp_nodelay = ses->server->tcp_nodelay;
ctx->echo_interval = ses->server->echo_interval / HZ;
ctx->max_credits = ses->server->max_credits;
+ ctx->min_offload = ses->server->min_offload;
+ ctx->compress = ses->server->compression.requested;
+ ctx->dfs_conn = ses->server->dfs_conn;
+ ctx->ignore_signature = ses->server->ignore_signature;
+ ctx->leaf_fullpath = ses->server->leaf_fullpath;
+ ctx->rootfs = ses->server->noblockcnt;
+ ctx->retrans = ses->server->retrans;
/*
* This will be used for encoding/decoding user/domain/pw
@@ -672,6 +680,22 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
*pbcc_area = bcc_ptr;
}
+static void
+ascii_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
+{
+ char *bcc_ptr = *pbcc_area;
+
+ strcpy(bcc_ptr, "Linux version ");
+ bcc_ptr += strlen("Linux version ");
+ strcpy(bcc_ptr, init_utsname()->release);
+ bcc_ptr += strlen(init_utsname()->release) + 1;
+
+ strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+ bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+
+ *pbcc_area = bcc_ptr;
+}
+
static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
@@ -696,6 +720,25 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
*pbcc_area = bcc_ptr;
}
+static void ascii_domain_string(char **pbcc_area, struct cifs_ses *ses,
+ const struct nls_table *nls_cp)
+{
+ char *bcc_ptr = *pbcc_area;
+ int len;
+
+ /* copy domain */
+ if (ses->domainName != NULL) {
+ len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_DOMAINNAME_LEN - 1;
+ bcc_ptr += len;
+ } /* else we send a null domain name so server will default to its own domain */
+ *bcc_ptr = 0;
+ bcc_ptr++;
+
+ *pbcc_area = bcc_ptr;
+}
+
static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
@@ -741,25 +784,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
- /* copy domain */
- if (ses->domainName != NULL) {
- len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
- if (WARN_ON_ONCE(len < 0))
- len = CIFS_MAX_DOMAINNAME_LEN - 1;
- bcc_ptr += len;
- } /* else we send a null domain name so server will default to its own domain */
- *bcc_ptr = 0;
- bcc_ptr++;
-
/* BB check for overflow here */
- strcpy(bcc_ptr, "Linux version ");
- bcc_ptr += strlen("Linux version ");
- strcpy(bcc_ptr, init_utsname()->release);
- bcc_ptr += strlen(init_utsname()->release) + 1;
-
- strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
- bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+ ascii_domain_string(&bcc_ptr, ses, nls_cp);
+ ascii_oslm_strings(&bcc_ptr, nls_cp);
*pbcc_area = bcc_ptr;
}
@@ -1562,7 +1590,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
sess_data->iov[1].iov_len = msg->secblob_len;
pSMB->req.SecurityBlobLength = cpu_to_le16(sess_data->iov[1].iov_len);
- if (ses->capabilities & CAP_UNICODE) {
+ if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) {
/* unicode strings must be word aligned */
if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
*bcc_ptr = 0;
@@ -1571,8 +1599,8 @@ sess_auth_kerberos(struct sess_data *sess_data)
unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
unicode_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
} else {
- /* BB: is this right? */
- ascii_ssetup_strings(&bcc_ptr, ses, sess_data->nls_cp);
+ ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ ascii_domain_string(&bcc_ptr, ses, sess_data->nls_cp);
}
sess_data->iov[2].iov_len = (long) bcc_ptr -
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index d6e2fb669c40..0adeec652dc1 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -14,6 +14,8 @@
#include "cifspdu.h"
#include "cifs_unicode.h"
#include "fs_context.h"
+#include "nterr.h"
+#include "smberr.h"
/*
* An NT cancel request header looks just like the original request except:
@@ -426,13 +428,6 @@ cifs_negotiate(const unsigned int xid,
{
int rc;
rc = CIFSSMBNegotiate(xid, ses, server);
- if (rc == -EAGAIN) {
- /* retry only once on 1st time connection */
- set_credits(server, 1);
- rc = CIFSSMBNegotiate(xid, ses, server);
- if (rc == -EAGAIN)
- rc = -EHOSTDOWN;
- }
return rc;
}
@@ -444,8 +439,8 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
unsigned int wsize;
/* start with specified wsize, or default */
- if (ctx->wsize)
- wsize = ctx->wsize;
+ if (ctx->got_wsize)
+ wsize = ctx->vol_wsize;
else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
wsize = CIFS_DEFAULT_IOSIZE;
else
@@ -497,7 +492,7 @@ cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
else
defsize = server->maxBuf - sizeof(READ_RSP);
- rsize = ctx->rsize ? ctx->rsize : defsize;
+ rsize = ctx->got_rsize ? ctx->vol_rsize : defsize;
/*
* no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
@@ -573,6 +568,42 @@ static int cifs_query_path_info(const unsigned int xid,
data->reparse_point = le32_to_cpu(fi.Attributes) & ATTR_REPARSE;
}
+#ifdef CONFIG_CIFS_XATTR
+ /*
+	 * For WSL CHR and BLK reparse points it is required to fetch
+	 * EA $LXDEV, which contains the major and minor device numbers.
+ */
+ if (!rc && data->reparse_point) {
+ struct smb2_file_full_ea_info *ea;
+
+ ea = (struct smb2_file_full_ea_info *)data->wsl.eas;
+ rc = CIFSSMBQAllEAs(xid, tcon, full_path, SMB2_WSL_XATTR_DEV,
+ &ea->ea_data[SMB2_WSL_XATTR_NAME_LEN + 1],
+ SMB2_WSL_XATTR_DEV_SIZE, cifs_sb);
+ if (rc == SMB2_WSL_XATTR_DEV_SIZE) {
+ ea->next_entry_offset = cpu_to_le32(0);
+ ea->flags = 0;
+ ea->ea_name_length = SMB2_WSL_XATTR_NAME_LEN;
+ ea->ea_value_length = cpu_to_le16(SMB2_WSL_XATTR_DEV_SIZE);
+ memcpy(&ea->ea_data[0], SMB2_WSL_XATTR_DEV, SMB2_WSL_XATTR_NAME_LEN + 1);
+ data->wsl.eas_len = sizeof(*ea) + SMB2_WSL_XATTR_NAME_LEN + 1 +
+ SMB2_WSL_XATTR_DEV_SIZE;
+ rc = 0;
+ } else if (rc >= 0) {
+ /* It is an error if EA $LXDEV has wrong size. */
+ rc = -EINVAL;
+ } else {
+ /*
+			 * In all other cases ignore the error if fetching
+			 * EA $LXDEV failed. It is needed only for WSL CHR
+			 * and BLK reparse points, and wsl_to_fattr()
+			 * handles the case when the EA is missing.
+ */
+ rc = 0;
+ }
+ }
+#endif
+
return rc;
}
@@ -975,18 +1006,13 @@ static int cifs_query_symlink(const unsigned int xid,
return rc;
}
-static int cifs_parse_reparse_point(struct cifs_sb_info *cifs_sb,
- const char *full_path,
- struct kvec *rsp_iov,
- struct cifs_open_info_data *data)
+static struct reparse_data_buffer *cifs_get_reparse_point_buffer(const struct kvec *rsp_iov,
+ u32 *plen)
{
- struct reparse_data_buffer *buf;
TRANSACT_IOCTL_RSP *io = rsp_iov->iov_base;
- u32 plen = le16_to_cpu(io->ByteCount);
-
- buf = (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
- le32_to_cpu(io->DataOffset));
- return parse_reparse_point(buf, plen, cifs_sb, full_path, data);
+ *plen = le16_to_cpu(io->ByteCount);
+ return (struct reparse_data_buffer *)((__u8 *)&io->hdr.Protocol +
+ le32_to_cpu(io->DataOffset));
}
static bool
@@ -1069,6 +1095,47 @@ cifs_make_node(unsigned int xid, struct inode *inode,
full_path, mode, dev);
}
+static bool
+cifs_is_network_name_deleted(char *buf, struct TCP_Server_Info *server)
+{
+ struct smb_hdr *shdr = (struct smb_hdr *)buf;
+ struct TCP_Server_Info *pserver;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ if (shdr->Flags2 & SMBFLG2_ERR_STATUS) {
+ if (shdr->Status.CifsError != cpu_to_le32(NT_STATUS_NETWORK_NAME_DELETED))
+ return false;
+ } else {
+ if (shdr->Status.DosError.ErrorClass != ERRSRV ||
+ shdr->Status.DosError.Error != cpu_to_le16(ERRinvtid))
+ return false;
+ }
+
+ /* If server is a channel, select the primary channel */
+ pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+ if (cifs_ses_exiting(ses))
+ continue;
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->tid == shdr->Tid) {
+ spin_lock(&tcon->tc_lock);
+ tcon->need_reconnect = true;
+ spin_unlock(&tcon->tc_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ pr_warn_once("Server share %s deleted.\n",
+ tcon->tree_name);
+ return true;
+ }
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ return false;
+}
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
@@ -1121,7 +1188,7 @@ struct smb_version_operations smb1_operations = {
.rename = CIFSSMBRename,
.create_hardlink = CIFSCreateHardLink,
.query_symlink = cifs_query_symlink,
- .parse_reparse_point = cifs_parse_reparse_point,
+ .get_reparse_point_buffer = cifs_get_reparse_point_buffer,
.open = cifs_open_file,
.set_fid = cifs_set_fid,
.close = cifs_close_file,
@@ -1153,6 +1220,7 @@ struct smb_version_operations smb1_operations = {
.get_acl_by_fid = get_cifs_acl_by_fid,
.set_acl = set_cifs_acl,
.make_node = cifs_make_node,
+ .is_network_name_deleted = cifs_is_network_name_deleted,
};
struct smb_version_values smb1_values = {
@@ -1170,6 +1238,7 @@ struct smb_version_values smb1_values = {
.cap_unix = CAP_UNIX,
.cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
.cap_large_files = CAP_LARGE_FILES,
+ .cap_unicode = CAP_UNICODE,
.signing_enabled = SECMODE_SIGN_ENABLED,
.signing_required = SECMODE_SIGN_REQUIRED,
};
diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c
index d609a20fb98a..a7f629238830 100644
--- a/fs/smb/client/smb2file.c
+++ b/fs/smb/client/smb2file.c
@@ -152,16 +152,35 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32
int err_buftype = CIFS_NO_BUFFER;
struct cifs_fid *fid = oparms->fid;
struct network_resiliency_req nr_ioctl_req;
+ bool retry_without_read_attributes = false;
smb2_path = cifs_convert_path_to_utf16(oparms->path, oparms->cifs_sb);
if (smb2_path == NULL)
return -ENOMEM;
- oparms->desired_access |= FILE_READ_ATTRIBUTES;
+ /*
+ * GENERIC_READ, GENERIC_EXECUTE, GENERIC_ALL and MAXIMUM_ALLOWED
+	 * also contain the FILE_READ_ATTRIBUTES access right. So do not
+	 * append FILE_READ_ATTRIBUTES when it is not needed, and avoid
+	 * taking the retry_without_read_attributes code path.
+ */
+ if (!(oparms->desired_access & FILE_READ_ATTRIBUTES) &&
+ !(oparms->desired_access & GENERIC_READ) &&
+ !(oparms->desired_access & GENERIC_EXECUTE) &&
+ !(oparms->desired_access & GENERIC_ALL) &&
+ !(oparms->desired_access & MAXIMUM_ALLOWED)) {
+ oparms->desired_access |= FILE_READ_ATTRIBUTES;
+ retry_without_read_attributes = true;
+ }
smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
&err_buftype);
+ if (rc == -EACCES && retry_without_read_attributes) {
+ oparms->desired_access &= ~FILE_READ_ATTRIBUTES;
+ rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov,
+ &err_buftype);
+ }
if (rc && data) {
struct smb2_hdr *hdr = err_iov.iov_base;
diff --git a/fs/smb/client/smb2glob.h b/fs/smb/client/smb2glob.h
index 2466e6155136..224495322a05 100644
--- a/fs/smb/client/smb2glob.h
+++ b/fs/smb/client/smb2glob.h
@@ -38,6 +38,7 @@ enum smb2_compound_ops {
SMB2_OP_SET_REPARSE,
SMB2_OP_GET_REPARSE,
SMB2_OP_QUERY_WSL_EA,
+ SMB2_OP_OPEN_QUERY,
};
/* Used when constructing chained read requests. */
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 826b57a5a2a8..57d9bfbadd97 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -176,6 +176,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec *out_iov, int *out_buftype, struct dentry *dentry)
{
+ struct smb2_create_rsp *create_rsp = NULL;
struct smb2_query_info_rsp *qi_rsp = NULL;
struct smb2_compound_vars *vars = NULL;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
@@ -265,7 +266,13 @@ replay_again:
num_rqst++;
rc = 0;
- for (i = 0; i < num_cmds; i++) {
+ i = 0;
+
+ /* Skip the leading explicit OPEN operation */
+ if (num_cmds > 0 && cmds[0] == SMB2_OP_OPEN_QUERY)
+ i++;
+
+ for (; i < num_cmds; i++) {
/* Operation */
switch (cmds[i]) {
case SMB2_OP_QUERY_INFO:
@@ -640,6 +647,27 @@ finished:
}
tmp_rc = rc;
+
+ if (rc == 0 && num_cmds > 0 && cmds[0] == SMB2_OP_OPEN_QUERY) {
+ create_rsp = rsp_iov[0].iov_base;
+ idata = in_iov[0].iov_base;
+ idata->fi.CreationTime = create_rsp->CreationTime;
+ idata->fi.LastAccessTime = create_rsp->LastAccessTime;
+ idata->fi.LastWriteTime = create_rsp->LastWriteTime;
+ idata->fi.ChangeTime = create_rsp->ChangeTime;
+ idata->fi.Attributes = create_rsp->FileAttributes;
+ idata->fi.AllocationSize = create_rsp->AllocationSize;
+ idata->fi.EndOfFile = create_rsp->EndofFile;
+ if (le32_to_cpu(idata->fi.NumberOfLinks) == 0)
+ idata->fi.NumberOfLinks = cpu_to_le32(1); /* dummy value */
+ idata->fi.DeletePending = 0;
+ idata->fi.Directory = !!(le32_to_cpu(create_rsp->FileAttributes) & ATTR_DIRECTORY);
+
+ /* smb2_parse_contexts() fills idata->fi.IndexNumber */
+ rc = smb2_parse_contexts(server, &rsp_iov[0], &oparms->fid->epoch,
+ oparms->fid->lease_key, &oplock, &idata->fi, NULL);
+ }
+
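Note that an SMB2 create response carries no NumberOfLinks or IndexNumber fields, which is why the code above substitutes a dummy link count of 1 and relies on smb2_parse_contexts() to recover the IndexNumber (presumably from the on-disk file ID create context returned by the server).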
for (i = 0; i < num_cmds; i++) {
char *buf = rsp_iov[i + i].iov_base;
@@ -978,6 +1006,43 @@ int smb2_query_path_info(const unsigned int xid,
case 0:
rc = parse_create_response(data, cifs_sb, full_path, &out_iov[0]);
break;
+ case -EACCES:
+ /*
+		 * If SMB2_OP_QUERY_INFO (called when POSIX extensions are not used) failed with
+		 * STATUS_ACCESS_DENIED then it means that the caller does not have permission to
+		 * open the path with FILE_READ_ATTRIBUTES access and therefore cannot issue the
+		 * SMB2_OP_QUERY_INFO command.
+		 *
+		 * There is an alternative way to query limited information about the path that is
+		 * still suitable for the stat() syscall: a successful SMB2 OPEN/CREATE response
+		 * contains a subset of the query information.
+		 *
+		 * So try to open the path without FILE_READ_ATTRIBUTES but with MAXIMUM_ALLOWED
+		 * access, which grants the maximum possible access to the file, and whose response
+		 * contains the query information required for the stat() syscall.
+ */
+
+ if (tcon->posix_extensions)
+ break;
+
+ num_cmds = 1;
+ cmds[0] = SMB2_OP_OPEN_QUERY;
+ in_iov[0].iov_base = data;
+ in_iov[0].iov_len = sizeof(*data);
+ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, MAXIMUM_ALLOWED,
+ FILE_OPEN, create_options, ACL_NO_MODE);
+ free_rsp_iov(out_iov, out_buftype, ARRAY_SIZE(out_iov));
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ &oparms, in_iov, cmds, num_cmds,
+ cfile, out_iov, out_buftype, NULL);
+
+ hdr = out_iov[0].iov_base;
+ if (!hdr || out_buftype[0] == CIFS_NO_BUFFER)
+ goto out;
+
+ if (!rc)
+ rc = parse_create_response(data, cifs_sb, full_path, &out_iov[0]);
+ break;
case -EOPNOTSUPP:
/*
* BB TODO: When support for special files added to Samba
@@ -1273,6 +1338,14 @@ struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
int rc;
int i;
+ /*
+	 * If the server filesystem does not support reparse points then do
+	 * not attempt to create one. This prevents creating an unusable
+	 * empty object on the server.
+ */
+ if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS))
+ return ERR_PTR(-EOPNOTSUPP);
+
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
SYNCHRONIZE | DELETE |
FILE_READ_ATTRIBUTES |
diff --git a/fs/smb/client/smb2misc.c b/fs/smb/client/smb2misc.c
index f3c4b70b77b9..cddf273c14ae 100644
--- a/fs/smb/client/smb2misc.c
+++ b/fs/smb/client/smb2misc.c
@@ -816,11 +816,12 @@ smb2_handle_cancelled_close(struct cifs_tcon *tcon, __u64 persistent_fid,
WARN_ONCE(tcon->tc_count < 0, "tcon refcount is negative");
spin_unlock(&cifs_tcp_ses_lock);
- if (tcon->ses)
+ if (tcon->ses) {
server = tcon->ses->server;
-
- cifs_server_dbg(FYI, "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
- tcon->tid, persistent_fid, volatile_fid);
+ cifs_server_dbg(FYI,
+ "tid=0x%x: tcon is closing, skipping async close retry of fid %llu %llu\n",
+ tcon->tid, persistent_fid, volatile_fid);
+ }
return 0;
}
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 4dd11eafb69d..2fe8eeb98535 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -464,12 +464,20 @@ smb2_negotiate(const unsigned int xid,
server->CurrentMid = 0;
spin_unlock(&server->mid_lock);
rc = SMB2_negotiate(xid, ses, server);
- /* BB we probably don't need to retry with modern servers */
- if (rc == -EAGAIN)
- rc = -EHOSTDOWN;
return rc;
}
+static inline unsigned int
+prevent_zero_iosize(unsigned int size, const char *type)
+{
+ if (size == 0) {
+ cifs_dbg(VFS, "SMB: Zero %ssize calculated, using minimum value %u\n",
+ type, CIFS_MIN_DEFAULT_IOSIZE);
+ return CIFS_MIN_DEFAULT_IOSIZE;
+ }
+ return size;
+}
+
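Putting the pieces together, the negotiated size is the user value (or default) clamped to the server maximum and, without large-MTU support, to SMB2_MAX_BUFFER_SIZE, with the helper above guarding against a degenerate zero. A condensed sketch of that pipeline (illustrative names; the 64 KiB constant is an assumption standing in for SMB2_MAX_BUFFER_SIZE):

#define SMB2_MAX_BUF 65536U	/* assumed stand-in for SMB2_MAX_BUFFER_SIZE */

static unsigned int negotiate_iosize(unsigned int requested, unsigned int dflt,
				     unsigned int server_max, int large_mtu)
{
	unsigned int size = requested ? requested : dflt;

	if (size > server_max)
		size = server_max;
	if (!large_mtu && size > SMB2_MAX_BUF)
		size = SMB2_MAX_BUF;
	/* prevent_zero_iosize(): never return 0, fall back to a sane minimum */
	return size ? size : SMB2_MAX_BUF;
}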
static unsigned int
smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
@@ -477,12 +485,12 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
unsigned int wsize;
/* start with specified wsize, or default */
- wsize = ctx->wsize ? ctx->wsize : CIFS_DEFAULT_IOSIZE;
+ wsize = ctx->got_wsize ? ctx->vol_wsize : CIFS_DEFAULT_IOSIZE;
wsize = min_t(unsigned int, wsize, server->max_write);
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
- return wsize;
+ return prevent_zero_iosize(wsize, "w");
}
static unsigned int
@@ -492,7 +500,7 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
unsigned int wsize;
/* start with specified wsize, or default */
- wsize = ctx->wsize ? ctx->wsize : SMB3_DEFAULT_IOSIZE;
+ wsize = ctx->got_wsize ? ctx->vol_wsize : SMB3_DEFAULT_IOSIZE;
wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
@@ -514,7 +522,7 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
- return wsize;
+ return prevent_zero_iosize(wsize, "w");
}
static unsigned int
@@ -524,13 +532,13 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
unsigned int rsize;
/* start with specified rsize, or default */
- rsize = ctx->rsize ? ctx->rsize : CIFS_DEFAULT_IOSIZE;
+ rsize = ctx->got_rsize ? ctx->vol_rsize : CIFS_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
- return rsize;
+ return prevent_zero_iosize(rsize, "r");
}
static unsigned int
@@ -540,7 +548,7 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
unsigned int rsize;
/* start with specified rsize, or default */
- rsize = ctx->rsize ? ctx->rsize : SMB3_DEFAULT_IOSIZE;
+ rsize = ctx->got_rsize ? ctx->vol_rsize : SMB3_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
@@ -563,7 +571,7 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
- return rsize;
+ return prevent_zero_iosize(rsize, "r");
}
/*
@@ -969,7 +977,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
if (islink)
rc = -EREMOTE;
}
- if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) && cifs_sb &&
+ if (rc == -EREMOTE && IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) &&
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS))
rc = -EOPNOTSUPP;
goto out;
@@ -3526,8 +3534,6 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
if (rc == 0) {
netfs_resize_file(&cifsi->netfs, new_eof, true);
cifs_setsize(inode, new_eof);
- cifs_truncate_page(inode->i_mapping, inode->i_size);
- truncate_setsize(inode, new_eof);
}
goto out;
}
@@ -4549,9 +4555,9 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
return rc;
}
} else {
- if (unlikely(!server->secmech.dec))
- return -EIO;
-
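+ /* make sure the decryption transforms are allocated before use */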
+ rc = smb3_crypto_aead_allocate(server);
+ if (unlikely(rc))
+ return rc;
tfm = server->secmech.dec;
}
@@ -5229,7 +5235,7 @@ static int smb2_make_node(unsigned int xid, struct inode *inode,
const char *full_path, umode_t mode, dev_t dev)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- int rc;
+ int rc = -EOPNOTSUPP;
/*
* Check if mounted with the 'sfu' mount parm.
@@ -5240,7 +5246,7 @@ static int smb2_make_node(unsigned int xid, struct inode *inode,
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
rc = cifs_sfu_make_node(xid, inode, dentry, tcon,
full_path, mode, dev);
- } else {
+ } else if (le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) {
rc = smb2_mknod_reparse(xid, inode, dentry, tcon,
full_path, mode, dev);
}
@@ -5297,7 +5303,7 @@ struct smb_version_operations smb20_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5400,7 +5406,7 @@ struct smb_version_operations smb21_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5507,7 +5513,7 @@ struct smb_version_operations smb30_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
@@ -5623,7 +5629,7 @@ struct smb_version_operations smb311_operations = {
.unlink = smb2_unlink,
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
- .parse_reparse_point = smb2_parse_reparse_point,
+ .get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
.create_reparse_symlink = smb2_create_reparse_symlink,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index f9c521b3c65e..c4d52bebd37d 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -43,6 +43,7 @@
#endif
#include "cached_dir.h"
#include "compress.h"
+#include "fs_context.h"
/*
* The following table defines the expected "StructureSize" of SMB2 requests
@@ -1251,15 +1252,8 @@ SMB2_negotiate(const unsigned int xid,
cifs_server_dbg(VFS, "Missing expected negotiate contexts\n");
}
- if (server->cipher_type && !rc) {
- if (!SERVER_IS_CHAN(server)) {
- rc = smb3_crypto_aead_allocate(server);
- } else {
- /* For channels, just reuse the primary server crypto secmech. */
- server->secmech.enc = server->primary_server->secmech.enc;
- server->secmech.dec = server->primary_server->secmech.dec;
- }
- }
+ if (server->cipher_type && !rc)
+ rc = smb3_crypto_aead_allocate(server);
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
@@ -3916,12 +3910,10 @@ SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
void **data, u32 *plen, u32 extra_info)
{
- __u32 additional_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
- extra_info;
*plen = 0;
return query_info(xid, tcon, persistent_fid, volatile_fid,
- 0, SMB2_O_INFO_SECURITY, additional_info,
+ 0, SMB2_O_INFO_SECURITY, extra_info,
SMB2_MAX_BUFFER_SIZE, MIN_SEC_DESC_LEN, data, plen);
}
@@ -4091,6 +4083,24 @@ smb2_echo_callback(struct mid_q_entry *mid)
add_credits(server, &credits, CIFS_ECHO_OP);
}
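+/*
+ * After a reconnect the server may negotiate different max_read/max_write,
+ * so refresh the cached rsize/wsize of every superblock mounted through
+ * this tcon while holding sb_list_lock.
+ */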
+static void cifs_renegotiate_iosize(struct TCP_Server_Info *server,
+ struct cifs_tcon *tcon)
+{
+ struct cifs_sb_info *cifs_sb;
+
+ if (server == NULL || tcon == NULL)
+ return;
+
+ spin_lock(&tcon->sb_list_lock);
+ list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) {
+ cifs_sb->ctx->rsize =
+ server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
+ cifs_sb->ctx->wsize =
+ server->ops->negotiate_wsize(tcon, cifs_sb->ctx);
+ }
+ spin_unlock(&tcon->sb_list_lock);
+}
+
void smb2_reconnect_server(struct work_struct *work)
{
struct TCP_Server_Info *server = container_of(work,
@@ -4176,9 +4186,10 @@ void smb2_reconnect_server(struct work_struct *work)
list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) {
rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server, true);
- if (!rc)
+ if (!rc) {
+ cifs_renegotiate_iosize(server, tcon);
cifs_reopen_persistent_handles(tcon);
- else
+ } else
resched = true;
list_del_init(&tcon->rlist);
if (tcon->ipc)
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 0dc80959ce48..266af17aa7d9 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -179,7 +179,7 @@ delete_mid(struct mid_q_entry *mid)
* Our basic "send data to server" function. Should be called with srv_mutex
* held. The caller is responsible for handling the results.
*/
-static int
+int
smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
size_t *sent)
{
@@ -894,6 +894,9 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
case MID_SHUTDOWN:
rc = -EHOSTDOWN;
break;
+ case MID_RC:
+ rc = mid->mid_rc;
+ break;
default:
if (!(mid->mid_flags & MID_DELETED)) {
list_del_init(&mid->qhead);
diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
index 58a584f0b27e..b88fa04f5792 100644
--- a/fs/smb/client/xattr.c
+++ b/fs/smb/client/xattr.c
@@ -31,6 +31,8 @@
* secure, replaced by SMB2 (then even more highly secure SMB3) many years ago
*/
#define SMB3_XATTR_CIFS_ACL "system.smb3_acl" /* DACL only */
+#define SMB3_XATTR_CIFS_NTSD_SACL "system.smb3_ntsd_sacl" /* SACL only */
+#define SMB3_XATTR_CIFS_NTSD_OWNER "system.smb3_ntsd_owner" /* owner only */
#define SMB3_XATTR_CIFS_NTSD "system.smb3_ntsd" /* owner plus DACL */
#define SMB3_XATTR_CIFS_NTSD_FULL "system.smb3_ntsd_full" /* owner/DACL/SACL */
#define SMB3_XATTR_ATTRIB "smb3.dosattrib" /* full name: user.smb3.dosattrib */
@@ -38,6 +40,7 @@
/* BB need to add server (Samba e.g) support for security and trusted prefix */
enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT,
+ XATTR_CIFS_NTSD_SACL, XATTR_CIFS_NTSD_OWNER,
XATTR_CIFS_NTSD, XATTR_CIFS_NTSD_FULL };
static int cifs_attrib_set(unsigned int xid, struct cifs_tcon *pTcon,
@@ -160,6 +163,8 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
break;
case XATTR_CIFS_ACL:
+ case XATTR_CIFS_NTSD_SACL:
+ case XATTR_CIFS_NTSD_OWNER:
case XATTR_CIFS_NTSD:
case XATTR_CIFS_NTSD_FULL: {
struct smb_ntsd *pacl;
@@ -187,6 +192,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
CIFS_ACL_GROUP |
CIFS_ACL_DACL);
break;
+ case XATTR_CIFS_NTSD_OWNER:
+ aclflags = (CIFS_ACL_OWNER |
+ CIFS_ACL_GROUP);
+ break;
+ case XATTR_CIFS_NTSD_SACL:
+ aclflags = CIFS_ACL_SACL;
+ break;
case XATTR_CIFS_ACL:
default:
aclflags = CIFS_ACL_DACL;
@@ -308,6 +320,8 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
break;
case XATTR_CIFS_ACL:
+ case XATTR_CIFS_NTSD_SACL:
+ case XATTR_CIFS_NTSD_OWNER:
case XATTR_CIFS_NTSD:
case XATTR_CIFS_NTSD_FULL: {
/*
@@ -320,10 +334,23 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
if (pTcon->ses->server->ops->get_acl == NULL)
goto out; /* rc already EOPNOTSUPP */
- if (handler->flags == XATTR_CIFS_NTSD_FULL) {
+ switch (handler->flags) {
+ case XATTR_CIFS_NTSD_FULL:
+ extra_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO | SACL_SECINFO;
+ break;
+ case XATTR_CIFS_NTSD:
+ extra_info = OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO;
+ break;
+ case XATTR_CIFS_NTSD_OWNER:
+ extra_info = OWNER_SECINFO | GROUP_SECINFO;
+ break;
+ case XATTR_CIFS_NTSD_SACL:
extra_info = SACL_SECINFO;
- } else {
- extra_info = 0;
+ break;
+ case XATTR_CIFS_ACL:
+ default:
+ extra_info = DACL_SECINFO;
+ break;
}
pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
inode, full_path, &acllen, extra_info);
@@ -441,6 +468,20 @@ static const struct xattr_handler smb3_acl_xattr_handler = {
.set = cifs_xattr_set,
};
+static const struct xattr_handler smb3_ntsd_sacl_xattr_handler = {
+ .name = SMB3_XATTR_CIFS_NTSD_SACL,
+ .flags = XATTR_CIFS_NTSD_SACL,
+ .get = cifs_xattr_get,
+ .set = cifs_xattr_set,
+};
+
+static const struct xattr_handler smb3_ntsd_owner_xattr_handler = {
+ .name = SMB3_XATTR_CIFS_NTSD_OWNER,
+ .flags = XATTR_CIFS_NTSD_OWNER,
+ .get = cifs_xattr_get,
+ .set = cifs_xattr_set,
+};
+
static const struct xattr_handler cifs_cifs_ntsd_xattr_handler = {
.name = CIFS_XATTR_CIFS_NTSD,
.flags = XATTR_CIFS_NTSD,
@@ -486,6 +527,8 @@ const struct xattr_handler * const cifs_xattr_handlers[] = {
&cifs_os2_xattr_handler,
&cifs_cifs_acl_xattr_handler,
&smb3_acl_xattr_handler, /* alias for above since avoiding "cifs" */
+ &smb3_ntsd_sacl_xattr_handler,
+ &smb3_ntsd_owner_xattr_handler,
&cifs_cifs_ntsd_xattr_handler,
&smb3_ntsd_xattr_handler, /* alias for above since avoiding "cifs" */
&cifs_cifs_ntsd_full_xattr_handler,
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index c7a0efda4403..f79a5165a7cc 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -95,6 +95,9 @@
*/
#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
+/* According to the MS-SMB2 specification, the minimum recommended value is 65536. */
+#define CIFS_MIN_DEFAULT_IOSIZE (65536)
+
/*
* SMB2 Header Definition
*
@@ -1564,13 +1567,13 @@ struct reparse_nfs_data_buffer {
__u8 DataBuffer[];
} __packed;
-/* For IO_REPARSE_TAG_LX_SYMLINK */
+/* For IO_REPARSE_TAG_LX_SYMLINK - see MS-FSCC 2.1.2.7 */
struct reparse_wsl_symlink_data_buffer {
__le32 ReparseTag;
__le16 ReparseDataLength;
__u16 Reserved;
- __le32 Flags;
- __u8 PathBuffer[]; /* Variable Length UTF-8 string without nul-term */
+ __le32 Version; /* Always 2 */
+ __u8 Target[]; /* Variable Length UTF-8 string without nul-term */
} __packed;
struct validate_negotiate_info_req {
diff --git a/fs/smb/server/Kconfig b/fs/smb/server/Kconfig
index cabe6a843c6a..cf70e96ad4de 100644
--- a/fs/smb/server/Kconfig
+++ b/fs/smb/server/Kconfig
@@ -70,4 +70,4 @@ config SMB_SERVER_CHECK_CAP_NET_ADMIN
config SMB_SERVER_KERBEROS5
bool "Support for Kerberos 5"
depends on SMB_SERVER
- default n
+ default y
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
index 2a5b4a96bf99..83caa3849749 100644
--- a/fs/smb/server/auth.c
+++ b/fs/smb/server/auth.c
@@ -1016,9 +1016,9 @@ static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
ses_enc_key = enc ? sess->smb3encryptionkey :
sess->smb3decryptionkey;
- if (enc)
- ksmbd_user_session_get(sess);
memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE);
+ if (!enc)
+ ksmbd_user_session_put(sess);
return 0;
}
@@ -1218,7 +1218,7 @@ free_iv:
free_sg:
kfree(sg);
free_req:
- kfree(req);
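+ /* req came from aead_request_alloc(), so use the matching free helper */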
+ aead_request_free(req);
free_ctx:
ksmbd_release_crypto_ctx(ctx);
return rc;
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 91c2318639e7..14620e147dda 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -27,6 +27,7 @@ enum {
KSMBD_SESS_EXITING,
KSMBD_SESS_NEED_RECONNECT,
KSMBD_SESS_NEED_NEGOTIATE,
+ KSMBD_SESS_NEED_SETUP,
KSMBD_SESS_RELEASING
};
@@ -187,6 +188,11 @@ static inline bool ksmbd_conn_need_negotiate(struct ksmbd_conn *conn)
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_NEGOTIATE;
}
+static inline bool ksmbd_conn_need_setup(struct ksmbd_conn *conn)
+{
+ return READ_ONCE(conn->status) == KSMBD_SESS_NEED_SETUP;
+}
+
static inline bool ksmbd_conn_need_reconnect(struct ksmbd_conn *conn)
{
return READ_ONCE(conn->status) == KSMBD_SESS_NEED_RECONNECT;
@@ -217,6 +223,11 @@ static inline void ksmbd_conn_set_need_negotiate(struct ksmbd_conn *conn)
WRITE_ONCE(conn->status, KSMBD_SESS_NEED_NEGOTIATE);
}
+static inline void ksmbd_conn_set_need_setup(struct ksmbd_conn *conn)
+{
+ WRITE_ONCE(conn->status, KSMBD_SESS_NEED_SETUP);
+}
+
static inline void ksmbd_conn_set_need_reconnect(struct ksmbd_conn *conn)
{
WRITE_ONCE(conn->status, KSMBD_SESS_NEED_RECONNECT);
diff --git a/fs/smb/server/mgmt/user_session.c b/fs/smb/server/mgmt/user_session.c
index 71c6939dfbf1..3f45f28f6f0f 100644
--- a/fs/smb/server/mgmt/user_session.c
+++ b/fs/smb/server/mgmt/user_session.c
@@ -181,7 +181,7 @@ static void ksmbd_expire_session(struct ksmbd_conn *conn)
down_write(&sessions_table_lock);
down_write(&conn->session_lock);
xa_for_each(&conn->sessions, id, sess) {
- if (atomic_read(&sess->refcnt) == 0 &&
+ if (atomic_read(&sess->refcnt) <= 1 &&
(sess->state != SMB2_SESSION_VALID ||
time_after(jiffies,
sess->last_active + SMB2_SESSION_TIMEOUT))) {
@@ -230,7 +230,11 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
if (!ksmbd_chann_del(conn, sess) &&
xa_empty(&sess->ksmbd_chann_list)) {
hash_del(&sess->hlist);
- ksmbd_session_destroy(sess);
+ down_write(&conn->session_lock);
+ xa_erase(&conn->sessions, sess->id);
+ up_write(&conn->session_lock);
+ if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
}
}
@@ -249,13 +253,30 @@ void ksmbd_sessions_deregister(struct ksmbd_conn *conn)
if (xa_empty(&sess->ksmbd_chann_list)) {
xa_erase(&conn->sessions, sess->id);
hash_del(&sess->hlist);
- ksmbd_session_destroy(sess);
+ if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
}
up_write(&conn->session_lock);
up_write(&sessions_table_lock);
}
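+/*
+ * Check whether session @id is already bound to @conn; unlike
+ * ksmbd_session_lookup() this takes no reference on the session.
+ */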
+bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn,
+ unsigned long long id)
+{
+ struct ksmbd_session *sess;
+
+ down_read(&conn->session_lock);
+ sess = xa_load(&conn->sessions, id);
+ if (sess) {
+ up_read(&conn->session_lock);
+ return true;
+ }
+ up_read(&conn->session_lock);
+
+ return false;
+}
+
struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
unsigned long long id)
{
@@ -309,8 +330,8 @@ void ksmbd_user_session_put(struct ksmbd_session *sess)
if (atomic_read(&sess->refcnt) <= 0)
WARN_ON(1);
- else
- atomic_dec(&sess->refcnt);
+ else if (atomic_dec_and_test(&sess->refcnt))
+ ksmbd_session_destroy(sess);
}
struct preauth_session *ksmbd_preauth_session_alloc(struct ksmbd_conn *conn,
@@ -353,13 +374,13 @@ void destroy_previous_session(struct ksmbd_conn *conn,
ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_RECONNECT);
err = ksmbd_conn_wait_idle_sess_id(conn, id);
if (err) {
- ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
goto out;
}
ksmbd_destroy_file_table(&prev_sess->file_table);
prev_sess->state = SMB2_SESSION_EXPIRED;
- ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_NEGOTIATE);
+ ksmbd_all_conn_set_status(id, KSMBD_SESS_NEED_SETUP);
ksmbd_launch_ksmbd_durable_scavenger();
out:
up_write(&conn->session_lock);
@@ -417,7 +438,7 @@ static struct ksmbd_session *__session_create(int protocol)
xa_init(&sess->rpc_handle_list);
sess->sequence_number = 1;
rwlock_init(&sess->tree_conns_lock);
- atomic_set(&sess->refcnt, 1);
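+ /* start with two refs: the session table's and the creating request's */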
+ atomic_set(&sess->refcnt, 2);
ret = __init_smb2_session(sess);
if (ret)
diff --git a/fs/smb/server/mgmt/user_session.h b/fs/smb/server/mgmt/user_session.h
index c1c4b20bd5c6..f21348381d59 100644
--- a/fs/smb/server/mgmt/user_session.h
+++ b/fs/smb/server/mgmt/user_session.h
@@ -87,6 +87,8 @@ void ksmbd_session_destroy(struct ksmbd_session *sess);
struct ksmbd_session *ksmbd_session_lookup_slowpath(unsigned long long id);
struct ksmbd_session *ksmbd_session_lookup(struct ksmbd_conn *conn,
unsigned long long id);
+bool is_ksmbd_session_in_connection(struct ksmbd_conn *conn,
+ unsigned long long id);
int ksmbd_session_register(struct ksmbd_conn *conn,
struct ksmbd_session *sess);
void ksmbd_sessions_deregister(struct ksmbd_conn *conn);
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 28886ff1ee57..f103b1bd0400 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -724,8 +724,8 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
work->conn = conn;
work->sess = opinfo->sess;
+ ksmbd_conn_r_count_inc(conn);
if (opinfo->op_state == OPLOCK_ACK_WAIT) {
- ksmbd_conn_r_count_inc(conn);
INIT_WORK(&work->work, __smb2_oplock_break_noti);
ksmbd_queue_work(work);
@@ -833,8 +833,8 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
work->conn = conn;
work->sess = opinfo->sess;
+ ksmbd_conn_r_count_inc(conn);
if (opinfo->op_state == OPLOCK_ACK_WAIT) {
- ksmbd_conn_r_count_inc(conn);
INIT_WORK(&work->work, __smb2_lease_break_noti);
ksmbd_queue_work(work);
wait_for_break_ack(opinfo);
@@ -1505,6 +1505,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
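+ /* make sure the declared context length covers the v2 lease payload */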
+ if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
+ sizeof(struct create_lease_v2) - 4)
+ return NULL;
+
memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
lreq->flags = lc->lcontext.LeaseFlags;
@@ -1517,6 +1521,10 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
} else {
struct create_lease *lc = (struct create_lease *)cc;
+ if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
+ sizeof(struct create_lease))
+ return NULL;
+
memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
lreq->flags = lc->lcontext.LeaseFlags;
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index c53121538990..d24d95d15d87 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1249,7 +1249,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
}
conn->srv_sec_mode = le16_to_cpu(rsp->SecurityMode);
- ksmbd_conn_set_need_negotiate(conn);
+ ksmbd_conn_set_need_setup(conn);
err_out:
ksmbd_conn_unlock(conn);
@@ -1271,6 +1271,9 @@ static int alloc_preauth_hash(struct ksmbd_session *sess,
if (sess->Preauth_HashValue)
return 0;
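+ /* the connection's preauth state may already have been torn down */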
+ if (!conn->preauth_info)
+ return -ENOMEM;
+
sess->Preauth_HashValue = kmemdup(conn->preauth_info->Preauth_HashValue,
PREAUTH_HASHVALUE_SIZE, KSMBD_DEFAULT_GFP);
if (!sess->Preauth_HashValue)
@@ -1674,6 +1677,11 @@ int smb2_sess_setup(struct ksmbd_work *work)
ksmbd_debug(SMB, "Received smb2 session setup request\n");
+ if (!ksmbd_conn_need_setup(conn) && !ksmbd_conn_good(conn)) {
+ work->send_no_response = 1;
+ return rc;
+ }
+
WORK_BUFFERS(work, req, rsp);
rsp->StructureSize = cpu_to_le16(9);
@@ -1707,44 +1715,38 @@ int smb2_sess_setup(struct ksmbd_work *work)
if (conn->dialect != sess->dialect) {
rc = -EINVAL;
- ksmbd_user_session_put(sess);
goto out_err;
}
if (!(req->hdr.Flags & SMB2_FLAGS_SIGNED)) {
rc = -EINVAL;
- ksmbd_user_session_put(sess);
goto out_err;
}
if (strncmp(conn->ClientGUID, sess->ClientGUID,
SMB2_CLIENT_GUID_SIZE)) {
rc = -ENOENT;
- ksmbd_user_session_put(sess);
goto out_err;
}
if (sess->state == SMB2_SESSION_IN_PROGRESS) {
rc = -EACCES;
- ksmbd_user_session_put(sess);
goto out_err;
}
if (sess->state == SMB2_SESSION_EXPIRED) {
rc = -EFAULT;
- ksmbd_user_session_put(sess);
goto out_err;
}
- ksmbd_user_session_put(sess);
if (ksmbd_conn_need_reconnect(conn)) {
rc = -EFAULT;
+ ksmbd_user_session_put(sess);
sess = NULL;
goto out_err;
}
- sess = ksmbd_session_lookup(conn, sess_id);
- if (!sess) {
+ if (is_ksmbd_session_in_connection(conn, sess_id)) {
rc = -EACCES;
goto out_err;
}
@@ -1910,10 +1912,12 @@ out_err:
sess->last_active = jiffies;
sess->state = SMB2_SESSION_EXPIRED;
+ ksmbd_user_session_put(sess);
+ work->sess = NULL;
if (try_delay) {
ksmbd_conn_set_need_reconnect(conn);
ssleep(5);
- ksmbd_conn_set_need_negotiate(conn);
+ ksmbd_conn_set_need_setup(conn);
}
}
smb2_set_err_rsp(work);
@@ -2239,14 +2243,15 @@ int smb2_session_logoff(struct ksmbd_work *work)
return -ENOENT;
}
- ksmbd_destroy_file_table(&sess->file_table);
down_write(&conn->session_lock);
sess->state = SMB2_SESSION_EXPIRED;
up_write(&conn->session_lock);
- ksmbd_free_user(sess->user);
- sess->user = NULL;
- ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_NEGOTIATE);
+ if (sess->user) {
+ ksmbd_free_user(sess->user);
+ sess->user = NULL;
+ }
+ ksmbd_all_conn_set_status(sess_id, KSMBD_SESS_NEED_SETUP);
rsp->StructureSize = cpu_to_le16(4);
err = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_logoff_rsp));
@@ -2708,6 +2713,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
goto out;
}
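+ /* bounds-check the create context against its declared length */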
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_durable_reconn_v2_req)) {
+ err = -EINVAL;
+ goto out;
+ }
+
recon_v2 = (struct create_durable_reconn_v2_req *)context;
persistent_id = recon_v2->Fid.PersistentFileId;
dh_info->fp = ksmbd_lookup_durable_fd(persistent_id);
@@ -2741,6 +2753,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
goto out;
}
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_durable_reconn_req)) {
+ err = -EINVAL;
+ goto out;
+ }
+
recon = (struct create_durable_reconn_req *)context;
persistent_id = recon->Data.Fid.PersistentFileId;
dh_info->fp = ksmbd_lookup_durable_fd(persistent_id);
@@ -2766,6 +2785,13 @@ static int parse_durable_handle_context(struct ksmbd_work *work,
goto out;
}
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_durable_req_v2)) {
+ err = -EINVAL;
+ goto out;
+ }
+
durable_v2_blob =
(struct create_durable_req_v2 *)context;
ksmbd_debug(SMB, "Request for durable v2 open\n");
diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
index a3d8a905b07e..d742ba754348 100644
--- a/fs/smb/server/smb_common.h
+++ b/fs/smb/server/smb_common.h
@@ -72,6 +72,8 @@
#define FILE_SUPPORTS_ENCRYPTION 0x00020000
#define FILE_SUPPORTS_OBJECT_IDS 0x00010000
#define FILE_VOLUME_IS_COMPRESSED 0x00008000
+#define FILE_SUPPORTS_POSIX_UNLINK_RENAME 0x00000400
+#define FILE_RETURNS_CLEANUP_RESULT_INFO 0x00000200
#define FILE_SUPPORTS_REMOTE_STORAGE 0x00000100
#define FILE_SUPPORTS_REPARSE_POINTS 0x00000080
#define FILE_SUPPORTS_SPARSE_FILES 0x00000040
diff --git a/fs/smb/server/smbacl.c b/fs/smb/server/smbacl.c
index 49b128698670..5aa7a66334d9 100644
--- a/fs/smb/server/smbacl.c
+++ b/fs/smb/server/smbacl.c
@@ -270,6 +270,11 @@ static int sid_to_id(struct mnt_idmap *idmap,
return -EIO;
}
+ if (psid->num_subauth == 0) {
+ pr_err("%s: zero subauthorities!\n", __func__);
+ return -EIO;
+ }
+
if (sidtype == SIDOWNER) {
kuid_t uid;
uid_t id;
@@ -1026,7 +1031,9 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
struct dentry *parent = path->dentry->d_parent;
struct mnt_idmap *idmap = mnt_idmap(path->mnt);
int inherited_flags = 0, flags = 0, i, nt_size = 0, pdacl_size;
- int rc = 0, dacloffset, pntsd_type, pntsd_size, acl_len, aces_size;
+ int rc = 0, pntsd_type, pntsd_size, acl_len, aces_size;
+ unsigned int dacloffset;
+ size_t dacl_struct_end;
u16 num_aces, ace_cnt = 0;
char *aces_base;
bool is_dir = S_ISDIR(d_inode(path->dentry)->i_mode);
@@ -1035,8 +1042,11 @@ int smb_inherit_dacl(struct ksmbd_conn *conn,
parent, &parent_pntsd);
if (pntsd_size <= 0)
return -ENOENT;
+
dacloffset = le32_to_cpu(parent_pntsd->dacloffset);
- if (!dacloffset || (dacloffset + sizeof(struct smb_acl) > pntsd_size)) {
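+ /* use check_add_overflow() so a huge dacloffset cannot wrap the test */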
+ if (!dacloffset ||
+ check_add_overflow(dacloffset, sizeof(struct smb_acl), &dacl_struct_end) ||
+ dacl_struct_end > (size_t)pntsd_size) {
rc = -EINVAL;
goto free_parent_pntsd;
}
@@ -1240,7 +1250,9 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
struct smb_ntsd *pntsd = NULL;
struct smb_acl *pdacl;
struct posix_acl *posix_acls;
- int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size, dacl_offset;
+ int rc = 0, pntsd_size, acl_size, aces_size, pdacl_size;
+ unsigned int dacl_offset;
+ size_t dacl_struct_end;
struct smb_sid sid;
int granted = le32_to_cpu(*pdaccess & ~FILE_MAXIMAL_ACCESS_LE);
struct smb_ace *ace;
@@ -1259,7 +1271,8 @@ int smb_check_perm_dacl(struct ksmbd_conn *conn, const struct path *path,
dacl_offset = le32_to_cpu(pntsd->dacloffset);
if (!dacl_offset ||
- (dacl_offset + sizeof(struct smb_acl) > pntsd_size))
+ check_add_overflow(dacl_offset, sizeof(struct smb_acl), &dacl_struct_end) ||
+ dacl_struct_end > (size_t)pntsd_size)
goto err_out;
pdacl = (struct smb_acl *)((char *)pntsd + le32_to_cpu(pntsd->dacloffset));
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index c3785a5434f9..4998df04ab95 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -14,6 +14,7 @@
#include <linux/mempool.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
+#include <linux/string_choices.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <rdma/rw.h>
@@ -1396,7 +1397,7 @@ static int smb_direct_rdma_xmit(struct smb_direct_transport *t,
}
ksmbd_debug(RDMA, "RDMA %s, len %#x, needed credits %#x\n",
- is_read ? "read" : "write", buf_len, credits_needed);
+ str_read_write(is_read), buf_len, credits_needed);
ret = wait_for_rw_credits(t, credits_needed);
if (ret < 0)
@@ -2241,38 +2242,16 @@ bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
struct net_device *ndev;
- if (smb_dev->ib_dev->ops.get_netdev) {
- ndev = smb_dev->ib_dev->ops.get_netdev(
- smb_dev->ib_dev, i + 1);
- if (!ndev)
- continue;
+ ndev = ib_device_get_netdev(smb_dev->ib_dev, i + 1);
+ if (!ndev)
+ continue;
- if (ndev == netdev) {
- dev_put(ndev);
- rdma_capable = true;
- goto out;
- }
+ if (ndev == netdev) {
dev_put(ndev);
- /* if ib_dev does not implement ops.get_netdev
- * check for matching infiniband GUID in hw_addr
- */
- } else if (netdev->type == ARPHRD_INFINIBAND) {
- struct netdev_hw_addr *ha;
- union ib_gid gid;
- u32 port_num;
- int ret;
-
- netdev_hw_addr_list_for_each(
- ha, &netdev->dev_addrs) {
- memcpy(&gid, ha->addr + 4, sizeof(gid));
- ret = ib_find_gid(smb_dev->ib_dev, &gid,
- &port_num, NULL);
- if (!ret) {
- rdma_capable = true;
- goto out;
- }
- }
+ rdma_capable = true;
+ goto out;
}
+ dev_put(ndev);
}
}
out:
@@ -2289,7 +2268,7 @@ out:
}
ksmbd_debug(RDMA, "netdev(%s) rdma capable : %s\n",
- netdev->name, rdma_capable ? "true" : "false");
+ netdev->name, str_true_false(rdma_capable));
return rdma_capable;
}
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 4df2afa551dc..94e12efd92f2 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -123,7 +123,7 @@ int sysfs_move_dir_ns(struct kobject *kobj, struct kobject *new_parent_kobj,
new_parent = new_parent_kobj && new_parent_kobj->sd ?
new_parent_kobj->sd : sysfs_root_kn;
- return kernfs_rename_ns(kn, new_parent, kn->name, new_ns);
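+ /* a NULL name asks kernfs to keep the node's current name */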
+ return kernfs_rename_ns(kn, new_parent, NULL, new_ns);
}
/**
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index 6931308876c4..c3d3b079aedd 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -19,13 +19,19 @@
#include "sysfs.h"
+static struct kobject *sysfs_file_kobj(struct kernfs_node *kn)
+{
+ guard(rcu)();
+ return rcu_dereference(kn->__parent)->priv;
+}
+
/*
* Determine ktype->sysfs_ops for the given kernfs_node. This function
* must be called while holding an active reference.
*/
static const struct sysfs_ops *sysfs_file_ops(struct kernfs_node *kn)
{
- struct kobject *kobj = kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(kn);
if (kn->flags & KERNFS_LOCKDEP)
lockdep_assert_held(kn);
@@ -40,7 +46,7 @@ static const struct sysfs_ops *sysfs_file_ops(struct kernfs_node *kn)
static int sysfs_kf_seq_show(struct seq_file *sf, void *v)
{
struct kernfs_open_file *of = sf->private;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
ssize_t count;
char *buf;
@@ -78,7 +84,7 @@ static ssize_t sysfs_kf_bin_read(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
{
struct bin_attribute *battr = of->kn->priv;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
loff_t size = file_inode(of->file)->i_size;
if (!count)
@@ -105,7 +111,7 @@ static ssize_t sysfs_kf_read(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
{
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
ssize_t len;
/*
@@ -131,7 +137,7 @@ static ssize_t sysfs_kf_write(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
{
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
if (!count)
return 0;
@@ -144,7 +150,7 @@ static ssize_t sysfs_kf_bin_write(struct kernfs_open_file *of, char *buf,
size_t count, loff_t pos)
{
struct bin_attribute *battr = of->kn->priv;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
loff_t size = file_inode(of->file)->i_size;
if (size) {
@@ -168,7 +174,7 @@ static int sysfs_kf_bin_mmap(struct kernfs_open_file *of,
struct vm_area_struct *vma)
{
struct bin_attribute *battr = of->kn->priv;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
return battr->mmap(of->file, kobj, battr, vma);
}
@@ -177,7 +183,7 @@ static loff_t sysfs_kf_bin_llseek(struct kernfs_open_file *of, loff_t offset,
int whence)
{
struct bin_attribute *battr = of->kn->priv;
- struct kobject *kobj = of->kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(of->kn);
if (battr->llseek)
return battr->llseek(of->file, kobj, battr, offset, whence);
@@ -494,7 +500,7 @@ EXPORT_SYMBOL_GPL(sysfs_break_active_protection);
*/
void sysfs_unbreak_active_protection(struct kernfs_node *kn)
{
- struct kobject *kobj = kn->parent->priv;
+ struct kobject *kobj = sysfs_file_kobj(kn);
kernfs_unbreak_active_protection(kn);
kernfs_put(kn);
diff --git a/fs/ubifs/compress.c b/fs/ubifs/compress.c
index 0b48cbab8a3d..ea6f06adcd43 100644
--- a/fs/ubifs/compress.c
+++ b/fs/ubifs/compress.c
@@ -15,7 +15,8 @@
* decompression.
*/
-#include <linux/crypto.h>
+#include <crypto/acompress.h>
+#include <linux/highmem.h>
#include "ubifs.h"
/* Fake description object for the "none" compressor */
@@ -26,11 +27,8 @@ static struct ubifs_compressor none_compr = {
};
#ifdef CONFIG_UBIFS_FS_LZO
-static DEFINE_MUTEX(lzo_mutex);
-
static struct ubifs_compressor lzo_compr = {
.compr_type = UBIFS_COMPR_LZO,
- .comp_mutex = &lzo_mutex,
.name = "lzo",
.capi_name = "lzo",
};
@@ -42,13 +40,8 @@ static struct ubifs_compressor lzo_compr = {
#endif
#ifdef CONFIG_UBIFS_FS_ZLIB
-static DEFINE_MUTEX(deflate_mutex);
-static DEFINE_MUTEX(inflate_mutex);
-
static struct ubifs_compressor zlib_compr = {
.compr_type = UBIFS_COMPR_ZLIB,
- .comp_mutex = &deflate_mutex,
- .decomp_mutex = &inflate_mutex,
.name = "zlib",
.capi_name = "deflate",
};
@@ -60,13 +53,8 @@ static struct ubifs_compressor zlib_compr = {
#endif
#ifdef CONFIG_UBIFS_FS_ZSTD
-static DEFINE_MUTEX(zstd_enc_mutex);
-static DEFINE_MUTEX(zstd_dec_mutex);
-
static struct ubifs_compressor zstd_compr = {
.compr_type = UBIFS_COMPR_ZSTD,
- .comp_mutex = &zstd_enc_mutex,
- .decomp_mutex = &zstd_dec_mutex,
.name = "zstd",
.capi_name = "zstd",
};
@@ -80,6 +68,30 @@ static struct ubifs_compressor zstd_compr = {
/* All UBIFS compressors */
struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
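+/*
+ * Run a prepared acomp compression request synchronously. The destination
+ * is capped at in_len - UBIFS_MIN_COMPRESS_DIFF, so data that would not
+ * compress by at least that margin fails here and is left uncompressed by
+ * the caller.
+ */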
+static int ubifs_compress_req(const struct ubifs_info *c,
+ struct acomp_req *req,
+ void *out_buf, int *out_len,
+ const char *compr_name)
+{
+ struct crypto_wait wait;
+ int in_len = req->slen;
+ int dlen = *out_len;
+ int err;
+
+ dlen = min(dlen, in_len - UBIFS_MIN_COMPRESS_DIFF);
+
+ crypto_init_wait(&wait);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ acomp_request_set_dst_dma(req, out_buf, dlen);
+ err = crypto_acomp_compress(req);
+ err = crypto_wait_req(err, &wait);
+ *out_len = req->dlen;
+ acomp_request_free(req);
+
+ return err;
+}
+
/**
* ubifs_compress - compress data.
* @c: UBIFS file-system description object
@@ -112,23 +124,14 @@ void ubifs_compress(const struct ubifs_info *c, const void *in_buf,
if (in_len < UBIFS_MIN_COMPR_LEN)
goto no_compr;
- if (compr->comp_mutex)
- mutex_lock(compr->comp_mutex);
- err = crypto_comp_compress(compr->cc, in_buf, in_len, out_buf,
- (unsigned int *)out_len);
- if (compr->comp_mutex)
- mutex_unlock(compr->comp_mutex);
- if (unlikely(err)) {
- ubifs_warn(c, "cannot compress %d bytes, compressor %s, error %d, leave data uncompressed",
- in_len, compr->name, err);
- goto no_compr;
+ {
+ ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN);
+
+ acomp_request_set_src_dma(req, in_buf, in_len);
+ err = ubifs_compress_req(c, req, out_buf, out_len, compr->name);
}
- /*
- * If the data compressed only slightly, it is better to leave it
- * uncompressed to improve read speed.
- */
- if (in_len - *out_len < UBIFS_MIN_COMPRESS_DIFF)
+ if (err)
goto no_compr;
return;
@@ -140,6 +143,83 @@ no_compr:
}
/**
+ * ubifs_compress_folio - compress folio.
+ * @c: UBIFS file-system description object
+ * @in_folio: data to compress
+ * @in_offset: offset into @in_folio
+ * @in_len: length of the data to compress
+ * @out_buf: output buffer where compressed data should be stored
+ * @out_len: output buffer length is returned here
+ * @compr_type: type of compression to use on enter, actually used compression
+ * type on exit
+ *
+ * This function compresses @in_len bytes of data from folio @in_folio at
+ * offset @in_offset and stores the result in the output buffer @out_buf,
+ * with the resulting length in @out_len. If the data does not compress,
+ * it is just copied to @out_buf. The same happens if @compr_type is
+ * %UBIFS_COMPR_NONE or if a compression error occurred.
+ *
+ * Note, if the input data was not compressed, it is copied to the output
+ * buffer and %UBIFS_COMPR_NONE is returned in @compr_type.
+ */
+void ubifs_compress_folio(const struct ubifs_info *c, struct folio *in_folio,
+ size_t in_offset, int in_len, void *out_buf,
+ int *out_len, int *compr_type)
+{
+ int err;
+ struct ubifs_compressor *compr = ubifs_compressors[*compr_type];
+
+ if (*compr_type == UBIFS_COMPR_NONE)
+ goto no_compr;
+
+ /* If the input data is small, do not even try to compress it */
+ if (in_len < UBIFS_MIN_COMPR_LEN)
+ goto no_compr;
+
+ {
+ ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN);
+
+ acomp_request_set_src_folio(req, in_folio, in_offset, in_len);
+ err = ubifs_compress_req(c, req, out_buf, out_len, compr->name);
+ }
+
+ if (err)
+ goto no_compr;
+
+ return;
+
+no_compr:
+ memcpy_from_folio(out_buf, in_folio, in_offset, in_len);
+ *out_len = in_len;
+ *compr_type = UBIFS_COMPR_NONE;
+}
+
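+/*
+ * Decompression counterpart of ubifs_compress_req(): run the request
+ * synchronously, return the decompressed length via *out_len and free
+ * the request before returning.
+ */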
+static int ubifs_decompress_req(const struct ubifs_info *c,
+ struct acomp_req *req,
+ const void *in_buf, int in_len, int *out_len,
+ const char *compr_name)
+{
+ struct crypto_wait wait;
+ int err;
+
+ crypto_init_wait(&wait);
+ acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+ acomp_request_set_src_dma(req, in_buf, in_len);
+ err = crypto_acomp_decompress(req);
+ err = crypto_wait_req(err, &wait);
+ *out_len = req->dlen;
+
+ if (err)
+ ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d",
+ in_len, compr_name, err);
+
+ acomp_request_free(req);
+
+ return err;
+}
+
+/**
* ubifs_decompress - decompress data.
* @c: UBIFS file-system description object
* @in_buf: data to decompress
@@ -155,7 +235,6 @@ no_compr:
int ubifs_decompress(const struct ubifs_info *c, const void *in_buf,
int in_len, void *out_buf, int *out_len, int compr_type)
{
- int err;
struct ubifs_compressor *compr;
if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) {
@@ -176,17 +255,62 @@ int ubifs_decompress(const struct ubifs_info *c, const void *in_buf,
return 0;
}
- if (compr->decomp_mutex)
- mutex_lock(compr->decomp_mutex);
- err = crypto_comp_decompress(compr->cc, in_buf, in_len, out_buf,
- (unsigned int *)out_len);
- if (compr->decomp_mutex)
- mutex_unlock(compr->decomp_mutex);
- if (err)
- ubifs_err(c, "cannot decompress %d bytes, compressor %s, error %d",
- in_len, compr->name, err);
+ {
+ ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN);
- return err;
+ acomp_request_set_dst_dma(req, out_buf, *out_len);
+ return ubifs_decompress_req(c, req, in_buf, in_len, out_len,
+ compr->name);
+ }
+}
+
+/**
+ * ubifs_decompress_folio - decompress folio.
+ * @c: UBIFS file-system description object
+ * @in_buf: data to decompress
+ * @in_len: length of the data to decompress
+ * @out_folio: output folio where decompressed data should be stored
+ * @out_offset: offset into @out_folio
+ * @out_len: output length is returned here
+ * @compr_type: type of compression
+ *
+ * This function decompresses data from buffer @in_buf into folio
+ * @out_folio. The length of the uncompressed data is returned in
+ * @out_len. This function returns %0 on success or a negative error
+ * code on failure.
+ */
+int ubifs_decompress_folio(const struct ubifs_info *c, const void *in_buf,
+ int in_len, struct folio *out_folio,
+ size_t out_offset, int *out_len, int compr_type)
+{
+ struct ubifs_compressor *compr;
+
+ if (unlikely(compr_type < 0 || compr_type >= UBIFS_COMPR_TYPES_CNT)) {
+ ubifs_err(c, "invalid compression type %d", compr_type);
+ return -EINVAL;
+ }
+
+ compr = ubifs_compressors[compr_type];
+
+ if (unlikely(!compr->capi_name)) {
+ ubifs_err(c, "%s compression is not compiled in", compr->name);
+ return -EINVAL;
+ }
+
+ if (compr_type == UBIFS_COMPR_NONE) {
+ memcpy_to_folio(out_folio, out_offset, in_buf, in_len);
+ *out_len = in_len;
+ return 0;
+ }
+
+ {
+ ACOMP_REQUEST_ALLOC(req, compr->cc, GFP_NOFS | __GFP_NOWARN);
+
+ acomp_request_set_dst_folio(req, out_folio, out_offset,
+ *out_len);
+ return ubifs_decompress_req(c, req, in_buf, in_len, out_len,
+ compr->name);
+ }
}
/**
@@ -199,7 +323,7 @@ int ubifs_decompress(const struct ubifs_info *c, const void *in_buf,
static int __init compr_init(struct ubifs_compressor *compr)
{
if (compr->capi_name) {
- compr->cc = crypto_alloc_comp(compr->capi_name, 0, 0);
+ compr->cc = crypto_alloc_acomp(compr->capi_name, 0, 0);
if (IS_ERR(compr->cc)) {
pr_err("UBIFS error (pid %d): cannot initialize compressor %s, error %ld",
current->pid, compr->name, PTR_ERR(compr->cc));
@@ -218,7 +342,7 @@ static int __init compr_init(struct ubifs_compressor *compr)
static void compr_exit(struct ubifs_compressor *compr)
{
if (compr->capi_name)
- crypto_free_comp(compr->cc);
+ crypto_free_acomp(compr->cc);
}
/**
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index 5130123005e4..bf311c38d9a8 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -42,8 +42,8 @@
#include <linux/slab.h>
#include <linux/migrate.h>
-static int read_block(struct inode *inode, void *addr, unsigned int block,
- struct ubifs_data_node *dn)
+static int read_block(struct inode *inode, struct folio *folio, size_t offset,
+ unsigned int block, struct ubifs_data_node *dn)
{
struct ubifs_info *c = inode->i_sb->s_fs_info;
int err, len, out_len;
@@ -55,7 +55,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
if (err) {
if (err == -ENOENT)
/* Not found, so it must be a hole */
- memset(addr, 0, UBIFS_BLOCK_SIZE);
+ folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
return err;
}
@@ -74,8 +74,8 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
}
out_len = UBIFS_BLOCK_SIZE;
- err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
- le16_to_cpu(dn->compr_type));
+ err = ubifs_decompress_folio(c, &dn->data, dlen, folio, offset,
+ &out_len, le16_to_cpu(dn->compr_type));
if (err || len != out_len)
goto dump;
@@ -85,7 +85,7 @@ static int read_block(struct inode *inode, void *addr, unsigned int block,
* appending data). Ensure that the remainder is zeroed out.
*/
if (len < UBIFS_BLOCK_SIZE)
- memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+ folio_zero_range(folio, offset + len, UBIFS_BLOCK_SIZE - len);
return 0;
@@ -98,27 +98,25 @@ dump:
static int do_readpage(struct folio *folio)
{
- void *addr;
int err = 0, i;
unsigned int block, beyond;
struct ubifs_data_node *dn = NULL;
struct inode *inode = folio->mapping->host;
struct ubifs_info *c = inode->i_sb->s_fs_info;
loff_t i_size = i_size_read(inode);
+ size_t offset = 0;
dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
inode->i_ino, folio->index, i_size, folio->flags);
ubifs_assert(c, !folio_test_checked(folio));
ubifs_assert(c, !folio->private);
- addr = kmap_local_folio(folio, 0);
-
block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
if (block >= beyond) {
/* Reading beyond inode */
folio_set_checked(folio);
- addr = folio_zero_tail(folio, 0, addr);
+ folio_zero_range(folio, 0, folio_size(folio));
goto out;
}
@@ -135,9 +133,9 @@ static int do_readpage(struct folio *folio)
if (block >= beyond) {
/* Reading beyond inode */
err = -ENOENT;
- memset(addr, 0, UBIFS_BLOCK_SIZE);
+ folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
} else {
- ret = read_block(inode, addr, block, dn);
+ ret = read_block(inode, folio, offset, block, dn);
if (ret) {
err = ret;
if (err != -ENOENT)
@@ -147,17 +145,13 @@ static int do_readpage(struct folio *folio)
int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);
if (ilen && ilen < dlen)
- memset(addr + ilen, 0, dlen - ilen);
+ folio_zero_range(folio, offset + ilen, dlen - ilen);
}
}
if (++i >= (UBIFS_BLOCKS_PER_PAGE << folio_order(folio)))
break;
block += 1;
- addr += UBIFS_BLOCK_SIZE;
- if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
- kunmap_local(addr - UBIFS_BLOCK_SIZE);
- addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
- }
+ offset += UBIFS_BLOCK_SIZE;
}
if (err) {
@@ -177,8 +171,6 @@ out:
kfree(dn);
if (!err)
folio_mark_uptodate(folio);
- flush_dcache_folio(folio);
- kunmap_local(addr);
return err;
}
@@ -602,18 +594,16 @@ static int populate_page(struct ubifs_info *c, struct folio *folio,
struct inode *inode = folio->mapping->host;
loff_t i_size = i_size_read(inode);
unsigned int page_block;
- void *addr, *zaddr;
+ size_t offset = 0;
pgoff_t end_index;
dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
inode->i_ino, folio->index, i_size, folio->flags);
- addr = zaddr = kmap_local_folio(folio, 0);
-
end_index = (i_size - 1) >> PAGE_SHIFT;
if (!i_size || folio->index > end_index) {
hole = 1;
- addr = folio_zero_tail(folio, 0, addr);
+ folio_zero_range(folio, 0, folio_size(folio));
goto out_hole;
}
@@ -623,7 +613,7 @@ static int populate_page(struct ubifs_info *c, struct folio *folio,
if (nn >= bu->cnt) {
hole = 1;
- memset(addr, 0, UBIFS_BLOCK_SIZE);
+ folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
} else if (key_block(c, &bu->zbranch[nn].key) == page_block) {
struct ubifs_data_node *dn;
@@ -645,13 +635,15 @@ static int populate_page(struct ubifs_info *c, struct folio *folio,
goto out_err;
}
- err = ubifs_decompress(c, &dn->data, dlen, addr, &out_len,
- le16_to_cpu(dn->compr_type));
+ err = ubifs_decompress_folio(
+ c, &dn->data, dlen, folio, offset, &out_len,
+ le16_to_cpu(dn->compr_type));
if (err || len != out_len)
goto out_err;
if (len < UBIFS_BLOCK_SIZE)
- memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);
+ folio_zero_range(folio, offset + len,
+ UBIFS_BLOCK_SIZE - len);
nn += 1;
read = (i << UBIFS_BLOCK_SHIFT) + len;
@@ -660,23 +652,19 @@ static int populate_page(struct ubifs_info *c, struct folio *folio,
continue;
} else {
hole = 1;
- memset(addr, 0, UBIFS_BLOCK_SIZE);
+ folio_zero_range(folio, offset, UBIFS_BLOCK_SIZE);
}
if (++i >= UBIFS_BLOCKS_PER_PAGE)
break;
- addr += UBIFS_BLOCK_SIZE;
+ offset += UBIFS_BLOCK_SIZE;
page_block += 1;
- if (folio_test_highmem(folio) && (offset_in_page(addr) == 0)) {
- kunmap_local(addr - UBIFS_BLOCK_SIZE);
- addr = kmap_local_folio(folio, i * UBIFS_BLOCK_SIZE);
- }
}
if (end_index == folio->index) {
int len = i_size & (PAGE_SIZE - 1);
if (len && len < read)
- memset(zaddr + len, 0, read - len);
+ folio_zero_range(folio, len, read - len);
}
out_hole:
@@ -686,14 +674,10 @@ out_hole:
}
folio_mark_uptodate(folio);
- flush_dcache_folio(folio);
- kunmap_local(addr);
*n = nn;
return 0;
out_err:
- flush_dcache_folio(folio);
- kunmap_local(addr);
ubifs_err(c, "bad data node (block %u, inode %lu)",
page_block, inode->i_ino);
return -EINVAL;
@@ -898,7 +882,6 @@ static int do_writepage(struct folio *folio, size_t len)
{
int err = 0, blen;
unsigned int block;
- void *addr;
size_t offset = 0;
union ubifs_key key;
struct inode *inode = folio->mapping->host;
@@ -913,26 +896,19 @@ static int do_writepage(struct folio *folio, size_t len)
folio_start_writeback(folio);
- addr = kmap_local_folio(folio, offset);
block = folio->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
for (;;) {
blen = min_t(size_t, len, UBIFS_BLOCK_SIZE);
data_key_init(c, &key, inode->i_ino, block);
- err = ubifs_jnl_write_data(c, inode, &key, addr, blen);
+ err = ubifs_jnl_write_data(c, inode, &key, folio, offset, blen);
if (err)
break;
len -= blen;
if (!len)
break;
block += 1;
- addr += blen;
- if (folio_test_highmem(folio) && !offset_in_page(addr)) {
- kunmap_local(addr - blen);
- offset += PAGE_SIZE;
- addr = kmap_local_folio(folio, offset);
- }
+ offset += blen;
}
- kunmap_local(addr);
if (err) {
mapping_set_error(folio->mapping, err);
ubifs_err(c, "cannot write folio %lu of inode %lu, error %d",
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 36ba79fbd2ff..ee954e64ce7f 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -845,14 +845,16 @@ out_ro:
* @c: UBIFS file-system description object
* @inode: inode the data node belongs to
* @key: node key
- * @buf: buffer to write
+ * @folio: folio containing the data to write
+ * @offset: offset to write at
* @len: data length (must not exceed %UBIFS_BLOCK_SIZE)
*
* This function writes a data node to the journal. Returns %0 if the data node
* was successfully written, and a negative error code in case of failure.
*/
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
- const union ubifs_key *key, const void *buf, int len)
+ const union ubifs_key *key, struct folio *folio,
+ size_t offset, int len)
{
struct ubifs_data_node *data;
int err, lnum, offs, compr_type, out_len, compr_len, auth_len;
@@ -896,7 +898,8 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
compr_type = ui->compr_type;
out_len = compr_len = dlen - UBIFS_DATA_NODE_SZ;
- ubifs_compress(c, buf, len, &data->data, &compr_len, &compr_type);
+ ubifs_compress_folio(c, folio, offset, len, &data->data, &compr_len,
+ &compr_type);
ubifs_assert(c, compr_len <= UBIFS_BLOCK_SIZE);
if (encrypted) {
@@ -1625,7 +1628,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
int err, dlen, compr_type, out_len, data_size;
out_len = le32_to_cpu(dn->size);
- buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
+ buf = kmalloc(out_len, GFP_NOFS);
if (!buf)
return -ENOMEM;
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index 3375bbe0508c..256dbaeeb0de 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -124,13 +124,6 @@
#define OLD_ZNODE_AGE 20
#define YOUNG_ZNODE_AGE 5
-/*
- * Some compressors, like LZO, may end up with more data then the input buffer.
- * So UBIFS always allocates larger output buffer, to be sure the compressor
- * will not corrupt memory in case of worst case compression.
- */
-#define WORST_COMPR_FACTOR 2
-
#ifdef CONFIG_FS_ENCRYPTION
#define UBIFS_CIPHER_BLOCK_SIZE FSCRYPT_CONTENTS_ALIGNMENT
#else
@@ -141,7 +134,7 @@
* How much memory is needed for a buffer where we compress a data node.
*/
#define COMPRESSED_DATA_NODE_BUF_SZ \
- (UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR)
+ (UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE)
/* Maximum expected tree height for use by bottom_up_buf */
#define BOTTOM_UP_HEIGHT 64
@@ -270,6 +263,8 @@ enum {
ASSACT_PANIC,
};
+struct folio;
+
/**
* struct ubifs_old_idx - index node obsoleted since last commit start.
* @rb: rb-tree node
@@ -835,16 +830,12 @@ struct ubifs_node_range {
* struct ubifs_compressor - UBIFS compressor description structure.
* @compr_type: compressor type (%UBIFS_COMPR_LZO, etc)
* @cc: cryptoapi compressor handle
- * @comp_mutex: mutex used during compression
- * @decomp_mutex: mutex used during decompression
* @name: compressor name
* @capi_name: cryptoapi compressor name
*/
struct ubifs_compressor {
int compr_type;
- struct crypto_comp *cc;
- struct mutex *comp_mutex;
- struct mutex *decomp_mutex;
+ struct crypto_acomp *cc;
const char *name;
const char *capi_name;
};
@@ -1795,7 +1786,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
const struct fscrypt_name *nm, const struct inode *inode,
int deletion, int xent, int in_orphan);
int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
- const union ubifs_key *key, const void *buf, int len);
+ const union ubifs_key *key, struct folio *folio,
+ size_t offset, int len);
int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode);
int ubifs_jnl_delete_inode(struct ubifs_info *c, const struct inode *inode);
int ubifs_jnl_xrename(struct ubifs_info *c, const struct inode *fst_dir,
@@ -2095,8 +2087,14 @@ int __init ubifs_compressors_init(void);
void ubifs_compressors_exit(void);
void ubifs_compress(const struct ubifs_info *c, const void *in_buf, int in_len,
void *out_buf, int *out_len, int *compr_type);
+void ubifs_compress_folio(const struct ubifs_info *c, struct folio *folio,
+ size_t offset, int in_len, void *out_buf,
+ int *out_len, int *compr_type);
int ubifs_decompress(const struct ubifs_info *c, const void *buf, int len,
void *out, int *out_len, int compr_type);
+int ubifs_decompress_folio(const struct ubifs_info *c, const void *buf,
+ int len, struct folio *folio, size_t offset,
+ int *out_len, int compr_type);
/* sysfs.c */
int ubifs_sysfs_init(void);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index 412fe7c4d348..0d76c4f37b3e 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -69,7 +69,7 @@ static vm_fault_t udf_page_mkwrite(struct vm_fault *vmf)
goto out_unlock;
}
- block_commit_write(&folio->page, 0, end);
+ block_commit_write(folio, 0, end);
out_dirty:
folio_mark_dirty(folio);
folio_wait_stable(folio);
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 70c907fe8af9..4386dd845e40 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -810,6 +810,7 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
}
map->oflags = UDF_BLK_MAPPED;
map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
+ ret = 0;
goto out_free;
}
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 97c4d71115d8..d80f94346199 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -396,32 +396,6 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
goto out;
/*
- * If it's already released don't get it. This avoids to loop
- * in __get_user_pages if userfaultfd_release waits on the
- * caller of handle_userfault to release the mmap_lock.
- */
- if (unlikely(READ_ONCE(ctx->released))) {
- /*
- * Don't return VM_FAULT_SIGBUS in this case, so a non
- * cooperative manager can close the uffd after the
- * last UFFDIO_COPY, without risking to trigger an
- * involuntary SIGBUS if the process was starting the
- * userfaultfd while the userfaultfd was still armed
- * (but after the last UFFDIO_COPY). If the uffd
- * wasn't already closed when the userfault reached
- * this point, that would normally be solved by
- * userfaultfd_must_wait returning 'false'.
- *
- * If we were to return VM_FAULT_SIGBUS here, the non
- * cooperative manager would be instead forced to
- * always call UFFDIO_UNREGISTER before it can safely
- * close the uffd.
- */
- ret = VM_FAULT_NOPAGE;
- goto out;
- }
-
- /*
* Check that we can return VM_FAULT_RETRY.
*
* NOTE: it should become possible to return VM_FAULT_RETRY
@@ -457,6 +431,31 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason)
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out;
+ if (unlikely(READ_ONCE(ctx->released))) {
+ /*
+ * If a concurrent release is detected, do not return
+ * VM_FAULT_SIGBUS or VM_FAULT_NOPAGE; instead, always
+ * return VM_FAULT_RETRY with the lock released proactively.
+ *
+ * If we were to return VM_FAULT_SIGBUS here, the non
+ * cooperative manager would instead be forced to
+ * always call UFFDIO_UNREGISTER before it can safely
+ * close the uffd, to avoid triggering an involuntary SIGBUS.
+ *
+ * If we were to return VM_FAULT_NOPAGE, it would work for
+ * the fault path, in which the lock will be released
+ * later. However for GUP, faultin_page() does nothing
+ * special on NOPAGE, so GUP would spin retrying without
+ * releasing the mmap read lock, causing a possible livelock.
+ *
+ * Here only VM_FAULT_RETRY makes sure the mmap lock is
+ * released immediately, so that the thread concurrently
+ * releasing the userfaultfd can always make progress.
+ */
+ release_fault_lock(vmf);
+ goto out;
+ }
+
/* take the reference before dropping the mmap_lock */
userfaultfd_ctx_get(ctx);
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index fffd6fffdce0..ae0ca6858496 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -3,7 +3,7 @@ config XFS_FS
tristate "XFS filesystem support"
depends on BLOCK
select EXPORTFS
- select LIBCRC32C
+ select CRC32
select FS_IOMAP
help
XFS is a high performance journaling filesystem which originated
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 2f53ca7e12d4..726e29b837e6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -230,7 +230,7 @@ xfs_blockgc_queue(
rcu_read_lock();
if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work,
- msecs_to_jiffies(xfs_blockgc_secs * 1000));
+ secs_to_jiffies(xfs_blockgc_secs));
rcu_read_unlock();
}
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ce6b8ffbaa2c..ee3e0f284287 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2734,21 +2734,16 @@ xfs_mmaplock_two_inodes_and_break_dax_layout(
struct xfs_inode *ip2)
{
int error;
- bool retry;
- struct page *page;
if (ip1->i_ino > ip2->i_ino)
swap(ip1, ip2);
again:
- retry = false;
/* Lock the first inode */
xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
- error = xfs_break_dax_layouts(VFS_I(ip1), &retry);
- if (error || retry) {
+ error = xfs_break_dax_layouts(VFS_I(ip1));
+ if (error) {
xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
- if (error == 0 && retry)
- goto again;
return error;
}
@@ -2762,8 +2757,8 @@ again:
* need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
* for this nested lock case.
*/
- page = dax_layout_busy_page(VFS_I(ip2)->i_mapping);
- if (page && page_ref_count(page) != 1) {
+ error = dax_break_layout(VFS_I(ip2), 0, -1, NULL);
+ if (error) {
xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
goto again;
@@ -3007,21 +3002,11 @@ xfs_wait_dax_page(
int
xfs_break_dax_layouts(
- struct inode *inode,
- bool *retry)
+ struct inode *inode)
{
- struct page *page;
-
xfs_assert_ilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL);
- page = dax_layout_busy_page(inode->i_mapping);
- if (!page)
- return 0;
-
- *retry = true;
- return ___wait_var_event(&page->_refcount,
- atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
- 0, 0, xfs_wait_dax_page(inode));
+ return dax_break_layout_inode(inode, xfs_wait_dax_page);
}
int
@@ -3039,8 +3024,8 @@ xfs_break_layouts(
retry = false;
switch (reason) {
case BREAK_UNMAP:
- error = xfs_break_dax_layouts(inode, &retry);
- if (error || retry)
+ error = xfs_break_dax_layouts(inode);
+ if (error)
break;
fallthrough;
case BREAK_WRITE:
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 4bb7a99e0dc4..eae0159983ca 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -603,7 +603,7 @@ xfs_itruncate_extents(
return xfs_itruncate_extents_flags(tpp, ip, whichfork, new_size, 0);
}
-int xfs_break_dax_layouts(struct inode *inode, bool *retry);
+int xfs_break_dax_layouts(struct inode *inode);
int xfs_break_layouts(struct inode *inode, uint *iolock,
enum layout_break_reason reason);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 53944cc7af24..b2dd0c0bf509 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -768,6 +768,17 @@ xfs_fs_drop_inode(
return generic_drop_inode(inode);
}
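+/*
+ * Make sure no DAX page references remain before the inode's pagecache is
+ * finally truncated on eviction.
+ */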
+STATIC void
+xfs_fs_evict_inode(
+ struct inode *inode)
+{
+ if (IS_DAX(inode))
+ dax_break_layout_final(inode);
+
+ truncate_inode_pages_final(&inode->i_data);
+ clear_inode(inode);
+}
+
static void
xfs_mount_free(
struct xfs_mount *mp)
@@ -1275,6 +1286,7 @@ static const struct super_operations xfs_super_operations = {
.destroy_inode = xfs_fs_destroy_inode,
.dirty_inode = xfs_fs_dirty_inode,
.drop_inode = xfs_fs_drop_inode,
+ .evict_inode = xfs_fs_evict_inode,
.put_super = xfs_fs_put_super,
.sync_fs = xfs_fs_sync_fs,
.freeze_fs = xfs_fs_freeze,
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index b0857e3c1270..b7e82d85f043 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -569,8 +569,8 @@ retry_timeout_seconds_store(
if (val == -1)
cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
else {
- cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
- ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
+ cfg->retry_timeout = secs_to_jiffies(val);
+ ASSERT(secs_to_jiffies(val) < LONG_MAX);
}
return count;
}
@@ -687,8 +687,8 @@ xfs_error_sysfs_init_class(
if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
else
- cfg->retry_timeout = msecs_to_jiffies(
- init[i].retry_timeout * MSEC_PER_SEC);
+ cfg->retry_timeout =
+ secs_to_jiffies(init[i].retry_timeout);
}
return 0;