author    Kent Overstreet <kent.overstreet@gmail.com>    2019-02-09 19:20:57 -0500
committer Kent Overstreet <kent.overstreet@linux.dev>    2023-10-22 17:08:15 -0400
commit    3577df5f7f25f6669c4b53e76cf159d550a0fd83 (patch)
tree      03bbca1bf2983e2ff5e571b14fa7454d8b53c003 /fs
parent    3e0745e28363c1675a05775425312c049d5857b3 (diff)
bcachefs: serialize persistent_reserved
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
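
In short, this change replaces the old sectors/type pair in struct jset_entry_usage with a single __le64 v, keys usage entries by entry.btree_id (with FS_USAGE_RESERVED entries using entry.level as the replicas level), and moves per-replicas data usage into a new struct jset_entry_data_usage carried by the new BCH_JSET_ENTRY_data_usage entry type. Below is a minimal userspace sketch of the write-side encoding of persistent_reserved, mirroring the loop added to bch2_journal_super_entries_add_common() in the super-io.c hunk further down; the struct layouts and the BCH_REPLICAS_MAX value are simplified stand-ins for illustration, not the kernel definitions.

/*
 * Minimal userspace sketch (not the kernel code) of the write-side
 * encoding introduced by this commit: one BCH_JSET_ENTRY_usage entry
 * per nonzero replicas level, with FS_USAGE_RESERVED in entry.btree_id,
 * the level in entry.level, and the sector count in the new v field.
 * Struct layouts and BCH_REPLICAS_MAX are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BCH_REPLICAS_MAX        4       /* placeholder value */
#define BCH_JSET_ENTRY_usage    5
#define FS_USAGE_RESERVED       0

struct jset_entry {                     /* simplified stand-in */
        uint16_t u64s;
        uint8_t  btree_id;
        uint8_t  level;
        uint8_t  type;
        uint8_t  pad[3];
};

struct jset_entry_usage {               /* simplified stand-in */
        struct jset_entry entry;
        uint64_t v;                     /* __le64 in the on-disk format */
};

/* Mirrors the persistent_reserved loop added in super-io.c below */
static unsigned encode_persistent_reserved(struct jset_entry_usage *out,
                        const uint64_t reserved[BCH_REPLICAS_MAX])
{
        unsigned i, nr = 0;

        for (i = 0; i < BCH_REPLICAS_MAX; i++) {
                if (!reserved[i])
                        continue;       /* zero counters are not written */

                memset(&out[nr], 0, sizeof(out[nr]));
                out[nr].entry.u64s     = sizeof(out[nr]) / sizeof(uint64_t) - 1;
                out[nr].entry.type     = BCH_JSET_ENTRY_usage;
                out[nr].entry.btree_id = FS_USAGE_RESERVED;
                out[nr].entry.level    = i;
                out[nr].v              = reserved[i];
                nr++;
        }
        return nr;
}

int main(void)
{
        uint64_t reserved[BCH_REPLICAS_MAX] = { 0, 128, 0, 2048 };
        struct jset_entry_usage entries[BCH_REPLICAS_MAX];
        unsigned i, nr = encode_persistent_reserved(entries, reserved);

        for (i = 0; i < nr; i++)
                printf("level %u -> %llu sectors\n",
                       (unsigned)entries[i].entry.level,
                       (unsigned long long)entries[i].v);
        return 0;
}
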
Diffstat (limited to 'fs')
-rw-r--r--  fs/bcachefs/bcachefs_format.h   13
-rw-r--r--  fs/bcachefs/buckets.c            3
-rw-r--r--  fs/bcachefs/journal_io.c        21
-rw-r--r--  fs/bcachefs/recovery.c          24
-rw-r--r--  fs/bcachefs/replicas.c           7
-rw-r--r--  fs/bcachefs/super-io.c          43
6 files changed, 88 insertions, 23 deletions
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index bd41628f2995..71ba708c3e2b 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -1363,7 +1363,8 @@ static inline __u64 __bset_magic(struct bch_sb *sb)
x(prio_ptrs, 2) \
x(blacklist, 3) \
x(blacklist_v2, 4) \
- x(usage, 5)
+ x(usage, 5) \
+ x(data_usage, 6)
enum {
#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
@@ -1394,7 +1395,7 @@ struct jset_entry_blacklist_v2 {
};
enum {
- FS_USAGE_REPLICAS = 0,
+ FS_USAGE_RESERVED = 0,
FS_USAGE_INODES = 1,
FS_USAGE_KEY_VERSION = 2,
FS_USAGE_NR = 3
@@ -1402,8 +1403,12 @@ enum {
struct jset_entry_usage {
struct jset_entry entry;
- __le64 sectors;
- __u8 type;
+ __le64 v;
+} __attribute__((packed));
+
+struct jset_entry_data_usage {
+ struct jset_entry entry;
+ __le64 v;
struct bch_replicas_entry r;
} __attribute__((packed));
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index f65132a0ebf4..d2e047ee29cf 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -123,6 +123,9 @@ void bch2_fs_usage_initialize(struct bch_fs *c)
nr = sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr;
usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0], nr);
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ usage->s.reserved += usage->persistent_reserved[i];
+
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry *e =
cpu_replicas_entry(&c->replicas, i);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 2f04f0074ec4..bfa1045b0eb5 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -309,6 +309,27 @@ static int journal_entry_validate_usage(struct bch_fs *c,
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
int ret = 0;
+ if (journal_entry_err_on(bytes < sizeof(*u),
+ c,
+ "invalid journal entry usage: bad size")) {
+ journal_entry_null_range(entry, vstruct_next(entry));
+ return ret;
+ }
+
+fsck_err:
+ return ret;
+}
+
+static int journal_entry_validate_data_usage(struct bch_fs *c,
+ struct jset *jset,
+ struct jset_entry *entry,
+ int write)
+{
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
+ unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
+ int ret = 0;
+
if (journal_entry_err_on(bytes < sizeof(*u) ||
bytes < sizeof(*u) + u->r.nr_devs,
c,
diff --git a/fs/bcachefs/recovery.c b/fs/bcachefs/recovery.c
index a9b8d565c82f..31d2bce7bb57 100644
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ -75,23 +75,32 @@ static int journal_replay_entry_early(struct bch_fs *c,
struct jset_entry_usage *u =
container_of(entry, struct jset_entry_usage, entry);
- switch (u->type) {
- case FS_USAGE_REPLICAS:
- ret = bch2_replicas_set_usage(c, &u->r,
- le64_to_cpu(u->sectors));
+ switch (entry->btree_id) {
+ case FS_USAGE_RESERVED:
+ if (entry->level < BCH_REPLICAS_MAX)
+ percpu_u64_set(&c->usage[0]->
+ persistent_reserved[entry->level],
+ le64_to_cpu(u->v));
break;
case FS_USAGE_INODES:
percpu_u64_set(&c->usage[0]->s.nr_inodes,
- le64_to_cpu(u->sectors));
+ le64_to_cpu(u->v));
break;
case FS_USAGE_KEY_VERSION:
atomic64_set(&c->key_version,
- le64_to_cpu(u->sectors));
+ le64_to_cpu(u->v));
break;
}
break;
}
+ case BCH_JSET_ENTRY_data_usage: {
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
+ ret = bch2_replicas_set_usage(c, &u->r,
+ le64_to_cpu(u->v));
+ break;
+ }
}
return ret;
@@ -156,7 +165,8 @@ static bool journal_empty(struct list_head *journal)
list_for_each_entry(i, journal, list) {
vstruct_for_each(&i->j, entry) {
if (entry->type == BCH_JSET_ENTRY_btree_root ||
- entry->type == BCH_JSET_ENTRY_usage)
+ entry->type == BCH_JSET_ENTRY_usage ||
+ entry->type == BCH_JSET_ENTRY_data_usage)
continue;
if (entry->type == BCH_JSET_ENTRY_btree_keys &&
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 52a422ac5ace..6fee8fe37688 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -312,9 +312,14 @@ static unsigned reserve_journal_replicas(struct bch_fs *c,
journal_res_u64s +=
DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64));
+ /* persistent_reserved: */
+ journal_res_u64s +=
+ DIV_ROUND_UP(sizeof(struct jset_entry_usage), sizeof(u64)) *
+ BCH_REPLICAS_MAX;
+
for_each_cpu_replicas_entry(r, e)
journal_res_u64s +=
- DIV_ROUND_UP(sizeof(struct jset_entry_usage) +
+ DIV_ROUND_UP(sizeof(struct jset_entry_data_usage) +
e->nr_devs, sizeof(u64));
return journal_res_u64s;
}
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 9e991be3d90d..0cc8565b070f 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -900,7 +900,6 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
struct jset_entry *entry,
u64 journal_seq)
{
- struct jset_entry_usage *u;
struct btree_root *r;
unsigned i;
@@ -929,24 +928,45 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
{
u64 nr_inodes = percpu_u64_get(&c->usage[0]->s.nr_inodes);
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
- u = container_of(entry, struct jset_entry_usage, entry);
memset(u, 0, sizeof(*u));
u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
u->entry.type = BCH_JSET_ENTRY_usage;
- u->sectors = cpu_to_le64(nr_inodes);
- u->type = FS_USAGE_INODES;
+ u->entry.btree_id = FS_USAGE_INODES;
+ u->v = cpu_to_le64(nr_inodes);
entry = vstruct_next(entry);
}
{
- u = container_of(entry, struct jset_entry_usage, entry);
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
+
memset(u, 0, sizeof(*u));
u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
u->entry.type = BCH_JSET_ENTRY_usage;
- u->sectors = cpu_to_le64(atomic64_read(&c->key_version));
- u->type = FS_USAGE_KEY_VERSION;
+ u->entry.btree_id = FS_USAGE_KEY_VERSION;
+ u->v = cpu_to_le64(atomic64_read(&c->key_version));
+
+ entry = vstruct_next(entry);
+ }
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++) {
+ struct jset_entry_usage *u =
+ container_of(entry, struct jset_entry_usage, entry);
+ u64 sectors = percpu_u64_get(&c->usage[0]->persistent_reserved[i]);
+
+ if (!sectors)
+ continue;
+
+ memset(u, 0, sizeof(*u));
+ u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+ u->entry.type = BCH_JSET_ENTRY_usage;
+ u->entry.btree_id = FS_USAGE_RESERVED;
+ u->entry.level = i;
+ u->v = sectors;
entry = vstruct_next(entry);
}
@@ -955,13 +975,14 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
struct bch_replicas_entry *e =
cpu_replicas_entry(&c->replicas, i);
u64 sectors = percpu_u64_get(&c->usage[0]->data[i]);
+ struct jset_entry_data_usage *u =
+ container_of(entry, struct jset_entry_data_usage, entry);
- u = container_of(entry, struct jset_entry_usage, entry);
+ memset(u, 0, sizeof(*u));
u->entry.u64s = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
sizeof(u64)) - 1;
- u->entry.type = BCH_JSET_ENTRY_usage;
- u->sectors = cpu_to_le64(sectors);
- u->type = FS_USAGE_REPLICAS;
+ u->entry.type = BCH_JSET_ENTRY_data_usage;
+ u->v = cpu_to_le64(sectors);
unsafe_memcpy(&u->r, e, replicas_entry_bytes(e),
"embedded variable length struct");