summaryrefslogtreecommitdiff
path: root/mm
diff options
context:
space:
mode:
Diffstat (limited to 'mm')
-rw-r--r--mm/damon/sysfs.c10
-rw-r--r--mm/memory.c18
-rw-r--r--mm/mempolicy.c10
-rw-r--r--mm/mseal.c3
-rw-r--r--mm/pagewalk.c25
-rw-r--r--mm/swap_state.c9
6 files changed, 59 insertions, 16 deletions
diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c
index 576d1ddd736b..6a44a2f3d8fc 100644
--- a/mm/damon/sysfs.c
+++ b/mm/damon/sysfs.c
@@ -1524,8 +1524,10 @@ static int damon_sysfs_commit_input(void *data)
if (IS_ERR(param_ctx))
return PTR_ERR(param_ctx);
test_ctx = damon_sysfs_new_test_ctx(kdamond->damon_ctx);
- if (!test_ctx)
+ if (!test_ctx) {
+ damon_destroy_ctx(param_ctx);
return -ENOMEM;
+ }
err = damon_commit_ctx(test_ctx, param_ctx);
if (err)
goto out;
@@ -1618,9 +1620,12 @@ static int damon_sysfs_repeat_call_fn(void *data)
if (!mutex_trylock(&damon_sysfs_lock))
return 0;
+ if (sysfs_kdamond->contexts->nr != 1)
+ goto out;
damon_sysfs_upd_tuned_intervals(sysfs_kdamond);
damon_sysfs_upd_schemes_stats(sysfs_kdamond);
damon_sysfs_upd_schemes_effective_quotas(sysfs_kdamond);
+out:
mutex_unlock(&damon_sysfs_lock);
return 0;
}
@@ -1747,6 +1752,9 @@ static int damon_sysfs_update_schemes_tried_regions(
static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd,
struct damon_sysfs_kdamond *kdamond)
{
+ if (cmd != DAMON_SYSFS_CMD_OFF && kdamond->contexts->nr != 1)
+ return -EINVAL;
+
switch (cmd) {
case DAMON_SYSFS_CMD_ON:
return damon_sysfs_turn_damon_on(kdamond);
diff --git a/mm/memory.c b/mm/memory.c
index 2f815a34d924..c65e82c86fed 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -6815,11 +6815,16 @@ retry:
pudp = pud_offset(p4dp, address);
pud = pudp_get(pudp);
- if (pud_none(pud))
+ if (!pud_present(pud))
goto out;
if (pud_leaf(pud)) {
lock = pud_lock(mm, pudp);
- if (!unlikely(pud_leaf(pud))) {
+ pud = pudp_get(pudp);
+
+ if (unlikely(!pud_present(pud))) {
+ spin_unlock(lock);
+ goto out;
+ } else if (unlikely(!pud_leaf(pud))) {
spin_unlock(lock);
goto retry;
}
@@ -6831,9 +6836,16 @@ retry:
pmdp = pmd_offset(pudp, address);
pmd = pmdp_get_lockless(pmdp);
+ if (!pmd_present(pmd))
+ goto out;
if (pmd_leaf(pmd)) {
lock = pmd_lock(mm, pmdp);
- if (!unlikely(pmd_leaf(pmd))) {
+ pmd = pmdp_get(pmdp);
+
+ if (unlikely(!pmd_present(pmd))) {
+ spin_unlock(lock);
+ goto out;
+ } else if (unlikely(!pmd_leaf(pmd))) {
spin_unlock(lock);
goto retry;
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0e5175f1c767..cf92bd6a8226 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -487,7 +487,13 @@ void __mpol_put(struct mempolicy *pol)
{
if (!atomic_dec_and_test(&pol->refcnt))
return;
- kmem_cache_free(policy_cache, pol);
+ /*
+ * Required to allow mmap_lock_speculative*() access, see for example
+ * futex_key_to_node_opt(). All accesses are serialized by mmap_lock,
+ * however the speculative lock section is not bound by the normal lock
+ * boundaries, requiring RCU freeing.
+ */
+ kfree_rcu(pol, rcu);
}
EXPORT_SYMBOL_FOR_MODULES(__mpol_put, "kvm");
@@ -1020,7 +1026,7 @@ static int vma_replace_policy(struct vm_area_struct *vma,
}
old = vma->vm_policy;
- vma->vm_policy = new; /* protected by mmap_lock */
+ WRITE_ONCE(vma->vm_policy, new); /* protected by mmap_lock */
mpol_put(old);
return 0;
diff --git a/mm/mseal.c b/mm/mseal.c
index 316b5e1dec78..ac58643181f7 100644
--- a/mm/mseal.c
+++ b/mm/mseal.c
@@ -56,7 +56,6 @@ static int mseal_apply(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct vm_area_struct *vma, *prev;
- unsigned long curr_start = start;
VMA_ITERATOR(vmi, mm, start);
/* We know there are no gaps so this will be non-NULL. */
@@ -66,6 +65,7 @@ static int mseal_apply(struct mm_struct *mm,
prev = vma;
for_each_vma_range(vmi, vma, end) {
+ const unsigned long curr_start = MAX(vma->vm_start, start);
const unsigned long curr_end = MIN(vma->vm_end, end);
if (!(vma->vm_flags & VM_SEALED)) {
@@ -79,7 +79,6 @@ static int mseal_apply(struct mm_struct *mm,
}
prev = vma;
- curr_start = curr_end;
}
return 0;
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index a94c401ab2cf..4e7bcd975c54 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -97,6 +97,7 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
+ pud_t pudval = pudp_get(pud);
pmd_t *pmd;
unsigned long next;
const struct mm_walk_ops *ops = walk->ops;
@@ -105,6 +106,24 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
int err = 0;
int depth = real_depth(3);
+ /*
+ * For PTE handling, pte_offset_map_lock() takes care of checking
+ * whether there actually is a page table. But it also has to be
+ * very careful about concurrent page table reclaim.
+ *
+ * Similarly, we have to be careful here - a PUD entry that points
+ * to a PMD table cannot go away, so we can just walk it. But if
+ * it's something else, we need to ensure we didn't race something,
+ * so need to retry.
+ *
+ * A pertinent example of this is a PUD refault after PUD split -
+ * we will need to split again or risk accessing invalid memory.
+ */
+ if (!pud_present(pudval) || pud_leaf(pudval)) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
+
pmd = pmd_offset(pud, addr);
do {
again:
@@ -218,12 +237,12 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
else if (pud_leaf(*pud) || !pud_present(*pud))
continue; /* Nothing to do. */
- if (pud_none(*pud))
- goto again;
-
err = walk_pmd_range(pud, addr, next, walk);
if (err)
break;
+
+ if (walk->action == ACTION_AGAIN)
+ goto again;
} while (pud++, addr = next, addr != end);
return err;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 6d0eef7470be..48aff2c917c0 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -494,6 +494,10 @@ static struct folio *__swap_cache_prepare_and_add(swp_entry_t entry,
__folio_set_locked(folio);
__folio_set_swapbacked(folio);
+
+ if (!charged && mem_cgroup_swapin_charge_folio(folio, NULL, gfp, entry))
+ goto failed;
+
for (;;) {
ret = swap_cache_add_folio(folio, entry, &shadow);
if (!ret)
@@ -514,11 +518,6 @@ static struct folio *__swap_cache_prepare_and_add(swp_entry_t entry,
goto failed;
}
- if (!charged && mem_cgroup_swapin_charge_folio(folio, NULL, gfp, entry)) {
- swap_cache_del_folio(folio);
- goto failed;
- }
-
memcg1_swapin(entry, folio_nr_pages(folio));
if (shadow)
workingset_refault(folio, shadow);