author    Linus Torvalds <torvalds@linux-foundation.org>  2024-11-16 16:00:38 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-11-16 16:00:38 -0800
commit    4a5df37964673effcd9f84041f7423206a5ae5f2
tree      795acf39560a12b769a6f59cf006d26ba1d1639f
parent    b84eeed05a8823074866924f4c072bdf2d533f5d
parent    d1aa0c04294e29883d65eac6c2f72fe95cc7c049
Merge tag 'mm-hotfixes-stable-2024-11-16-15-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull hotfixes from Andrew Morton:
 "10 hotfixes, 7 of which are cc:stable. All singletons, please see the
  changelogs for details"

* tag 'mm-hotfixes-stable-2024-11-16-15-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm: revert "mm: shmem: fix data-race in shmem_getattr()"
  ocfs2: uncache inode which has failed entering the group
  mm: fix NULL pointer dereference in alloc_pages_bulk_noprof
  mm, doc: update read_ahead_kb for MADV_HUGEPAGE
  fs/proc/task_mmu: prevent integer overflow in pagemap_scan_get_args()
  sched/task_stack: fix object_is_on_stack() for KASAN tagged pointers
  crash, powerpc: default to CRASH_DUMP=n on PPC_BOOK3S_32
  mm/mremap: fix address wraparound in move_page_tables()
  tools/mm: fix compile error
  mm, swap: fix allocation and scanning race with swapoff
Diffstat (limited to 'mm')
-rw-r--r--  mm/mremap.c      2
-rw-r--r--  mm/page_alloc.c  3
-rw-r--r--  mm/shmem.c       2
-rw-r--r--  mm/swapfile.c    22
4 files changed, 22 insertions(+), 7 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index dda09e957a5d..dee98ff2bbd6 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -648,7 +648,7 @@ again:
* Prevent negative return values when {old,new}_addr was realigned
* but we broke out of the above loop for the first PMD itself.
*/
- if (len + old_addr < old_end)
+ if (old_addr < old_end - len)
return 0;
return len + old_addr - old_end; /* how much done */
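
The old test could pass spuriously: `len + old_addr` is evaluated in unsigned arithmetic, so when the sum exceeds the word size it wraps past zero and compares less than old_end, and a fully completed move is reported as zero bytes done. Because old_end was computed up front as the start address plus len, `old_end - len` can never underflow, so the rewritten comparison is wrap-safe. A standalone sketch of the failure mode, emulating 32-bit addresses with uint32_t (the values are hypothetical, not from the commit):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 32-bit mremap: a 768 MiB range starting at 0xb0000000. */
	uint32_t len      = 0x30000000u;
	uint32_t old_end  = 0xb0000000u + len;	/* 0xe0000000 */
	uint32_t old_addr = old_end;		/* copy loop finished: all moved */

	/* Old check: len + old_addr = 0x110000000 truncates to 0x10000000,
	 * which is < old_end, so the full move is misreported as 0 bytes. */
	if (len + old_addr < old_end)
		printf("old check: wraps, wrongly returns 0\n");

	/* New check: old_end - len is exactly the start address and cannot
	 * underflow, so the comparison stays correct near the top of the
	 * address space. */
	if (old_addr < old_end - len)
		printf("new check: returns 0\n");
	else	/* the return expression itself is wrap-safe modulo 2^32 */
		printf("new check: returns %u bytes done\n",
		       len + old_addr - old_end);

	return 0;
}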
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 216fbbfbedcf..b6958333054d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4607,7 +4607,8 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
gfp = alloc_gfp;
/* Find an allowed local zone that meets the low watermark. */
- for_each_zone_zonelist_nodemask(zone, z, ac.zonelist, ac.highest_zoneidx, ac.nodemask) {
+ z = ac.preferred_zoneref;
+ for_next_zone_zonelist_nodemask(zone, z, ac.highest_zoneidx, ac.nodemask) {
unsigned long mark;
if (cpusets_enabled() && (alloc_flags & ALLOC_CPUSET) &&
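
The loop previously restarted the zone walk from the head of the zonelist even though ac.preferred_zoneref had already been computed during setup; per the fix's title, that restart could end in a NULL pointer dereference. The new code seeds the cursor z with the validated ac.preferred_zoneref and resumes from it via for_next_zone_zonelist_nodemask(). A toy sketch of the resume-from-cursor pattern (the struct and helper names here are illustrative stand-ins, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for zoneref/zonelist; entries with a NULL zone act as
 * placeholders the walk must skip, loosely mirroring the kernel's lists. */
struct zone { const char *name; };
struct zoneref { struct zone *zone; };

/* Walk forward from a cursor, yielding non-NULL zones only. */
static struct zone *next_zone(struct zoneref **z, struct zoneref *end)
{
	while (*z < end && !(*z)->zone)
		(*z)++;
	return *z < end ? (*z)->zone : NULL;
}

int main(void)
{
	struct zone normal = { "Normal" }, dma32 = { "DMA32" };
	struct zoneref list[] = { { NULL }, { &normal }, { &dma32 } };
	struct zoneref *end = list + 3;

	/* Setup computed this cursor once; resuming here continues from a
	 * known-good entry instead of re-deriving the starting point. */
	struct zoneref *preferred = &list[1];

	struct zoneref *z = preferred;
	for (struct zone *zone = next_zone(&z, end); zone;
	     z++, zone = next_zone(&z, end))
		printf("scanning %s\n", zone->name);

	return 0;
}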
diff --git a/mm/shmem.c b/mm/shmem.c
index e87f5d6799a7..568bb290bdce 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1166,9 +1166,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
stat->attributes_mask |= (STATX_ATTR_APPEND |
STATX_ATTR_IMMUTABLE |
STATX_ATTR_NODUMP);
- inode_lock_shared(inode);
generic_fillattr(idmap, request_mask, inode, stat);
- inode_unlock_shared(inode);
if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
stat->blksize = HPAGE_PMD_SIZE;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 9c85bd46ab7f..b0a9071cfe1d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -664,12 +664,15 @@ static bool cluster_scan_range(struct swap_info_struct *si,
return true;
}
-static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
+static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
unsigned int start, unsigned char usage,
unsigned int order)
{
unsigned int nr_pages = 1 << order;
+ if (!(si->flags & SWP_WRITEOK))
+ return false;
+
if (cluster_is_free(ci)) {
if (nr_pages < SWAPFILE_CLUSTER) {
list_move_tail(&ci->list, &si->nonfull_clusters[order]);
@@ -690,6 +693,8 @@ static void cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
list_move_tail(&ci->list, &si->full_clusters);
ci->flags = CLUSTER_FLAG_FULL;
}
+
+ return true;
}
static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset,
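
cluster_alloc_range() now returns a bool so callers learn when the device refused the allocation: swapoff clears SWP_WRITEOK before tearing a device down, and since both that clearing and the check added here happen under the swap device's locking, an allocation cannot race past a swapoff already in progress. A minimal pthread sketch of the check-the-flag-under-the-shared-lock idea (all names are illustrative, not the kernel's API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool writeok = true;	/* plays the role of SWP_WRITEOK */
static int free_slots = 100;

static int alloc_slot(void)
{
	int slot = -1;

	pthread_mutex_lock(&lock);
	if (writeok && free_slots > 0)	/* flag tested under the lock */
		slot = --free_slots;
	pthread_mutex_unlock(&lock);
	return slot;			/* -1: device going away, or full */
}

static void start_teardown(void)
{
	pthread_mutex_lock(&lock);
	writeok = false;		/* cleared under the same lock */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	printf("before teardown: %d\n", alloc_slot());	/* succeeds */
	start_teardown();
	printf("after teardown:  %d\n", alloc_slot());	/* -1, refused */
	return 0;
}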
@@ -713,7 +718,10 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigne
while (offset <= end) {
if (cluster_scan_range(si, ci, offset, nr_pages)) {
- cluster_alloc_range(si, ci, offset, usage, order);
+ if (!cluster_alloc_range(si, ci, offset, usage, order)) {
+ offset = SWAP_NEXT_INVALID;
+ goto done;
+ }
*foundp = offset;
if (ci->count == SWAPFILE_CLUSTER) {
offset = SWAP_NEXT_INVALID;
@@ -805,7 +813,11 @@ new_cluster:
if (!list_empty(&si->free_clusters)) {
ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list);
offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage);
- VM_BUG_ON(!found);
+ /*
+ * Either we didn't touch the cluster due to swapoff,
* or the allocation must succeed.
+ */
+ VM_BUG_ON((si->flags & SWP_WRITEOK) && !found);
goto done;
}
@@ -1041,6 +1053,8 @@ static int cluster_alloc_swap(struct swap_info_struct *si,
VM_BUG_ON(!si->cluster_info);
+ si->flags += SWP_SCANNING;
+
while (n_ret < nr) {
unsigned long offset = cluster_alloc_swap_entry(si, order, usage);
@@ -1049,6 +1063,8 @@ static int cluster_alloc_swap(struct swap_info_struct *si,
slots[n_ret++] = swp_entry(si->type, offset);
}
+ si->flags -= SWP_SCANNING;
+
return n_ret;
}
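
The `si->flags += SWP_SCANNING` / `-= SWP_SCANNING` pair is intentional arithmetic, not a typo: SWP_SCANNING is the highest-valued entry in the flags enum and doubles as a reference count of in-flight scanners, so concurrent += calls stack into the bits above it and swapoff can wait for si->flags to drop below SWP_SCANNING before freeing the device's structures. Together with the SWP_WRITEOK check above (which is why the VM_BUG_ON had to tolerate !found), this closes the race with swapoff. A small sketch of the flags-as-counter trick, with illustrative constants:

#include <stdio.h>

#define FLAG_WRITEOK	0x1u
#define FLAG_SCANNING	0x8000u	/* highest flag: repeated adds stack upward */

int main(void)
{
	unsigned int flags = FLAG_WRITEOK;

	flags += FLAG_SCANNING;	/* scanner A enters */
	flags += FLAG_SCANNING;	/* scanner B enters */
	printf("scanners active: %u\n", flags / FLAG_SCANNING);	/* 2 */

	flags -= FLAG_SCANNING;	/* scanner A leaves */
	printf("teardown may proceed: %s\n",
	       flags >= FLAG_SCANNING ? "no" : "yes");		/* no */

	flags -= FLAG_SCANNING;	/* scanner B leaves */
	printf("teardown may proceed: %s\n",
	       flags >= FLAG_SCANNING ? "no" : "yes");		/* yes */
	return 0;
}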