author    Linus Torvalds <torvalds@linux-foundation.org>  2024-07-27 10:26:41 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-07-27 10:26:41 -0700
commit    7b0acd911ca05b2555d834cd93ffcfab1ade828c (patch)
tree      a6ccaaabac4682cf129ad8896ae089d2bcd2971c
parent    5256184b6119bd1da541d8deb487c2f9131a6c9f (diff)
parent    4811f7af6090e8f5a398fbdd766f903ef6c0d787 (diff)
Merge tag 'mm-hotfixes-stable-2024-07-26-14-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull misc hotfixes from Andrew Morton:
 "11 hotfixes, 7 of which are cc:stable. 7 are MM, 4 are other"

* tag 'mm-hotfixes-stable-2024-07-26-14-33' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  nilfs2: handle inconsistent state in nilfs_btnode_create_block()
  selftests/mm: skip test for non-LPA2 and non-LVA systems
  mm/page_alloc: fix pcp->count race between drain_pages_zone() vs __rmqueue_pcplist()
  mm: memcg: add cacheline padding after lruvec in mem_cgroup_per_node
  alloc_tag: outline and export free_reserved_page()
  decompress_bunzip2: fix rare decompression failure
  mm/huge_memory: avoid PMD-size page cache if needed
  mm: huge_memory: use !CONFIG_64BIT to relax huge page alignment on 32 bit machines
  mm: fix old/young bit handling in the faulting path
  dt-bindings: arm: update James Clark's email address
  MAINTAINERS: mailmap: update James Clark's email address
-rw-r--r--  .mailmap                                                               |  1
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml    |  2
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml  |  2
-rw-r--r--  MAINTAINERS                                                            |  4
-rw-r--r--  fs/nilfs2/btnode.c                                                     | 25
-rw-r--r--  fs/nilfs2/btree.c                                                      |  4
-rw-r--r--  include/linux/huge_mm.h                                                | 12
-rw-r--r--  include/linux/memcontrol.h                                             |  1
-rw-r--r--  include/linux/mm.h                                                     | 16
-rw-r--r--  lib/decompress_bunzip2.c                                               |  3
-rw-r--r--  mm/huge_memory.c                                                       | 14
-rw-r--r--  mm/memory.c                                                            |  2
-rw-r--r--  mm/page_alloc.c                                                        | 35
-rw-r--r--  tools/testing/selftests/mm/va_high_addr_switch.c                       | 16

14 files changed, 95 insertions, 42 deletions
diff --git a/.mailmap b/.mailmap
index cbdf5d63c381..e51d76df75c2 100644
--- a/.mailmap
+++ b/.mailmap
@@ -260,6 +260,7 @@ Jaegeuk Kim <jaegeuk@kernel.org> <jaegeuk@motorola.com>
Jakub Kicinski <kuba@kernel.org> <jakub.kicinski@netronome.com>
James Bottomley <jejb@mulgrave.(none)>
James Bottomley <jejb@titanic.il.steeleye.com>
+James Clark <james.clark@linaro.org> <james.clark@arm.com>
James E Wilson <wilson@specifix.com>
James Hogan <jhogan@kernel.org> <james@albanarts.com>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml
index c960c8e0a9a5..08b89b62c505 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-sink.yaml
@@ -30,7 +30,7 @@ description: |
maintainers:
- Mike Leach <mike.leach@linaro.org>
- Suzuki K Poulose <suzuki.poulose@arm.com>
- - James Clark <james.clark@arm.com>
+ - James Clark <james.clark@linaro.org>
- Mao Jinlong <quic_jinlmao@quicinc.com>
- Hao Zhang <quic_hazha@quicinc.com>
diff --git a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
index 6745b4cc8f1c..d50a60368e27 100644
--- a/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,coresight-dummy-source.yaml
@@ -29,7 +29,7 @@ description: |
maintainers:
- Mike Leach <mike.leach@linaro.org>
- Suzuki K Poulose <suzuki.poulose@arm.com>
- - James Clark <james.clark@arm.com>
+ - James Clark <james.clark@linaro.org>
- Mao Jinlong <quic_jinlmao@quicinc.com>
- Hao Zhang <quic_hazha@quicinc.com>
diff --git a/MAINTAINERS b/MAINTAINERS
index c0a3d9e93689..5bbcfea5e3fa 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2196,7 +2196,7 @@ N: digicolor
ARM/CORESIGHT FRAMEWORK AND DRIVERS
M: Suzuki K Poulose <suzuki.poulose@arm.com>
R: Mike Leach <mike.leach@linaro.org>
-R: James Clark <james.clark@arm.com>
+R: James Clark <james.clark@linaro.org>
L: coresight@lists.linaro.org (moderated for non-subscribers)
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -17894,7 +17894,7 @@ F: tools/perf/
PERFORMANCE EVENTS TOOLING ARM64
R: John Garry <john.g.garry@oracle.com>
R: Will Deacon <will@kernel.org>
-R: James Clark <james.clark@arm.com>
+R: James Clark <james.clark@linaro.org>
R: Mike Leach <mike.leach@linaro.org>
R: Leo Yan <leo.yan@linux.dev>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 0131d83b912d..c034080c334b 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -51,12 +51,21 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
if (unlikely(!bh))
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (unlikely(buffer_mapped(bh) || buffer_uptodate(bh) ||
buffer_dirty(bh))) {
- brelse(bh);
- BUG();
+ /*
+ * The block buffer at the specified new address was already
+ * in use. This can happen if it is a virtual block number
+ * and has been reallocated due to corruption of the bitmap
+ * used to manage its allocation state (if not, the buffer
+ * clearing of an abandoned b-tree node is missing somewhere).
+ */
+ nilfs_error(inode->i_sb,
+ "state inconsistency probably due to duplicate use of b-tree node block address %llu (ino=%lu)",
+ (unsigned long long)blocknr, inode->i_ino);
+ goto failed;
}
memset(bh->b_data, 0, i_blocksize(inode));
bh->b_bdev = inode->i_sb->s_bdev;
@@ -67,6 +76,12 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
folio_unlock(bh->b_folio);
folio_put(bh->b_folio);
return bh;
+
+failed:
+ folio_unlock(bh->b_folio);
+ folio_put(bh->b_folio);
+ brelse(bh);
+ return ERR_PTR(-EIO);
}
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
@@ -217,8 +232,8 @@ retry:
}
nbh = nilfs_btnode_create_block(btnc, newkey);
- if (!nbh)
- return -ENOMEM;
+ if (IS_ERR(nbh))
+ return PTR_ERR(nbh);
BUG_ON(nbh == obh);
ctxt->newbh = nbh;
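
The nilfs2 change above replaces a bare NULL return (and a BUG() on inconsistent state) with the kernel's ERR_PTR convention, so callers can tell -ENOMEM apart from the new -EIO corruption case. Below is a minimal userspace sketch of that convention; the helper bodies mirror include/linux/err.h but are re-declared so the example compiles standalone, and create_block() is a hypothetical stand-in for nilfs_btnode_create_block():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    /* Userspace copies of the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Stand-in for nilfs_btnode_create_block(): encode failures in the pointer. */
    static void *create_block(int simulate)
    {
            if (simulate == 1)
                    return ERR_PTR(-ENOMEM);        /* allocation failure */
            if (simulate == 2)
                    return ERR_PTR(-EIO);           /* inconsistent metadata */
            return malloc(64);                      /* success */
    }

    int main(void)
    {
            void *bh = create_block(2);

            if (IS_ERR(bh)) {                       /* caller pattern from the patch */
                    printf("error: %ld\n", PTR_ERR(bh));
                    return 1;
            }
            free(bh);
            return 0;
    }

The distinct error codes matter here: nilfs_error() already marks the filesystem as inconsistent, and the caller now propagates -EIO instead of crashing the machine with BUG().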
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index a139970e4804..862bdf23120e 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -63,8 +63,8 @@ static int nilfs_btree_get_new_block(const struct nilfs_bmap *btree,
struct buffer_head *bh;
bh = nilfs_btnode_create_block(btnc, ptr);
- if (!bh)
- return -ENOMEM;
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
set_buffer_nilfs_volatile(bh);
*bhp = bh;
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cff002be83eb..e25d9ebfdf89 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -74,14 +74,20 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr;
#define THP_ORDERS_ALL_ANON ((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
/*
- * Mask of all large folio orders supported for file THP.
+ * Mask of all large folio orders supported for file THP. Folios in a DAX
+ * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply
+ * to them.
*/
-#define THP_ORDERS_ALL_FILE (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DAX \
+ (BIT(PMD_ORDER) | BIT(PUD_ORDER))
+#define THP_ORDERS_ALL_FILE_DEFAULT \
+ ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
/*
* Mask of all large folio orders supported for THP.
*/
-#define THP_ORDERS_ALL (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE)
+#define THP_ORDERS_ALL \
+ (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT)
#define TVA_SMAPS (1 << 0) /* Will be used for procfs */
#define TVA_IN_PF (1 << 1) /* Page fault handler */
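
Both new masks are ordinary bit masks over folio orders: THP_ORDERS_ALL_FILE_DEFAULT permits every order from 1 through MAX_PAGECACHE_ORDER (order 0 is masked off), while the DAX mask keeps only the PMD and PUD orders, since DAX folios are never split. A standalone check of the arithmetic follows; the order constants are hardcoded to illustrative values (PMD_ORDER/PUD_ORDER here assume x86-64 with 4 KiB pages, and MAX_PAGECACHE_ORDER is configuration-dependent):

    #include <stdio.h>

    #define BIT(n)                  (1UL << (n))
    #define PMD_ORDER               9   /* assumed: x86-64, 4 KiB base pages */
    #define PUD_ORDER               18  /* assumed */
    #define MAX_PAGECACHE_ORDER     8   /* assumed; varies by configuration */

    #define THP_ORDERS_ALL_FILE_DAX     (BIT(PMD_ORDER) | BIT(PUD_ORDER))
    #define THP_ORDERS_ALL_FILE_DEFAULT ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))

    int main(void)
    {
            for (int order = 0; order <= PUD_ORDER; order++) {
                    int dax = !!(THP_ORDERS_ALL_FILE_DAX & BIT(order));
                    int dfl = !!(THP_ORDERS_ALL_FILE_DEFAULT & BIT(order));

                    if (dax || dfl)
                            printf("order %2d: dax=%d default=%d\n", order, dax, dfl);
            }
            return 0;
    }

With these values the default mask allows orders 1 through 8 and the DAX mask allows 9 and 18, which is exactly the per-VMA split that __thp_vma_allowable_orders() now applies in the mm/huge_memory.c hunk further down.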
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7e2eb091049a..0e5bf25d324f 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -109,6 +109,7 @@ struct mem_cgroup_per_node {
/* Fields which get updated often at the end. */
struct lruvec lruvec;
+ CACHELINE_PADDING(_pad2_);
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter;
};
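
The memcontrol.h hunk is a false-sharing fix: lruvec ends with fields that reclaim updates constantly, and without padding the fields that follow it shared cache lines with them. The idea in a self-contained C sketch (the 64-byte line size and the field names are illustrative; the kernel's CACHELINE_PADDING uses a cacheline-aligned empty struct rather than an explicit array):

    #include <stdio.h>
    #include <stddef.h>

    #define CACHELINE_SIZE 64       /* assumed; L1_CACHE_BYTES in the kernel */

    /* Illustrative stand-in: force the next field onto a later cache line. */
    #define CACHELINE_PADDING(name) char name[CACHELINE_SIZE]

    struct per_node_state {
            long hot_counter;               /* written constantly during reclaim */
            CACHELINE_PADDING(_pad_);
            long read_mostly_size;          /* mostly read by other CPUs */
    };

    int main(void)
    {
            printf("hot_counter offset:      %zu\n",
                   offsetof(struct per_node_state, hot_counter));
            printf("read_mostly_size offset: %zu\n",
                   offsetof(struct per_node_state, read_mostly_size));
            return 0;
    }

Without the padding both fields typically land in the same 64-byte line, so every counter update invalidates the line on CPUs that only read the neighbouring field.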
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4563b560ced7..c4b238a20b76 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3137,21 +3137,7 @@ extern void reserve_bootmem_region(phys_addr_t start,
phys_addr_t end, int nid);
/* Free the reserved page into the buddy system, so it gets managed. */
-static inline void free_reserved_page(struct page *page)
-{
- if (mem_alloc_profiling_enabled()) {
- union codetag_ref *ref = get_page_tag_ref(page);
-
- if (ref) {
- set_codetag_empty(ref);
- put_page_tag_ref(ref);
- }
- }
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- adjust_managed_page_count(page, 1);
-}
+void free_reserved_page(struct page *page);
#define free_highmem_page(page) free_reserved_page(page)
static inline void mark_page_reserved(struct page *page)
diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c
index 3518e7394eca..ca736166f100 100644
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c
@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd)
RUNB) */
symCount = symTotal+2;
for (j = 0; j < groupCount; j++) {
- unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
+ unsigned char length[MAX_SYMBOLS];
+ unsigned short temp[MAX_HUFCODE_BITS+1];
int minLen, maxLen, pp;
/* Read Huffman code lengths for each symbol. They're
stored in a way similar to mtf; record a starting
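
The bunzip2 fix is a counter-width bug: temp[] counts how many symbols use each Huffman code length, and a bzip2 block can define up to MAX_SYMBOLS = 258 symbols (256 byte values plus the RUNA/RUNB run-length symbols), so a single popular length can legitimately be shared by more than 255 symbols and wrap an unsigned char. A short demonstration of the wrap under that worst case:

    #include <stdio.h>

    #define MAX_SYMBOLS 258 /* bzip2: 256 byte values + RUNA/RUNB */

    int main(void)
    {
            unsigned char  narrow = 0;      /* old temp[] element type */
            unsigned short wide   = 0;      /* fixed element type */

            /* Rare but legal stream: every symbol shares one code length. */
            for (int sym = 0; sym < MAX_SYMBOLS; sym++) {
                    narrow++;
                    wide++;
            }
            printf("unsigned char count:  %u (wrapped)\n", narrow); /* prints 2 */
            printf("unsigned short count: %u\n", wide);             /* prints 258 */
            return 0;
    }

The wrapped count corrupts the limit/base tables built from temp[], which is why the failure only surfaced on rare inputs.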
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f9696c94e211..f4be468e06a4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -89,9 +89,17 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
bool smaps = tva_flags & TVA_SMAPS;
bool in_pf = tva_flags & TVA_IN_PF;
bool enforce_sysfs = tva_flags & TVA_ENFORCE_SYSFS;
+ unsigned long supported_orders;
+
/* Check the intersection of requested and supported orders. */
- orders &= vma_is_anonymous(vma) ?
- THP_ORDERS_ALL_ANON : THP_ORDERS_ALL_FILE;
+ if (vma_is_anonymous(vma))
+ supported_orders = THP_ORDERS_ALL_ANON;
+ else if (vma_is_dax(vma))
+ supported_orders = THP_ORDERS_ALL_FILE_DAX;
+ else
+ supported_orders = THP_ORDERS_ALL_FILE_DEFAULT;
+
+ orders &= supported_orders;
if (!orders)
return 0;
@@ -877,7 +885,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
loff_t off_align = round_up(off, size);
unsigned long len_pad, ret, off_sub;
- if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
+ if (!IS_ENABLED(CONFIG_64BIT) || in_compat_syscall())
return 0;
if (off_end <= off_align || (off_end - off_align) < size)
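
The one-liner above works because CONFIG_32BIT is an architecture-specific symbol that only a few ports (such as MIPS) define, so IS_ENABLED(CONFIG_32BIT) quietly evaluated to 0 on most 32-bit builds and PMD-aligned THP placement was never relaxed there; !IS_ENABLED(CONFIG_64BIT) holds on every 32-bit build. IS_ENABLED() itself is preprocessor pattern-matching; here is a condensed userspace rendition, simplified from include/linux/kconfig.h (the real macro additionally handles =m module symbols):

    #include <stdio.h>

    /* Evaluates to 1 iff the option is #defined to 1, 0 if it is undefined. */
    #define __ARG_PLACEHOLDER_1             0,
    #define __take_second_arg(__ignored, val, ...) val
    #define ____is_defined(arg1_or_junk)    __take_second_arg(arg1_or_junk 1, 0)
    #define ___is_defined(val)              ____is_defined(__ARG_PLACEHOLDER_##val)
    #define __is_defined(x)                 ___is_defined(x)
    #define IS_ENABLED(option)              __is_defined(option)

    /* Pretend this is a 32-bit x86/arm build: neither symbol is defined,
     * because CONFIG_32BIT only exists on a few architectures. */

    int main(void)
    {
            printf("old test IS_ENABLED(CONFIG_32BIT):  %d (wrongly 'not 32-bit')\n",
                   IS_ENABLED(CONFIG_32BIT));
            printf("new test !IS_ENABLED(CONFIG_64BIT): %d (correctly '32-bit')\n",
                   !IS_ENABLED(CONFIG_64BIT));
            return 0;
    }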
diff --git a/mm/memory.c b/mm/memory.c
index 1ff7b6f51ec1..34f8402d2046 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4780,7 +4780,7 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
{
struct vm_area_struct *vma = vmf->vma;
bool write = vmf->flags & FAULT_FLAG_WRITE;
- bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
+ bool prefault = !in_range(vmf->address, addr, nr * PAGE_SIZE);
pte_t entry;
flush_icache_pages(vma, page, nr);
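
In set_pte_range() the batch of nr pages starting at addr may or may not contain the address that actually faulted; only pages mapped speculatively around the fault should be treated as "prefault" (and thus mapped old, without the young bit), so the in_range() result has to be negated. A quick check of the corrected predicate, with in_range() written out in its plain form (the kernel's macro uses an equivalent unsigned-wraparound comparison):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Plain form of the kernel's in_range(): start <= addr < start + size. */
    #define in_range(addr, start, size) \
            ((addr) >= (start) && (addr) < (start) + (size))

    int main(void)
    {
            unsigned long fault = 0x7f0000003000UL;         /* faulting address */
            unsigned long nr = 4;                           /* pages in the batch */

            /* Batch contains the faulting page: not a prefault. */
            unsigned long addr = 0x7f0000000000UL;
            printf("prefault = %d\n", !in_range(fault, addr, nr * PAGE_SIZE)); /* 0 */

            /* Batch elsewhere in the file: purely speculative, prefault. */
            addr = 0x7f0000040000UL;
            printf("prefault = %d\n", !in_range(fault, addr, nr * PAGE_SIZE)); /* 1 */
            return 0;
    }

With the un-negated test the roles were swapped: the genuinely faulted page was mapped old while the speculative neighbours were marked young, defeating the old/young accounting the commit subject refers to.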
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8337926b89d4..28f80daf5c04 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2343,16 +2343,20 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
- int count = READ_ONCE(pcp->count);
-
- while (count) {
- int to_drain = min(count, pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
- count -= to_drain;
+ int count;
+ do {
spin_lock(&pcp->lock);
- free_pcppages_bulk(zone, to_drain, pcp, 0);
+ count = pcp->count;
+ if (count) {
+ int to_drain = min(count,
+ pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);
+
+ free_pcppages_bulk(zone, to_drain, pcp, 0);
+ count -= to_drain;
+ }
spin_unlock(&pcp->lock);
- }
+ } while (count);
}
/*
@@ -5815,6 +5819,23 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
return pages;
}
+void free_reserved_page(struct page *page)
+{
+ if (mem_alloc_profiling_enabled()) {
+ union codetag_ref *ref = get_page_tag_ref(page);
+
+ if (ref) {
+ set_codetag_empty(ref);
+ put_page_tag_ref(ref);
+ }
+ }
+ ClearPageReserved(page);
+ init_page_count(page);
+ __free_page(page);
+ adjust_managed_page_count(page, 1);
+}
+EXPORT_SYMBOL(free_reserved_page);
+
static int page_alloc_cpu_dead(unsigned int cpu)
{
struct zone *zone;
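
Two independent changes land in page_alloc.c. The second gives free_reserved_page() an out-of-line, exported definition (moved verbatim from include/linux/mm.h); as an inline it pulled non-exported allocation-profiling helpers into every caller, which broke modular users. The first is the race fix: the old drain loop sampled pcp->count once before taking the lock, so a concurrent __rmqueue_pcplist() could shrink the list in between and free_pcppages_bulk() could be asked to free more pages than were present. The fixed shape re-reads the counter under the lock on every pass; here is a self-contained sketch of that pattern with a pthread mutex standing in for the pcp spinlock (BATCH_MAX and the counter are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define BATCH_MAX 64    /* stands in for pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX */

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int pcp_count = 1000;    /* shared with concurrent allocators */

    static void drain(void)
    {
            int count;

            do {
                    pthread_mutex_lock(&lock);
                    /* Re-read under the lock: never trust a pre-lock snapshot. */
                    count = pcp_count;
                    if (count) {
                            int to_drain = count < BATCH_MAX ? count : BATCH_MAX;

                            pcp_count -= to_drain;  /* free_pcppages_bulk() */
                            count -= to_drain;
                    }
                    pthread_mutex_unlock(&lock);    /* let allocators in between batches */
            } while (count);
    }

    int main(void)
    {
            drain();
            printf("count after drain: %d\n", pcp_count);   /* 0 */
            return 0;
    }

Dropping and re-taking the lock per batch keeps the bounded hold times of the original loop while making the drain amount always match what is actually on the list.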
diff --git a/tools/testing/selftests/mm/va_high_addr_switch.c b/tools/testing/selftests/mm/va_high_addr_switch.c
index fa7eabfaf841..896b3f73fc53 100644
--- a/tools/testing/selftests/mm/va_high_addr_switch.c
+++ b/tools/testing/selftests/mm/va_high_addr_switch.c
@@ -293,6 +293,20 @@ static int run_test(struct testcase *test, int count)
return ret;
}
+#ifdef __aarch64__
+/* Check if userspace VA > 48 bits */
+static int high_address_present(void)
+{
+ void *ptr = mmap((void *)(1UL << 50), 1, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (ptr == MAP_FAILED)
+ return 0;
+
+ munmap(ptr, 1);
+ return 1;
+}
+#endif
+
static int supported_arch(void)
{
#if defined(__powerpc64__)
@@ -300,7 +314,7 @@ static int supported_arch(void)
#elif defined(__x86_64__)
return 1;
#elif defined(__aarch64__)
- return 1;
+ return high_address_present();
#else
return 0;
#endif
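
The selftest now probes at runtime instead of assuming every arm64 kernel has a 52-bit user VA: a MAP_FIXED request at 1UL << 50 can only succeed when the kernel and hardware support addresses above 48 bits (LVA with 64K pages, LPA2 with 4K/16K). The same probe works as a stand-alone program, shown here with the includes it needs (functionally identical to the helper added above):

    #include <stdio.h>
    #include <sys/mman.h>

    /* Probe for a userspace VA wider than 48 bits by asking for 2^50. */
    int main(void)
    {
            void *ptr = mmap((void *)(1UL << 50), 1, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

            if (ptr == MAP_FAILED) {
                    printf("high VA not available; tests would be skipped\n");
                    return 1;
            }
            munmap(ptr, 1);
            printf("high VA available; tests would run\n");
            return 0;
    }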