author		Michal Hocko <mhocko@suse.com>	2024-09-26 19:11:51 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2024-10-09 12:47:19 -0700
commit		9a8da05d7ad619beb84d0c6904c3fa7022c6fb9b
tree		b80f92181d1fb56236fc273454c2f9dbdc44561b /include
parent		9897713fe1077c90b4a86c9af0a878d56c8888a2
Revert "mm: introduce PF_MEMALLOC_NORECLAIM, PF_MEMALLOC_NOWARN"
This reverts commit eab0af905bfc3e9c05da2ca163d76a1513159aa4.

There is no existing user of those flags. PF_MEMALLOC_NORECLAIM is dangerous because a nested allocation context can use GFP_NOFAIL, which could then fail unexpectedly. Such code would be hard to maintain because the nested allocation could be deeper in the call chain.

PF_MEMALLOC_NORECLAIM has been added even when it was pointed out [1] that such an allocation context is inherently unsafe if the context doesn't fully control all allocations called from this context.

While PF_MEMALLOC_NOWARN is not dangerous the way PF_MEMALLOC_NORECLAIM is, it doesn't have any user either, and as Matthew has pointed out we are running out of those flags, so better to reclaim it while it has no real users.

[1] https://lore.kernel.org/all/ZcM0xtlKbAOFjv5n@tiehlicka/

Link: https://lkml.kernel.org/r/20240926172940.167084-3-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christian Brauner <brauner@kernel.org>
Cc: James Morris <jmorris@namei.org>
Cc: Jan Kara <jack@suse.cz>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Serge E. Hallyn <serge@hallyn.com>
Cc: Yafang Shao <laoar.shao@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
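For illustration only (not part of the commit message): a minimal sketch of the hazard described above, written against the now-reverted flag; example_outer() and struct foo are hypothetical names. With PF_MEMALLOC_NORECLAIM set, current_gfp_context() cleared __GFP_DIRECT_RECLAIM from every allocation in scope, so a nested allocation relying on __GFP_NOFAIL could fail even though its caller never checks for NULL:

#include <linux/sched.h>
#include <linux/slab.h>

struct foo { int bar; };

static int example_outer(void)
{
	/* The outer code opts all nested allocations out of direct
	 * reclaim (only possible before this revert): */
	current->flags |= PF_MEMALLOC_NORECLAIM;

	/* ... while a callee deeper in the call chain, outside the
	 * outer code's control, relies on the no-fail guarantee: */
	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL | __GFP_NOFAIL);
	p->bar = 1;	/* NOFAIL silently weakened: p may be NULL here */

	current->flags &= ~PF_MEMALLOC_NORECLAIM;
	return 0;
}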
Diffstat (limited to 'include')
-rw-r--r--	include/linux/sched.h	4
-rw-r--r--	include/linux/sched/mm.h	17
2 files changed, 6 insertions(+), 15 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e6ee4258169a..449dd64ed9ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1681,8 +1681,8 @@ extern struct pid *cad_pid;
 						 * I am cleaning dirty pages from some other bdi. */
 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
-#define PF_MEMALLOC_NORECLAIM	0x00800000	/* All allocation requests will clear __GFP_DIRECT_RECLAIM */
-#define PF_MEMALLOC_NOWARN	0x01000000	/* All allocation requests will inherit __GFP_NOWARN */
+#define PF__HOLE__00800000	0x00800000
+#define PF__HOLE__01000000	0x01000000
 #define PF__HOLE__02000000	0x02000000
 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
 #define PF_MCE_EARLY		0x08000000	/* Early kill for mce process policy */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 07bb8d4181d7..928a626725e6 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -251,25 +251,16 @@ static inline gfp_t current_gfp_context(gfp_t flags)
 {
 	unsigned int pflags = READ_ONCE(current->flags);
 
-	if (unlikely(pflags & (PF_MEMALLOC_NOIO |
-			       PF_MEMALLOC_NOFS |
-			       PF_MEMALLOC_NORECLAIM |
-			       PF_MEMALLOC_NOWARN |
-			       PF_MEMALLOC_PIN))) {
+	if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_PIN))) {
 		/*
-		 * Stronger flags before weaker flags:
-		 * NORECLAIM implies NOIO, which in turn implies NOFS
+		 * NOIO implies both NOIO and NOFS and it is a weaker context
+		 * so always make sure it makes precedence
 		 */
-		if (pflags & PF_MEMALLOC_NORECLAIM)
-			flags &= ~__GFP_DIRECT_RECLAIM;
-		else if (pflags & PF_MEMALLOC_NOIO)
+		if (pflags & PF_MEMALLOC_NOIO)
 			flags &= ~(__GFP_IO | __GFP_FS);
 		else if (pflags & PF_MEMALLOC_NOFS)
 			flags &= ~__GFP_FS;
-		if (pflags & PF_MEMALLOC_NOWARN)
-			flags |= __GFP_NOWARN;
-
 		if (pflags & PF_MEMALLOC_PIN)
 			flags &= ~__GFP_MOVABLE;
 	}
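For context (not part of the diff): the supported way to establish a scoped allocation context remains the memalloc_*_save()/restore() helpers, which set the PF_MEMALLOC_* flags honored by current_gfp_context() above. A minimal sketch, with read_path_alloc() a hypothetical caller:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *read_path_alloc(size_t size)
{
	/* memalloc_noio_save() sets PF_MEMALLOC_NOIO on current, so
	 * current_gfp_context() masks __GFP_IO and __GFP_FS from any
	 * allocation made until the matching restore: */
	unsigned int noio_flags = memalloc_noio_save();
	void *buf = kmalloc(size, GFP_KERNEL);	/* effectively GFP_NOIO here */

	memalloc_noio_restore(noio_flags);
	return buf;
}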