Diffstat (limited to 'mm/percpu.c')
-rw-r--r--	mm/percpu.c	12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/mm/percpu.c b/mm/percpu.c
index ac61e3fc5f15..b35494c8ede2 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1745,7 +1745,7 @@ void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved,
 	gfp = current_gfp_context(gfp);
 	/* whitelisted flags that can be passed to the backing allocators */
 	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
-	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+	is_atomic = !gfpflags_allow_blocking(gfp);
 	do_warn = !(gfp & __GFP_NOWARN);
 
 	/*
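This one-line change alters how constrained-but-sleepable requests are classified: under the old mask test, any gfp missing part of GFP_KERNEL (including GFP_NOIO and GFP_NOFS) was treated as atomic, while gfpflags_allow_blocking() keys only on __GFP_DIRECT_RECLAIM. The userspace sketch below compares the two checks; the flag names follow include/linux/gfp_types.h, but the bit values are illustrative stand-ins rather than the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit values only; the real layout lives in gfp_types.h. */
#define __GFP_HIGH		0x20u
#define __GFP_IO		0x40u
#define __GFP_FS		0x80u
#define __GFP_DIRECT_RECLAIM	0x400u
#define __GFP_KSWAPD_RECLAIM	0x800u
#define __GFP_RECLAIM		(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM)

#define GFP_KERNEL	(__GFP_RECLAIM | __GFP_IO | __GFP_FS)
#define GFP_NOFS	(__GFP_RECLAIM | __GFP_IO)
#define GFP_NOIO	(__GFP_RECLAIM)
#define GFP_ATOMIC	(__GFP_HIGH | __GFP_KSWAPD_RECLAIM)

/* Same predicate as the kernel's gfpflags_allow_blocking(). */
static bool gfpflags_allow_blocking(unsigned int gfp)
{
	return gfp & __GFP_DIRECT_RECLAIM;
}

int main(void)
{
	const unsigned int flags[] = { GFP_KERNEL, GFP_NOFS, GFP_NOIO, GFP_ATOMIC };
	const char *names[] = { "GFP_KERNEL", "GFP_NOFS", "GFP_NOIO", "GFP_ATOMIC" };

	for (int i = 0; i < 4; i++) {
		bool old_atomic = (flags[i] & GFP_KERNEL) != GFP_KERNEL;
		bool new_atomic = !gfpflags_allow_blocking(flags[i]);

		printf("%-10s  old is_atomic=%d  new is_atomic=%d\n",
		       names[i], old_atomic, new_atomic);
	}
	return 0;
}

GFP_NOIO and GFP_NOFS come out non-atomic under the new predicate, so such callers follow the sleepable path in pcpu_alloc_noprof() instead of being forced through the atomic one.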
@@ -2191,7 +2191,12 @@ static void pcpu_balance_workfn(struct work_struct *work)
 	 * to grow other chunks. This then gives pcpu_reclaim_populated() time
 	 * to move fully free chunks to the active list to be freed if
 	 * appropriate.
+	 *
+	 * Enforce GFP_NOIO allocations because we have pcpu_alloc users
+	 * constrained to GFP_NOIO/NOFS contexts and they could form a lock
+	 * dependency through pcpu_alloc_mutex.
 	 */
+	unsigned int flags = memalloc_noio_save();
 	mutex_lock(&pcpu_alloc_mutex);
 	spin_lock_irq(&pcpu_lock);
 
@@ -2202,6 +2207,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
 
 	spin_unlock_irq(&pcpu_lock);
 	mutex_unlock(&pcpu_alloc_mutex);
+	memalloc_noio_restore(flags);
 }
 
 /**
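Together, the two hunks above wrap the whole balance-worker body in a NOIO scope: memalloc_noio_save() marks the task so that allocations made inside the section are implicitly degraded from GFP_KERNEL to GFP_NOIO, and memalloc_noio_restore() ends the scope. A minimal sketch of that pattern, with placeholder names rather than the actual mm/percpu.c symbols:

#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);	/* stands in for pcpu_alloc_mutex */

/* Illustrative worker body, not pcpu_balance_workfn() itself. */
static void example_balance_work(void)
{
	unsigned int noio_flags = memalloc_noio_save();
	void *buf;

	mutex_lock(&example_mutex);
	/* Inside the NOIO scope this request behaves like GFP_NOIO. */
	buf = kmalloc(64, GFP_KERNEL);
	kfree(buf);
	mutex_unlock(&example_mutex);

	memalloc_noio_restore(noio_flags);
}

Because the worker can no longer start I/O while holding the mutex, GFP_NOIO/GFP_NOFS-constrained pcpu_alloc users can contend for the same mutex without creating the lock dependency the added comment describes.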
@@ -3071,7 +3077,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
 				continue;
 			}
 			/* copy and return the unused part */
-			memcpy(ptr, __per_cpu_load, ai->static_size);
+			memcpy(ptr, __per_cpu_start, ai->static_size);
 			pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
 		}
 	}
@@ -3240,7 +3246,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t
 		flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
 
 		/* copy static data */
-		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
+		memcpy((void *)unit_addr, __per_cpu_start, ai->static_size);
 	}
 
 	/* we're ready, commit */
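The last two hunks switch both first-chunk setup paths to copy the static per-CPU template from __per_cpu_start rather than __per_cpu_load. All three symbols are provided by the linker script and declared in include/asm-generic/sections.h: __per_cpu_start and __per_cpu_end bound the .data..percpu section, while __per_cpu_load is that section's load address in the kernel image. The helper below is only an illustration of how ai->static_size relates to those bounds; it is not part of the patch.

#include <linux/types.h>
#include <asm/sections.h>	/* __per_cpu_load, __per_cpu_start, __per_cpu_end */

/*
 * Illustrative only: the template memcpy'd into each unit above is the
 * .data..percpu image, so its size is the span between the section bound
 * symbols; ai->static_size is derived from the same bounds.
 */
static size_t example_static_percpu_size(void)
{
	return __per_cpu_end - __per_cpu_start;
}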