author    | Ingo Molnar <mingo@kernel.org> | 2022-11-21 22:54:36 +0100
committer | Ingo Molnar <mingo@kernel.org> | 2022-11-21 23:01:51 +0100
commit    | 0ce096db719ebaf46d4faf93e1ed1341c1853919 (patch)
tree      | 41c8826034eb5b430adf97d43fb0f2dce78325ba /arch/powerpc/mm/book3s64/hash_pgtable.c
parent    | 2d08a893b87cf9b2f9dbb3afaff60ca4530d55a2 (diff)
parent    | eb7081409f94a9a8608593d0fb63a1aa3d6f95d8 (diff)
Merge tag 'v6.1-rc6' into x86/core, to resolve conflicts
Resolve conflicts between these commits in arch/x86/kernel/asm-offsets.c:
# upstream:
debc5a1ec0d1 ("KVM: x86: use a separate asm-offsets.c file")
# retbleed work in x86/core:
5d8213864ade ("x86/retbleed: Add SKL return thunk")
... and these commits in include/linux/bpf.h:
# upstream:
18acb7fac22f ("bpf: Revert ("Fix dispatcher patchable function entry to 5 bytes nop")")
# x86/core commits:
931ab63664f0 ("x86/ibt: Implement FineIBT")
bea75b33895f ("x86/Kconfig: Introduce function padding")
The latter two modify BPF_DISPATCHER_ATTRIBUTES(), which was removed upstream.
Conflicts:
arch/x86/kernel/asm-offsets.c
include/linux/bpf.h
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/powerpc/mm/book3s64/hash_pgtable.c')
-rw-r--r-- | arch/powerpc/mm/book3s64/hash_pgtable.c | 8
1 file changed, 5 insertions, 3 deletions
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index 747492edb75a..51f48984abca 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -404,7 +404,8 @@ EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
 
 struct change_memory_parms {
 	unsigned long start, end, newpp;
-	unsigned int step, nr_cpus, master_cpu;
+	unsigned int step, nr_cpus;
+	atomic_t master_cpu;
 	atomic_t cpu_counter;
 };
 
@@ -478,7 +479,8 @@ static int change_memory_range_fn(void *data)
 {
 	struct change_memory_parms *parms = data;
 
-	if (parms->master_cpu != smp_processor_id())
+	// First CPU goes through, all others wait.
+	if (atomic_xchg(&parms->master_cpu, 1) == 1)
 		return chmem_secondary_loop(parms);
 
 	// Wait for all but one CPU (this one) to call-in
@@ -516,7 +518,7 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
 	chmem_parms.end = end;
 	chmem_parms.step = step;
 	chmem_parms.newpp = newpp;
-	chmem_parms.master_cpu = smp_processor_id();
+	atomic_set(&chmem_parms.master_cpu, 0);
 
 	cpus_read_lock();
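The notable change in the hunks above is the master-CPU selection: instead of recording smp_processor_id() in hash__change_memory_range() and comparing against it inside the stop-machine callback, the patched code lets whichever CPU reaches the check first claim the master role with atomic_xchg() (the old value 0 means "role still free"). Below is a minimal userspace sketch of the same first-arrival pattern, using C11 atomics and pthreads rather than the kernel's atomic_t/stop_machine() machinery; the thread count, struct, and function names are illustrative and not taken from the kernel source.

```c
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

struct parms {
	atomic_int master_claimed;   /* 0 = master role still free, 1 = taken */
	atomic_int secondaries_in;   /* how many non-master threads have arrived */
};

static void *worker(void *data)
{
	struct parms *p = data;

	/* The first thread to swap in 1 sees the old value 0 and becomes the
	 * master; every later thread sees 1 and takes the secondary path,
	 * mirroring the atomic_xchg(&parms->master_cpu, 1) check above. */
	if (atomic_exchange(&p->master_claimed, 1) == 1) {
		atomic_fetch_add(&p->secondaries_in, 1);
		return NULL;
	}

	/* Master: wait for all other threads to call in, then do the work
	 * (the kernel code uses a similar call-in counter, cpu_counter). */
	while (atomic_load(&p->secondaries_in) != NTHREADS - 1)
		;  /* spin */

	printf("master thread doing the privileged work\n");
	return NULL;
}

int main(void)
{
	struct parms p;
	pthread_t threads[NTHREADS];

	atomic_init(&p.master_claimed, 0);
	atomic_init(&p.secondaries_in, 0);

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&threads[i], NULL, worker, &p);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(threads[i], NULL);
	return 0;
}
```

As the third hunk shows, the caller now only needs to initialize master_cpu to 0 with atomic_set(); it no longer has to sample smp_processor_id() up front, since any CPU that wins the exchange can act as the master.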