From f118e9abddfae94d7ef88858159d7556e1c2f7f6 Mon Sep 17 00:00:00 2001
From: Chen Gang
Date: Sat, 22 Jun 2013 13:26:09 +0800
Subject: arch: sparc: kernel: check the memory length before using strcpy().

For the strcpy() calls that follow, the destination buffer is less than 512
bytes, but the source may be up to 'OPROMMAXPARAM' (4096) bytes, which is
larger than 512.  One work flow that reaches this code:

  openprom_sunos_ioctl() ->
    if (cmd == OPROMSETOPT)
      getstrings()      -> allocates a buffer of size 'OPROMMAXPARAM'
    opromsetopt()       -> divides that buffer into 'var' and 'value'
      of_set_property() -> passes them to
        prom_setprop()  -> passes them to
          ldom_set_var()

The few extra bytes that 4-byte alignment may add to the buffer are not a
concern, since 'sizeof(pkt) - sizeof(pkt.header)' is itself at least 4-byte
aligned.

Signed-off-by: Chen Gang
Signed-off-by: David S. Miller
---
 arch/sparc/kernel/ds.c | 10 ++++++++++
 1 file changed, 10 insertions(+)
(limited to 'arch/sparc/kernel')

diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 5ef48dab5636..11d460f6f9cc 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
 		char *base, *p;
 		int msg_len, loops;
 
+		if (strlen(var) + strlen(value) + 2 >
+		    sizeof(pkt) - sizeof(pkt.header)) {
+			printk(KERN_ERR PFX
+				"contents length: %zu, which more than max: %lu,"
+				"so could not set (%s) variable to (%s).\n",
+				strlen(var) + strlen(value) + 2,
+				sizeof(pkt) - sizeof(pkt.header), var, value);
+			return;
+		}
+
 		memset(&pkt, 0, sizeof(pkt));
 		pkt.header.data.tag.type = DS_DATA;
 		pkt.header.data.handle = cp->handle;
-- cgit v1.2.3

From 961246b4ed8da3bcf4ee1eb9147f341013553e3c Mon Sep 17 00:00:00 2001
From: Olivier DANET
Date: Wed, 10 Jul 2013 13:56:10 -0700
Subject: [PATCH] sparc32: vm_area_struct access for old Sun SPARCs.

Commit e4c6bfd2d79d063017ab19a18915f0bc759f32d9 ("mm: rearrange
vm_area_struct for fewer cache misses") changed the layout of the
vm_area_struct structure, which broke several SPARC32 assembly routines
that accessed the vm_mm field through hard-coded numerical offsets.

This patch defines the VMA_VM_MM constant to replace the immediate values.

Signed-off-by: Olivier DANET
Signed-off-by: David S. Miller
---
 arch/sparc/kernel/asm-offsets.c |  2 ++
 arch/sparc/mm/hypersparc.S      |  8 ++++----
 arch/sparc/mm/swift.S           |  8 ++++----
 arch/sparc/mm/tsunami.S         |  6 +++---
 arch/sparc/mm/viking.S          | 10 +++++-----
 5 files changed, 18 insertions(+), 16 deletions(-)
(limited to 'arch/sparc/kernel')

diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c
index 961b87f99e69..f76389a32342 100644
--- a/arch/sparc/kernel/asm-offsets.c
+++ b/arch/sparc/kernel/asm-offsets.c
@@ -49,6 +49,8 @@ int foo(void)
 	DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
 	BLANK();
 	DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
+	BLANK();
+	DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
 	/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
 
 	return 0;
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
index 44aad32eeb4e..969f96450f69 100644
--- a/arch/sparc/mm/hypersparc.S
+++ b/arch/sparc/mm/hypersparc.S
@@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out:
 
 	/* The things we do for performance... */
 hypersparc_flush_cache_range:
-	ld	[%o0 + 0x0], %o0	/* XXX vma->vm_mm, GROSS XXX */
+	ld	[%o0 + VMA_VM_MM], %o0
 #ifndef CONFIG_SMP
 	ld	[%o0 + AOFF_mm_context], %g1
 	cmp	%g1, -1
@@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out:
 	 */
	/* Verified, my ass...
*/ hypersparc_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %g2 #ifndef CONFIG_SMP cmp %g2, -1 @@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S index c801c3953a00..5d2b88d39424 100644 --- a/arch/sparc/mm/swift.S +++ b/arch/sparc/mm/swift.S @@ -105,7 +105,7 @@ swift_flush_cache_mm_out: .globl swift_flush_cache_range swift_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 sub %o2, %o1, %o2 sethi %hi(4096), %o3 cmp %o2, %o3 @@ -116,7 +116,7 @@ swift_flush_cache_range: .globl swift_flush_cache_page swift_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 70: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -219,7 +219,7 @@ swift_flush_sig_insns: .globl swift_flush_tlb_range .globl swift_flush_tlb_all swift_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 swift_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -233,7 +233,7 @@ swift_flush_tlb_all_out: .globl swift_flush_tlb_page swift_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S index 4e55e8f76648..bf10a345fa8b 100644 --- a/arch/sparc/mm/tsunami.S +++ b/arch/sparc/mm/tsunami.S @@ -24,7 +24,7 @@ /* Sliiick... */ tsunami_flush_cache_page: tsunami_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -46,7 +46,7 @@ tsunami_flush_sig_insns: /* More slick stuff... */ tsunami_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -65,7 +65,7 @@ tsunami_flush_tlb_out: /* This one can be done in a fine grained manner... 
*/ tsunami_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S index bf8ee0613ae7..852257fcc82b 100644 --- a/arch/sparc/mm/viking.S +++ b/arch/sparc/mm/viking.S @@ -108,7 +108,7 @@ viking_mxcc_flush_page: viking_flush_cache_page: viking_flush_cache_range: #ifndef CONFIG_SMP - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #endif viking_flush_cache_mm: #ifndef CONFIG_SMP @@ -148,7 +148,7 @@ viking_flush_tlb_mm: #endif viking_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -173,7 +173,7 @@ viking_flush_tlb_range: #endif viking_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -239,7 +239,7 @@ sun4dsmp_flush_tlb_range: tst %g5 bne 3f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 @@ -265,7 +265,7 @@ sun4dsmp_flush_tlb_page: tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 and %o1, PAGE_MASK, %o1 -- cgit v1.2.3 From 98d1e64f95b177d0f14efbdf695a1b28e1428035 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Wed, 10 Jul 2013 16:05:12 -0700 Subject: mm: remove free_area_cache Since all architectures have been converted to use vm_unmapped_area(), there is no remaining use for the free_area_cache. Signed-off-by: Michel Lespinasse Acked-by: Rik van Riel Cc: "James E.J. 
Bottomley" Cc: "Luck, Tony" Cc: Benjamin Herrenschmidt Cc: David Howells Cc: Helge Deller Cc: Ivan Kokshaysky Cc: Matt Turner Cc: Paul Mackerras Cc: Richard Henderson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/mmap.c | 2 -- arch/arm64/mm/mmap.c | 2 -- arch/mips/mm/mmap.c | 2 -- arch/powerpc/mm/mmap.c | 2 -- arch/s390/mm/mmap.c | 4 ---- arch/sparc/kernel/sys_sparc_64.c | 2 -- arch/tile/mm/mmap.c | 2 -- arch/x86/ia32/ia32_aout.c | 2 -- arch/x86/mm/mmap.c | 2 -- fs/binfmt_aout.c | 2 -- fs/binfmt_elf.c | 2 -- include/linux/mm_types.h | 3 --- include/linux/sched.h | 2 -- kernel/fork.c | 4 ---- mm/mmap.c | 28 ---------------------------- mm/nommu.c | 4 ---- mm/util.c | 1 - 17 files changed, 66 deletions(-) (limited to 'arch/sparc/kernel') diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 10062ceadd1c..0c6356255fe3 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c index 7c7be7855638..8ed6cb1a900f 100644 --- a/arch/arm64/mm/mmap.c +++ b/arch/arm64/mm/mmap.c @@ -90,11 +90,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } EXPORT_SYMBOL_GPL(arch_pick_mmap_layout); diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 7e5fe2790d8a..f1baadd56e82 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c @@ -158,11 +158,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c index 67a42ed0d2fc..cb8bdbe4972f 100644 --- a/arch/powerpc/mm/mmap.c +++ b/arch/powerpc/mm/mmap.c @@ -92,10 +92,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index 06bafec00278..40023290ee5b 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -91,11 +91,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } @@ -176,11 +174,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = 
s390_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = s390_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 2daaaa6eda23..51561b8b15ba 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -290,7 +290,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { /* We know it's 32-bit */ unsigned long task_size = STACK_TOP32; @@ -302,7 +301,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/tile/mm/mmap.c b/arch/tile/mm/mmap.c index f96f4cec602a..d67d91ebf63e 100644 --- a/arch/tile/mm/mmap.c +++ b/arch/tile/mm/mmap.c @@ -66,10 +66,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (!is_32bit || rlimit(RLIMIT_STACK) == RLIM_INFINITY) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(mm); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 52ff81cce008..bae3aba95b15 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -308,8 +308,6 @@ static int load_aout_binary(struct linux_binprm *bprm) (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); - current->mm->free_area_cache = TASK_UNMAPPED_BASE; - current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT); if (retval < 0) { diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 845df6835f9f..62c29a5bfe26 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -115,10 +115,8 @@ void arch_pick_mmap_layout(struct mm_struct *mm) if (mmap_is_legacy()) { mm->mmap_base = mmap_legacy_base(); mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index bce87694f7b0..89dec7f789a4 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -255,8 +255,6 @@ static int load_aout_binary(struct linux_binprm * bprm) (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); - current->mm->free_area_cache = current->mm->mmap_base; - current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); if (retval < 0) { diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index f8a0b0efda44..100edcc5e312 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -738,8 +738,6 @@ static int load_elf_binary(struct linux_binprm *bprm) /* Do this so that we can load the interpreter, if need be. 
We will change some of these later */ - current->mm->free_area_cache = current->mm->mmap_base; - current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), executable_stack); if (retval < 0) { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index ace9a5f01c64..fb425aa16c01 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -330,12 +330,9 @@ struct mm_struct { unsigned long (*get_unmapped_area) (struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); - void (*unmap_area) (struct mm_struct *mm, unsigned long addr); #endif unsigned long mmap_base; /* base of mmap area */ unsigned long task_size; /* size of task vm space */ - unsigned long cached_hole_size; /* if non-zero, the largest hole below free_area_cache */ - unsigned long free_area_cache; /* first hole of size cached_hole_size or larger */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; atomic_t mm_users; /* How many users with user space? */ diff --git a/include/linux/sched.h b/include/linux/sched.h index f99d57e0ae47..50d04b92ceda 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -322,8 +322,6 @@ extern unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -extern void arch_unmap_area(struct mm_struct *, unsigned long); -extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); #else static inline void arch_pick_mmap_layout(struct mm_struct *mm) {} #endif diff --git a/kernel/fork.c b/kernel/fork.c index 6e6a1c11b3e5..66635c80a813 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -365,8 +365,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) mm->locked_vm = 0; mm->mmap = NULL; mm->mmap_cache = NULL; - mm->free_area_cache = oldmm->mmap_base; - mm->cached_hole_size = ~0UL; mm->map_count = 0; cpumask_clear(mm_cpumask(mm)); mm->mm_rb = RB_ROOT; @@ -540,8 +538,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) mm->nr_ptes = 0; memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); - mm->free_area_cache = TASK_UNMAPPED_BASE; - mm->cached_hole_size = ~0UL; mm_init_aio(mm); mm_init_owner(mm, p); diff --git a/mm/mmap.c b/mm/mmap.c index f81311173b4d..fbad7b091090 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1878,15 +1878,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, } #endif -void arch_unmap_area(struct mm_struct *mm, unsigned long addr) -{ - /* - * Is this a new hole at the lowest possible address? - */ - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) - mm->free_area_cache = addr; -} - /* * This mmap-allocator allocates new areas top-down from below the * stack's low limit (the base): @@ -1943,19 +1934,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, } #endif -void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) -{ - /* - * Is this a new hole at the highest possible address? 
- */ - if (addr > mm->free_area_cache) - mm->free_area_cache = addr; - - /* dont allow allocations above current base */ - if (mm->free_area_cache > mm->mmap_base) - mm->free_area_cache = mm->mmap_base; -} - unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) @@ -2376,7 +2354,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, { struct vm_area_struct **insertion_point; struct vm_area_struct *tail_vma = NULL; - unsigned long addr; insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; @@ -2393,11 +2370,6 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, } else mm->highest_vm_end = prev ? prev->vm_end : 0; tail_vma->vm_next = NULL; - if (mm->unmap_area == arch_unmap_area) - addr = prev ? prev->vm_end : mm->mmap_base; - else - addr = vma ? vma->vm_start : mm->mmap_base; - mm->unmap_area(mm, addr); mm->mmap_cache = NULL; /* Kill the cache. */ } diff --git a/mm/nommu.c b/mm/nommu.c index e44e6e0a125c..ecd1f158548e 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1871,10 +1871,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr, return -ENOMEM; } -void arch_unmap_area(struct mm_struct *mm, unsigned long addr) -{ -} - void unmap_mapping_range(struct address_space *mapping, loff_t const holebegin, loff_t const holelen, int even_cows) diff --git a/mm/util.c b/mm/util.c index ab1424dbe2e6..7441c41d00f6 100644 --- a/mm/util.c +++ b/mm/util.c @@ -295,7 +295,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } #endif -- cgit v1.2.3 From 2066aadd53c563445039d6490b685783816270ec Mon Sep 17 00:00:00 2001 From: Paul Gortmaker Date: Mon, 17 Jun 2013 15:43:14 -0400 Subject: sparc: delete __cpuinit/__CPUINIT usage from all users The __cpuinit type of throwaway sections might have made sense some time ago when RAM was more constrained, but now the savings do not offset the cost and complications. For example, the fix in commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time") is a good example of the nasty type of bugs that can be created with improper use of the various __init prefixes. After a discussion on LKML[1] it was decided that cpuinit should go the way of devinit and be phased out. Once all the users are gone, we can then finally remove the macros themselves from linux/init.h. Note that some harmless section mismatch warnings may result, since notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c) are flagged as __cpuinit -- so if we remove the __cpuinit from arch specific callers, we will also get section mismatch warnings. As an intermediate step, we intend to turn the linux/init.h cpuinit content into no-ops as early as possible, since that will get rid of these warnings. In any case, they are temporary and harmless. This removes all the arch/sparc uses of the __cpuinit macros from C files and removes __CPUINIT from assembly files. Note that even though arch/sparc/kernel/trampoline_64.S has instances of ".previous" in it, they are all paired off against explicit ".section" directives, and not implicitly paired with __CPUINIT (unlike mips and arm were). [1] https://lkml.org/lkml/2013/5/20/589 Cc: "David S. 
Miller" Cc: sparclinux@vger.kernel.org Signed-off-by: Paul Gortmaker --- arch/sparc/kernel/ds.c | 11 ++++------- arch/sparc/kernel/entry.h | 2 +- arch/sparc/kernel/hvtramp.S | 1 - arch/sparc/kernel/irq_64.c | 5 +++-- arch/sparc/kernel/leon_smp.c | 10 +++++----- arch/sparc/kernel/mdesc.c | 34 +++++++++++++++++----------------- arch/sparc/kernel/smp_32.c | 20 ++++++++++---------- arch/sparc/kernel/smp_64.c | 9 +++++---- arch/sparc/kernel/sun4d_smp.c | 6 +++--- arch/sparc/kernel/sun4m_smp.c | 6 +++--- arch/sparc/kernel/sysfs.c | 4 ++-- arch/sparc/kernel/trampoline_32.S | 3 --- arch/sparc/kernel/trampoline_64.S | 2 -- arch/sparc/mm/init_64.c | 2 +- arch/sparc/mm/srmmu.c | 12 ++++++------ 15 files changed, 60 insertions(+), 67 deletions(-) (limited to 'arch/sparc/kernel') diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 11d460f6f9cc..62d6b153ffa2 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -528,10 +528,8 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, } } -static int __cpuinit dr_cpu_configure(struct ds_info *dp, - struct ds_cap_state *cp, - u64 req_num, - cpumask_t *mask) +static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp, + u64 req_num, cpumask_t *mask) { struct ds_data *resp; int resp_len, ncpus, cpu; @@ -627,9 +625,8 @@ static int dr_cpu_unconfigure(struct ds_info *dp, return 0; } -static void __cpuinit dr_cpu_data(struct ds_info *dp, - struct ds_cap_state *cp, - void *buf, int len) +static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, + int len) { struct ds_data *data = buf; struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index cc3c5cb47cda..9c179fbfb219 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -250,7 +250,7 @@ extern struct ino_bucket *ivector_table; extern unsigned long ivector_table_pa; extern void init_irqwork_curcpu(void); -extern void __cpuinit sun4v_register_mondo_queues(int this_cpu); +extern void sun4v_register_mondo_queues(int this_cpu); #endif /* CONFIG_SPARC32 */ #endif /* _ENTRY_H */ diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S index 605c960b2fa6..4eb1a5a1d544 100644 --- a/arch/sparc/kernel/hvtramp.S +++ b/arch/sparc/kernel/hvtramp.S @@ -16,7 +16,6 @@ #include #include - __CPUINIT .align 8 .globl hv_cpu_startup, hv_cpu_startup_end diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 9bcbbe2c4e7e..d4840cec2c55 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -835,7 +835,8 @@ void notrace init_irqwork_curcpu(void) * Therefore you cannot make any OBP calls, not even prom_printf, * from these two routines. 
*/ -static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) +static void notrace register_one_mondo(unsigned long paddr, unsigned long type, + unsigned long qmask) { unsigned long num_entries = (qmask + 1) / 64; unsigned long status; @@ -848,7 +849,7 @@ static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned l } } -void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu) +void notrace sun4v_register_mondo_queues(int this_cpu) { struct trap_per_cpu *tb = &trap_block[this_cpu]; diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index d7aa524b7283..6edf955f987c 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -54,7 +54,7 @@ extern ctxd_t *srmmu_ctx_table_phys; static int smp_processors_ready; extern volatile unsigned long cpu_callin_map[NR_CPUS]; extern cpumask_t smp_commenced_mask; -void __cpuinit leon_configure_cache_smp(void); +void leon_configure_cache_smp(void); static void leon_ipi_init(void); /* IRQ number of LEON IPIs */ @@ -69,12 +69,12 @@ static inline unsigned long do_swap(volatile unsigned long *ptr, return val; } -void __cpuinit leon_cpu_pre_starting(void *arg) +void leon_cpu_pre_starting(void *arg) { leon_configure_cache_smp(); } -void __cpuinit leon_cpu_pre_online(void *arg) +void leon_cpu_pre_online(void *arg) { int cpuid = hard_smp_processor_id(); @@ -106,7 +106,7 @@ void __cpuinit leon_cpu_pre_online(void *arg) extern struct linux_prom_registers smp_penguin_ctable; -void __cpuinit leon_configure_cache_smp(void) +void leon_configure_cache_smp(void) { unsigned long cfg = sparc_leon3_get_dcachecfg(); int me = smp_processor_id(); @@ -186,7 +186,7 @@ void __init leon_boot_cpus(void) } -int __cpuinit leon_boot_one_cpu(int i, struct task_struct *idle) +int leon_boot_one_cpu(int i, struct task_struct *idle) { int timeout; diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 831c001604e8..b90bf23e3aab 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -571,9 +571,7 @@ static void __init report_platform_properties(void) mdesc_release(hp); } -static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, - struct mdesc_handle *hp, - u64 mp) +static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) { const u64 *level = mdesc_get_property(hp, mp, "level", NULL); const u64 *size = mdesc_get_property(hp, mp, "size", NULL); @@ -616,7 +614,7 @@ static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, } } -static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) +static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) { u64 a; @@ -649,7 +647,7 @@ static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id } } -static void __cpuinit set_core_ids(struct mdesc_handle *hp) +static void set_core_ids(struct mdesc_handle *hp) { int idx; u64 mp; @@ -674,7 +672,7 @@ static void __cpuinit set_core_ids(struct mdesc_handle *hp) } } -static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) +static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) { u64 a; @@ -693,7 +691,7 @@ static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id } } -static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) +static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) { int idx; u64 mp; @@ -714,14 +712,14 @@ static void __cpuinit 
__set_proc_ids(struct mdesc_handle *hp, const char *exec_u } } -static void __cpuinit set_proc_ids(struct mdesc_handle *hp) +static void set_proc_ids(struct mdesc_handle *hp) { __set_proc_ids(hp, "exec_unit"); __set_proc_ids(hp, "exec-unit"); } -static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, - unsigned long def, unsigned long max) +static void get_one_mondo_bits(const u64 *p, unsigned int *mask, + unsigned long def, unsigned long max) { u64 val; @@ -742,8 +740,8 @@ use_default: *mask = ((1U << def) * 64U) - 1U; } -static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, - struct trap_per_cpu *tb) +static void get_mondo_data(struct mdesc_handle *hp, u64 mp, + struct trap_per_cpu *tb) { static int printed; const u64 *val; @@ -769,7 +767,7 @@ static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, } } -static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) +static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) { struct mdesc_handle *hp = mdesc_grab(); void *ret = NULL; @@ -799,7 +797,8 @@ out: return ret; } -static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) +static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, + void *arg) { ncpus_probed++; #ifdef CONFIG_SMP @@ -808,7 +807,7 @@ static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpui return NULL; } -void __cpuinit mdesc_populate_present_mask(cpumask_t *mask) +void mdesc_populate_present_mask(cpumask_t *mask) { if (tlb_type != hypervisor) return; @@ -841,7 +840,8 @@ void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask) mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask); } -static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) +static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, + void *arg) { const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); struct trap_per_cpu *tb; @@ -890,7 +890,7 @@ static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpu return NULL; } -void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask) +void mdesc_fill_in_cpu_data(cpumask_t *mask) { struct mdesc_handle *hp; diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index e3f2b81c23f1..a102bfba6ea8 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -39,7 +39,7 @@ #include "kernel.h" #include "irq.h" -volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; +volatile unsigned long cpu_callin_map[NR_CPUS] = {0,}; cpumask_t smp_commenced_mask = CPU_MASK_NONE; @@ -53,7 +53,7 @@ const struct sparc32_ipi_ops *sparc32_ipi_ops; * instruction which is much better... 
*/ -void __cpuinit smp_store_cpu_info(int id) +void smp_store_cpu_info(int id) { int cpu_node; int mid; @@ -120,7 +120,7 @@ void cpu_panic(void) panic("SMP bolixed\n"); } -struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 }; +struct linux_prom_registers smp_penguin_ctable = { 0 }; void smp_send_reschedule(int cpu) { @@ -259,10 +259,10 @@ void __init smp_prepare_boot_cpu(void) set_cpu_possible(cpuid, true); } -int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - extern int __cpuinit smp4m_boot_one_cpu(int, struct task_struct *); - extern int __cpuinit smp4d_boot_one_cpu(int, struct task_struct *); + extern int smp4m_boot_one_cpu(int, struct task_struct *); + extern int smp4d_boot_one_cpu(int, struct task_struct *); int ret=0; switch(sparc_cpu_model) { @@ -297,7 +297,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) return ret; } -void __cpuinit arch_cpu_pre_starting(void *arg) +void arch_cpu_pre_starting(void *arg) { local_ops->cache_all(); local_ops->tlb_all(); @@ -317,7 +317,7 @@ void __cpuinit arch_cpu_pre_starting(void *arg) } } -void __cpuinit arch_cpu_pre_online(void *arg) +void arch_cpu_pre_online(void *arg) { unsigned int cpuid = hard_smp_processor_id(); @@ -344,7 +344,7 @@ void __cpuinit arch_cpu_pre_online(void *arg) } } -void __cpuinit sparc_start_secondary(void *arg) +void sparc_start_secondary(void *arg) { unsigned int cpu; @@ -375,7 +375,7 @@ void __cpuinit sparc_start_secondary(void *arg) BUG(); } -void __cpuinit smp_callin(void) +void smp_callin(void) { sparc_start_secondary(NULL); } diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 77539eda928c..e142545244f2 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -87,7 +87,7 @@ extern void setup_sparc64_timer(void); static volatile unsigned long callin_flag = 0; -void __cpuinit smp_callin(void) +void smp_callin(void) { int cpuid = hard_smp_processor_id(); @@ -281,7 +281,8 @@ static unsigned long kimage_addr_to_ra(void *p) return kern_base + (val - KERNBASE); } -static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp) +static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, + void **descrp) { extern unsigned long sparc64_ttable_tl0; extern unsigned long kern_locked_tte_data; @@ -342,7 +343,7 @@ extern unsigned long sparc64_cpu_startup; */ static struct thread_info *cpu_new_thread = NULL; -static int __cpuinit smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) +static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) { unsigned long entry = (unsigned long)(&sparc64_cpu_startup); @@ -1266,7 +1267,7 @@ void smp_fill_in_sib_core_maps(void) } } -int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { int ret = smp_boot_one_cpu(cpu, tidle); diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index c9eb82f23d92..d5c319553fd0 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -50,7 +50,7 @@ static inline void show_leds(int cpuid) "i" (ASI_M_CTL)); } -void __cpuinit sun4d_cpu_pre_starting(void *arg) +void sun4d_cpu_pre_starting(void *arg) { int cpuid = hard_smp_processor_id(); @@ -62,7 +62,7 @@ void __cpuinit sun4d_cpu_pre_starting(void *arg) cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000); } -void __cpuinit sun4d_cpu_pre_online(void *arg) +void 
sun4d_cpu_pre_online(void *arg) { unsigned long flags; int cpuid; @@ -118,7 +118,7 @@ void __init smp4d_boot_cpus(void) local_ops->cache_all(); } -int __cpuinit smp4d_boot_one_cpu(int i, struct task_struct *idle) +int smp4d_boot_one_cpu(int i, struct task_struct *idle) { unsigned long *entry = &sun4d_cpu_startup; int timeout; diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 8a65f158153d..d3408e72d20c 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -34,11 +34,11 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val) return val; } -void __cpuinit sun4m_cpu_pre_starting(void *arg) +void sun4m_cpu_pre_starting(void *arg) { } -void __cpuinit sun4m_cpu_pre_online(void *arg) +void sun4m_cpu_pre_online(void *arg) { int cpuid = hard_smp_processor_id(); @@ -75,7 +75,7 @@ void __init smp4m_boot_cpus(void) local_ops->cache_all(); } -int __cpuinit smp4m_boot_one_cpu(int i, struct task_struct *idle) +int smp4m_boot_one_cpu(int i, struct task_struct *idle) { unsigned long *entry = &sun4m_cpu_startup; int timeout; diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index 654e8aad3bbe..c21c673e5f7c 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c @@ -246,7 +246,7 @@ static void unregister_cpu_online(unsigned int cpu) } #endif -static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, +static int sysfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; @@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata sysfs_cpu_nb = { +static struct notifier_block sysfs_cpu_nb = { .notifier_call = sysfs_cpu_notify, }; diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S index 6cdb08cdabf0..76dcbd3c988a 100644 --- a/arch/sparc/kernel/trampoline_32.S +++ b/arch/sparc/kernel/trampoline_32.S @@ -18,7 +18,6 @@ .globl sun4m_cpu_startup .globl sun4d_cpu_startup - __CPUINIT .align 4 /* When we start up a cpu for the first time it enters this routine. 
@@ -94,7 +93,6 @@ smp_panic: /* CPUID in bootbus can be found at PA 0xff0140000 */ #define SUN4D_BOOTBUS_CPUID 0xf0140000 - __CPUINIT .align 4 sun4d_cpu_startup: @@ -146,7 +144,6 @@ sun4d_cpu_startup: b,a smp_panic - __CPUINIT .align 4 .global leon_smp_cpu_startup, smp_penguin_ctable diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S index 2e973a26fbda..e0b1e13a0736 100644 --- a/arch/sparc/kernel/trampoline_64.S +++ b/arch/sparc/kernel/trampoline_64.S @@ -32,13 +32,11 @@ itlb_load: dtlb_load: .asciz "SUNW,dtlb-load" - /* XXX __cpuinit this thing XXX */ #define TRAMP_STACK_SIZE 1024 .align 16 tramp_stack: .skip TRAMP_STACK_SIZE - __CPUINIT .align 8 .globl sparc64_cpu_startup, sparc64_cpu_startup_end sparc64_cpu_startup: diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index a9c42a7ffb6a..ed82edad1a39 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1694,7 +1694,7 @@ static void __init sun4v_ktsb_init(void) #endif } -void __cpuinit sun4v_ktsb_register(void) +void sun4v_ktsb_register(void) { unsigned long pa, ret; diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 036c2797dece..5d721df48a72 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -858,7 +858,7 @@ static void __init map_kernel(void) } } -void (*poke_srmmu)(void) __cpuinitdata = NULL; +void (*poke_srmmu)(void) = NULL; extern unsigned long bootmem_init(unsigned long *pages_avail); @@ -1055,7 +1055,7 @@ static void __init init_vac_layout(void) (int)vac_cache_size, (int)vac_line_size); } -static void __cpuinit poke_hypersparc(void) +static void poke_hypersparc(void) { volatile unsigned long clear; unsigned long mreg = srmmu_get_mmureg(); @@ -1107,7 +1107,7 @@ static void __init init_hypersparc(void) hypersparc_setup_blockops(); } -static void __cpuinit poke_swift(void) +static void poke_swift(void) { unsigned long mreg; @@ -1287,7 +1287,7 @@ static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long } -static void __cpuinit poke_turbosparc(void) +static void poke_turbosparc(void) { unsigned long mreg = srmmu_get_mmureg(); unsigned long ccreg; @@ -1350,7 +1350,7 @@ static void __init init_turbosparc(void) poke_srmmu = poke_turbosparc; } -static void __cpuinit poke_tsunami(void) +static void poke_tsunami(void) { unsigned long mreg = srmmu_get_mmureg(); @@ -1391,7 +1391,7 @@ static void __init init_tsunami(void) tsunami_setup_blockops(); } -static void __cpuinit poke_viking(void) +static void poke_viking(void) { unsigned long mreg = srmmu_get_mmureg(); static int smp_catch; -- cgit v1.2.3
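
The guard added to ldom_set_var() in the first patch above is an instance of a
general defensive pattern: compute the total space the copy will need and
compare it against the real capacity of the destination before calling
strcpy().  Below is a minimal userspace sketch of the same pattern; the buffer
size, names and message are illustrative only, not taken from the kernel
source.

#include <stdio.h>
#include <string.h>

#define DEST_CAPACITY 512	/* stands in for sizeof(pkt) - sizeof(pkt.header) */

/* Copy "var" and "value" back to back into dest, each NUL-terminated.
 * Returns 0 on success, -1 if the two strings plus their two NUL bytes
 * would not fit in the destination buffer.
 */
static int set_var_checked(char *dest, size_t cap,
			   const char *var, const char *value)
{
	/* +2 accounts for the NUL terminator after each of the two strings */
	if (strlen(var) + strlen(value) + 2 > cap) {
		fprintf(stderr, "contents too long for %zu-byte buffer\n", cap);
		return -1;
	}
	/* Safe: total length was verified above */
	strcpy(dest, var);
	strcpy(dest + strlen(var) + 1, value);
	return 0;
}

int main(void)
{
	char buf[DEST_CAPACITY];

	return set_var_checked(buf, sizeof(buf), "boot-device", "disk");
}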
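
The asm-offsets.c hunk in the sparc32 patch relies on the kernel's usual
mechanism for making C structure offsets visible to assembly: a C file
evaluates offsetof() expressions and emits them through a DEFINE() helper, and
the build turns the compiler output into a generated asm-offsets.h that .S
files can include.  The sketch below compresses the idea; the DEFINE() macro
is modeled on the kernel's include/linux/kbuild.h, and the structure is a
cut-down stand-in for vm_area_struct, not its real definition.

#include <stddef.h>

/* Emit a recognizable "->SYMBOL value" marker into the generated assembly;
 * a build-time script converts each marker into "#define SYMBOL value"
 * in the generated header (modeled on the kernel's DEFINE() helper).
 */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct mm_struct;

/* Cut-down stand-in for vm_area_struct: only the offset of vm_mm matters. */
struct vm_area_struct {
	unsigned long vm_start;
	unsigned long vm_end;
	struct mm_struct *vm_mm;
};

int foo(void)
{
	DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
	return 0;
}

Assembly can then load "ld [%o0 + VMA_VM_MM], %o0" instead of hard-coding the
0x0 offset, so a later reordering of vm_area_struct only changes the generated
constant rather than silently breaking the code.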
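
The free_area_cache removal is possible because every
arch_get_unmapped_area() variant now describes its search declaratively to
vm_unmapped_area(), which walks the augmented VMA tree instead of relying on a
per-mm cached hint.  The following is a sketch of what a minimal bottom-up
caller looks like against the API of that era; the function name and the use
of TASK_UNMAPPED_BASE/TASK_SIZE as limits are illustrative, not copied from
any particular architecture.

#include <linux/mm.h>
#include <linux/sched.h>

/* Bottom-up search for a free range of 'len' bytes, roughly what a
 * legacy-layout arch_get_unmapped_area() does.  No alignment constraint
 * is requested here.
 */
static unsigned long example_get_unmapped_area(unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;			/* bottom-up; VM_UNMAPPED_AREA_TOPDOWN for top-down */
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;
	info.align_offset = 0;

	return vm_unmapped_area(&info);	/* address, or -ENOMEM encoded on failure */
}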
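
For the __cpuinit removal, "turning the linux/init.h cpuinit content into
no-ops" means the annotations stay defined but stop placing code and data into
discardable sections, so callers can drop them gradually without build breaks.
Roughly, the interim definitions look like the sketch below (a paraphrase of
the intent, not the exact linux/init.h text):

/* Interim no-op definitions: the markers still parse, but no longer move
 * anything into .cpuinit.* sections, so removing them from arch callers
 * produces no section mismatch warnings.
 */
#define __cpuinit
#define __cpuinitdata
#define __cpuinitconst
#define __CPUINIT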