From af5ca3f4ec5cc4432a42a73b050dd8898ce8fd00 Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Thu, 20 Dec 2007 02:09:39 +0100 Subject: Driver core: change sysdev classes to use dynamic kobject names All kobjects require a dynamically allocated name now. We no longer need to keep track if the name is statically assigned, we can just unconditionally free() all kobject names on cleanup. Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/i8259.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index 471013577108..197d7977de35 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -238,7 +238,7 @@ static int i8259A_shutdown(struct sys_device *dev) } static struct sysdev_class i8259_sysdev_class = { - set_kset_name("i8259"), + .name = "i8259", .resume = i8259A_resume, .shutdown = i8259A_shutdown, }; -- cgit v1.2.3 From 86ef5c9a8edd78e6bf92879f32329d89b2d55b5a Mon Sep 17 00:00:00 2001 From: Gautham R Shenoy Date: Fri, 25 Jan 2008 21:08:02 +0100 Subject: cpu-hotplug: replace lock_cpu_hotplug() with get_online_cpus() Replace all lock_cpu_hotplug/unlock_cpu_hotplug from the kernel and use get_online_cpus and put_online_cpus instead as it highlights the refcount semantics in these operations. The new API guarantees protection against the cpu-hotplug operation, but it doesn't guarantee serialized access to any of the local data structures. Hence the changes needs to be reviewed. In case of pseries_add_processor/pseries_remove_processor, use cpu_maps_update_begin()/cpu_maps_update_done() as we're modifying the cpu_present_map there. Signed-off-by: Gautham R Shenoy Signed-off-by: Ingo Molnar --- Documentation/cpu-hotplug.txt | 11 ++++++----- arch/mips/kernel/mips-mt-fpaff.c | 10 +++++----- arch/powerpc/platforms/pseries/hotplug-cpu.c | 8 ++++---- arch/powerpc/platforms/pseries/rtasd.c | 8 ++++---- arch/x86/kernel/cpu/mtrr/main.c | 8 ++++---- arch/x86/kernel/microcode.c | 16 ++++++++-------- drivers/lguest/x86/core.c | 8 ++++---- drivers/s390/char/sclp_config.c | 4 ++-- include/linux/cpu.h | 8 ++++---- kernel/cpu.c | 10 +++++----- kernel/cpuset.c | 14 +++++++------- kernel/rcutorture.c | 6 +++--- kernel/sched.c | 4 ++-- kernel/stop_machine.c | 4 ++-- net/core/flow.c | 4 ++-- 15 files changed, 62 insertions(+), 61 deletions(-) (limited to 'arch/mips/kernel') diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt index a741f658a3c9..fb94f5a71b68 100644 --- a/Documentation/cpu-hotplug.txt +++ b/Documentation/cpu-hotplug.txt @@ -109,12 +109,13 @@ Never use anything other than cpumask_t to represent bitmap of CPUs. for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask. #include - lock_cpu_hotplug() and unlock_cpu_hotplug(): + get_online_cpus() and put_online_cpus(): -The above calls are used to inhibit cpu hotplug operations. While holding the -cpucontrol mutex, cpu_online_map will not change. If you merely need to avoid -cpus going away, you could also use preempt_disable() and preempt_enable() -for those sections. Just remember the critical section cannot call any +The above calls are used to inhibit cpu hotplug operations. While the +cpu_hotplug.refcount is non zero, the cpu_online_map will not change. +If you merely need to avoid cpus going away, you could also use +preempt_disable() and preempt_enable() for those sections. 
+Just remember the critical section cannot call any function that can sleep or schedule this process away. The preempt_disable() will work as long as stop_machine_run() is used to take a cpu down. diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 892665bb12b1..bb4f00c0cbe9 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -58,13 +58,13 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) return -EFAULT; - lock_cpu_hotplug(); + get_online_cpus(); read_lock(&tasklist_lock); p = find_process_by_pid(pid); if (!p) { read_unlock(&tasklist_lock); - unlock_cpu_hotplug(); + put_online_cpus(); return -ESRCH; } @@ -106,7 +106,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, out_unlock: put_task_struct(p); - unlock_cpu_hotplug(); + put_online_cpus(); return retval; } @@ -125,7 +125,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, if (len < real_len) return -EINVAL; - lock_cpu_hotplug(); + get_online_cpus(); read_lock(&tasklist_lock); retval = -ESRCH; @@ -140,7 +140,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, out_unlock: read_unlock(&tasklist_lock); - unlock_cpu_hotplug(); + put_online_cpus(); if (retval) return retval; if (copy_to_user(user_mask_ptr, &mask, real_len)) diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 412e6b42986f..c4ad54e0f288 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c @@ -153,7 +153,7 @@ static int pseries_add_processor(struct device_node *np) for (i = 0; i < nthreads; i++) cpu_set(i, tmp); - lock_cpu_hotplug(); + cpu_maps_update_begin(); BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map)); @@ -190,7 +190,7 @@ static int pseries_add_processor(struct device_node *np) } err = 0; out_unlock: - unlock_cpu_hotplug(); + cpu_maps_update_done(); return err; } @@ -211,7 +211,7 @@ static void pseries_remove_processor(struct device_node *np) nthreads = len / sizeof(u32); - lock_cpu_hotplug(); + cpu_maps_update_begin(); for (i = 0; i < nthreads; i++) { for_each_present_cpu(cpu) { if (get_hard_smp_processor_id(cpu) != intserv[i]) @@ -225,7 +225,7 @@ static void pseries_remove_processor(struct device_node *np) printk(KERN_WARNING "Could not find cpu to remove " "with physical id 0x%x\n", intserv[i]); } - unlock_cpu_hotplug(); + cpu_maps_update_done(); } static int pseries_smp_notifier(struct notifier_block *nb, diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c index 73401c820110..e3078ce41518 100644 --- a/arch/powerpc/platforms/pseries/rtasd.c +++ b/arch/powerpc/platforms/pseries/rtasd.c @@ -382,7 +382,7 @@ static void do_event_scan_all_cpus(long delay) { int cpu; - lock_cpu_hotplug(); + get_online_cpus(); cpu = first_cpu(cpu_online_map); for (;;) { set_cpus_allowed(current, cpumask_of_cpu(cpu)); @@ -390,15 +390,15 @@ static void do_event_scan_all_cpus(long delay) set_cpus_allowed(current, CPU_MASK_ALL); /* Drop hotplug lock, and sleep for the specified delay */ - unlock_cpu_hotplug(); + put_online_cpus(); msleep_interruptible(delay); - lock_cpu_hotplug(); + get_online_cpus(); cpu = next_cpu(cpu, cpu_online_map); if (cpu == NR_CPUS) break; } - unlock_cpu_hotplug(); + put_online_cpus(); } static int rtasd(void *unused) diff --git a/arch/x86/kernel/cpu/mtrr/main.c 
b/arch/x86/kernel/cpu/mtrr/main.c index 3b20613325dc..beb45c9c0835 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c @@ -349,7 +349,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, replace = -1; /* No CPU hotplug when we change MTRR entries */ - lock_cpu_hotplug(); + get_online_cpus(); /* Search for existing MTRR */ mutex_lock(&mtrr_mutex); for (i = 0; i < num_var_ranges; ++i) { @@ -405,7 +405,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, error = i; out: mutex_unlock(&mtrr_mutex); - unlock_cpu_hotplug(); + put_online_cpus(); return error; } @@ -495,7 +495,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) max = num_var_ranges; /* No CPU hotplug when we change MTRR entries */ - lock_cpu_hotplug(); + get_online_cpus(); mutex_lock(&mtrr_mutex); if (reg < 0) { /* Search for existing MTRR */ @@ -536,7 +536,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) error = reg; out: mutex_unlock(&mtrr_mutex); - unlock_cpu_hotplug(); + put_online_cpus(); return error; } /** diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c index 09c315214a5e..40cfd5488719 100644 --- a/arch/x86/kernel/microcode.c +++ b/arch/x86/kernel/microcode.c @@ -436,7 +436,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ return -EINVAL; } - lock_cpu_hotplug(); + get_online_cpus(); mutex_lock(µcode_mutex); user_buffer = (void __user *) buf; @@ -447,7 +447,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ ret = (ssize_t)len; mutex_unlock(µcode_mutex); - unlock_cpu_hotplug(); + put_online_cpus(); return ret; } @@ -658,14 +658,14 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz) old = current->cpus_allowed; - lock_cpu_hotplug(); + get_online_cpus(); set_cpus_allowed(current, cpumask_of_cpu(cpu)); mutex_lock(µcode_mutex); if (uci->valid) err = cpu_request_microcode(cpu); mutex_unlock(µcode_mutex); - unlock_cpu_hotplug(); + put_online_cpus(); set_cpus_allowed(current, old); } if (err) @@ -817,9 +817,9 @@ static int __init microcode_init (void) return PTR_ERR(microcode_pdev); } - lock_cpu_hotplug(); + get_online_cpus(); error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver); - unlock_cpu_hotplug(); + put_online_cpus(); if (error) { microcode_dev_exit(); platform_device_unregister(microcode_pdev); @@ -839,9 +839,9 @@ static void __exit microcode_exit (void) unregister_hotcpu_notifier(&mc_cpu_notifier); - lock_cpu_hotplug(); + get_online_cpus(); sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver); - unlock_cpu_hotplug(); + put_online_cpus(); platform_device_unregister(microcode_pdev); } diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 482aec2a9631..96d0fd07c57d 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -459,7 +459,7 @@ void __init lguest_arch_host_init(void) /* We don't need the complexity of CPUs coming and going while we're * doing this. */ - lock_cpu_hotplug(); + get_online_cpus(); if (cpu_has_pge) { /* We have a broader idea of "global". */ /* Remember that this was originally set (for cleanup). */ cpu_had_pge = 1; @@ -469,20 +469,20 @@ void __init lguest_arch_host_init(void) /* Turn off the feature in the global feature set. 
*/ clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); } - unlock_cpu_hotplug(); + put_online_cpus(); }; /*:*/ void __exit lguest_arch_host_fini(void) { /* If we had PGE before we started, turn it back on now. */ - lock_cpu_hotplug(); + get_online_cpus(); if (cpu_had_pge) { set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability); /* adjust_pge's argument "1" means set PGE. */ on_each_cpu(adjust_pge, (void *)1, 0, 1); } - unlock_cpu_hotplug(); + put_online_cpus(); } diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 5322e5e54a98..9dc77f14fa52 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c @@ -29,12 +29,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work) struct sys_device *sysdev; printk(KERN_WARNING TAG "cpu capability changed.\n"); - lock_cpu_hotplug(); + get_online_cpus(); for_each_online_cpu(cpu) { sysdev = get_cpu_sysdev(cpu); kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); } - unlock_cpu_hotplug(); + put_online_cpus(); } static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index a40247e4d462..3a3ff1c5cbef 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -100,8 +100,8 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) mutex_unlock(cpu_hp_mutex); } -extern void lock_cpu_hotplug(void); -extern void unlock_cpu_hotplug(void); +extern void get_online_cpus(void); +extern void put_online_cpus(void); #define hotcpu_notifier(fn, pri) { \ static struct notifier_block fn##_nb = \ { .notifier_call = fn, .priority = pri }; \ @@ -118,8 +118,8 @@ static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) { } -#define lock_cpu_hotplug() do { } while (0) -#define unlock_cpu_hotplug() do { } while (0) +#define get_online_cpus() do { } while (0) +#define put_online_cpus() do { } while (0) #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) /* These aren't inline functions due to a GCC bug. */ #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) diff --git a/kernel/cpu.c b/kernel/cpu.c index 656dc3fcbbae..b0c4152995f8 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -48,7 +48,7 @@ void __init cpu_hotplug_init(void) #ifdef CONFIG_HOTPLUG_CPU -void lock_cpu_hotplug(void) +void get_online_cpus(void) { might_sleep(); if (cpu_hotplug.active_writer == current) @@ -58,9 +58,9 @@ void lock_cpu_hotplug(void) mutex_unlock(&cpu_hotplug.lock); } -EXPORT_SYMBOL_GPL(lock_cpu_hotplug); +EXPORT_SYMBOL_GPL(get_online_cpus); -void unlock_cpu_hotplug(void) +void put_online_cpus(void) { if (cpu_hotplug.active_writer == current) return; @@ -73,7 +73,7 @@ void unlock_cpu_hotplug(void) mutex_unlock(&cpu_hotplug.lock); } -EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); +EXPORT_SYMBOL_GPL(put_online_cpus); #endif /* CONFIG_HOTPLUG_CPU */ @@ -110,7 +110,7 @@ void cpu_maps_update_done(void) * non zero and goes to sleep again. * * However, this is very difficult to achieve in practice since - * lock_cpu_hotplug() not an api which is called all that often. + * get_online_cpus() not an api which is called all that often. * */ static void cpu_hotplug_begin(void) diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 50f5dc463688..cfaf6419d817 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -537,10 +537,10 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b) * * Call with cgroup_mutex held. 
May take callback_mutex during * call due to the kfifo_alloc() and kmalloc() calls. May nest - * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair. + * a call to the get_online_cpus()/put_online_cpus() pair. * Must not be called holding callback_mutex, because we must not - * call lock_cpu_hotplug() while holding callback_mutex. Elsewhere - * the kernel nests callback_mutex inside lock_cpu_hotplug() calls. + * call get_online_cpus() while holding callback_mutex. Elsewhere + * the kernel nests callback_mutex inside get_online_cpus() calls. * So the reverse nesting would risk an ABBA deadlock. * * The three key local variables below are: @@ -691,9 +691,9 @@ restart: rebuild: /* Have scheduler rebuild sched domains */ - lock_cpu_hotplug(); + get_online_cpus(); partition_sched_domains(ndoms, doms); - unlock_cpu_hotplug(); + put_online_cpus(); done: if (q && !IS_ERR(q)) @@ -1617,10 +1617,10 @@ static struct cgroup_subsys_state *cpuset_create( * * If the cpuset being removed has its flag 'sched_load_balance' * enabled, then simulate turning sched_load_balance off, which - * will call rebuild_sched_domains(). The lock_cpu_hotplug() + * will call rebuild_sched_domains(). The get_online_cpus() * call in rebuild_sched_domains() must not be made while holding * callback_mutex. Elsewhere the kernel nests callback_mutex inside - * lock_cpu_hotplug() calls. So the reverse nesting would risk an + * get_online_cpus() calls. So the reverse nesting would risk an * ABBA deadlock. */ diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index c3e165c2318f..fd599829e72a 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c @@ -726,11 +726,11 @@ static void rcu_torture_shuffle_tasks(void) cpumask_t tmp_mask = CPU_MASK_ALL; int i; - lock_cpu_hotplug(); + get_online_cpus(); /* No point in shuffling if there is only one online CPU (ex: UP) */ if (num_online_cpus() == 1) { - unlock_cpu_hotplug(); + put_online_cpus(); return; } @@ -762,7 +762,7 @@ static void rcu_torture_shuffle_tasks(void) else rcu_idle_cpu--; - unlock_cpu_hotplug(); + put_online_cpus(); } /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the diff --git a/kernel/sched.c b/kernel/sched.c index 86e55a9c2de6..672aa68bfeac 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -7152,7 +7152,7 @@ static int load_balance_monitor(void *unused) int i, cpu, balanced = 1; /* Prevent cpus going down or coming up */ - lock_cpu_hotplug(); + get_online_cpus(); /* lockout changes to doms_cur[] array */ lock_doms_cur(); /* @@ -7186,7 +7186,7 @@ static int load_balance_monitor(void *unused) rcu_read_unlock(); unlock_doms_cur(); - unlock_cpu_hotplug(); + put_online_cpus(); if (!balanced) timeout = sysctl_sched_min_bal_int_shares; diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index 319821ef78af..51b5ee53571a 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -203,13 +203,13 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) int ret; /* No CPUs can come up or down during this. */ - lock_cpu_hotplug(); + get_online_cpus(); p = __stop_machine_run(fn, data, cpu); if (!IS_ERR(p)) ret = kthread_stop(p); else ret = PTR_ERR(p); - unlock_cpu_hotplug(); + put_online_cpus(); return ret; } diff --git a/net/core/flow.c b/net/core/flow.c index 3ed2b4b1d6d4..6489f4e24ecf 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -293,7 +293,7 @@ void flow_cache_flush(void) static DEFINE_MUTEX(flow_flush_sem); /* Don't want cpus going down or up during this. 
*/ - lock_cpu_hotplug(); + get_online_cpus(); mutex_lock(&flow_flush_sem); atomic_set(&info.cpuleft, num_online_cpus()); init_completion(&info.completion); @@ -305,7 +305,7 @@ void flow_cache_flush(void) wait_for_completion(&info.completion); mutex_unlock(&flow_flush_sem); - unlock_cpu_hotplug(); + put_online_cpus(); } static void __devinit flow_cache_cpu_prepare(int cpu) -- cgit v1.2.3 From 01ba2bdc6b639764745ff678caf3fb9e5bcd745a Mon Sep 17 00:00:00 2001 From: Sam Ravnborg Date: Sun, 20 Jan 2008 14:15:03 +0100 Subject: all archs: consolidate init and exit sections in vmlinux.lds.h This patch consolidate all definitions of .init.text, .init.data and .exit.text, .exit.data section definitions in the generic vmlinux.lds.h. This is a preparational patch - alone it does not buy us much good. Signed-off-by: Sam Ravnborg --- arch/alpha/kernel/vmlinux.lds.S | 8 ++++---- arch/arm/kernel/vmlinux.lds.S | 10 +++++----- arch/avr32/kernel/vmlinux.lds.S | 8 ++++---- arch/blackfin/kernel/vmlinux.lds.S | 8 ++++---- arch/cris/arch-v10/vmlinux.lds.S | 8 ++++---- arch/cris/arch-v32/vmlinux.lds.S | 8 ++++---- arch/frv/kernel/vmlinux.lds.S | 14 +++++++------- arch/h8300/kernel/vmlinux.lds.S | 8 ++++---- arch/ia64/kernel/vmlinux.lds.S | 8 ++++---- arch/m32r/kernel/vmlinux.lds.S | 12 ++++++------ arch/m68k/kernel/vmlinux-std.lds | 8 ++++---- arch/m68k/kernel/vmlinux-sun3.lds | 8 ++++---- arch/m68knommu/kernel/vmlinux.lds.S | 8 ++++---- arch/mips/kernel/vmlinux.lds.S | 8 ++++---- arch/parisc/kernel/vmlinux.lds.S | 8 ++++---- arch/powerpc/kernel/vmlinux.lds.S | 10 ++++++---- arch/ppc/kernel/vmlinux.lds.S | 8 ++++---- arch/s390/kernel/vmlinux.lds.S | 8 ++++---- arch/sh/kernel/vmlinux_32.lds.S | 8 ++++---- arch/sh/kernel/vmlinux_64.lds.S | 8 ++++---- arch/sparc/kernel/vmlinux.lds.S | 8 ++++---- arch/sparc64/kernel/vmlinux.lds.S | 8 ++++---- arch/um/kernel/dyn.lds.S | 4 ++-- arch/um/kernel/uml.lds.S | 4 ++-- arch/v850/kernel/vmlinux.lds.S | 10 +++++----- arch/x86/kernel/vmlinux_32.lds.S | 14 ++++++++++---- arch/x86/kernel/vmlinux_64.lds.S | 19 +++++++++++++------ arch/xtensa/kernel/vmlinux.lds.S | 9 +++++---- include/asm-generic/vmlinux.lds.h | 7 +++++++ 29 files changed, 140 insertions(+), 117 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/alpha/kernel/vmlinux.lds.S b/arch/alpha/kernel/vmlinux.lds.S index 55c05b511f4c..f13249be17c5 100644 --- a/arch/alpha/kernel/vmlinux.lds.S +++ b/arch/alpha/kernel/vmlinux.lds.S @@ -46,11 +46,11 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); @@ -136,8 +136,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 30f732c7fdb5..4898bdcfe7dd 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -30,7 +30,7 @@ SECTIONS } .init : { /* Init code and data */ - *(.init.text) + INIT_TEXT _einittext = .; __proc_info_begin = .; *(.proc.info.init) @@ -70,15 +70,15 @@ SECTIONS __per_cpu_end = .; #ifndef CONFIG_XIP_KERNEL __init_begin = _stext; - *(.init.data) + INIT_DATA . = ALIGN(4096); __init_end = .; #endif } /DISCARD/ : { /* Exit code and data */ - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) #ifndef CONFIG_MMU *(.fixup) @@ -130,7 +130,7 @@ SECTIONS #ifdef CONFIG_XIP_KERNEL . = ALIGN(4096); __init_begin = .; - *(.init.data) + INIT_DATA . 
= ALIGN(4096); __init_end = .; #endif diff --git a/arch/avr32/kernel/vmlinux.lds.S b/arch/avr32/kernel/vmlinux.lds.S index 11f08e35a2eb..481cfd40c053 100644 --- a/arch/avr32/kernel/vmlinux.lds.S +++ b/arch/avr32/kernel/vmlinux.lds.S @@ -27,19 +27,19 @@ SECTIONS __init_begin = .; _sinittext = .; *(.text.reset) - *(.init.text) + INIT_TEXT /* * .exit.text is discarded at runtime, not * link time, to deal with references from * __bug_table */ - *(.exit.text) + EXIT_TEXT _einittext = .; . = ALIGN(4); __tagtable_begin = .; *(.taglist.init) __tagtable_end = .; - *(.init.data) + INIT_DATA . = ALIGN(16); __setup_start = .; *(.init.setup) @@ -135,7 +135,7 @@ SECTIONS * thrown away, as cleanup code is never called unless it's a module. */ /DISCARD/ : { - *(.exit.data) + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 9b75bc83c71f..858722421b40 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S @@ -91,13 +91,13 @@ SECTIONS { . = ALIGN(PAGE_SIZE); __sinittext = .; - *(.init.text) + INIT_TEXT __einittext = .; } .init.data : { . = ALIGN(16); - *(.init.data) + INIT_DATA } .init.setup : { @@ -198,8 +198,8 @@ SECTIONS /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } } diff --git a/arch/cris/arch-v10/vmlinux.lds.S b/arch/cris/arch-v10/vmlinux.lds.S index 97a7876ed681..93c9f0ea286b 100644 --- a/arch/cris/arch-v10/vmlinux.lds.S +++ b/arch/cris/arch-v10/vmlinux.lds.S @@ -57,10 +57,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -109,8 +109,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.text.exit) - *(.data.exit) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/cris/arch-v32/vmlinux.lds.S b/arch/cris/arch-v32/vmlinux.lds.S index b076c134c0bb..fead8c59ea63 100644 --- a/arch/cris/arch-v32/vmlinux.lds.S +++ b/arch/cris/arch-v32/vmlinux.lds.S @@ -61,10 +61,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -124,8 +124,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.text.exit) - *(.data.exit) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/frv/kernel/vmlinux.lds.S b/arch/frv/kernel/vmlinux.lds.S index a17a81d58bf6..f42b328b1dd0 100644 --- a/arch/frv/kernel/vmlinux.lds.S +++ b/arch/frv/kernel/vmlinux.lds.S @@ -28,14 +28,14 @@ SECTIONS .init.text : { *(.text.head) #ifndef CONFIG_DEBUG_INFO - *(.init.text) - *(.exit.text) - *(.exit.data) + INIT_TEXT + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) #endif } _einittext = .; - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(8); __setup_start = .; @@ -106,8 +106,8 @@ SECTIONS LOCK_TEXT #ifdef CONFIG_DEBUG_INFO *( - .init.text - .exit.text + INIT_TEXT + EXIT_TEXT .exitcall.exit ) #endif @@ -138,7 +138,7 @@ SECTIONS .data : { /* Data */ DATA_DATA *(.data.*) - *(.exit.data) + EXIT_DATA CONSTRUCTORS } diff --git a/arch/h8300/kernel/vmlinux.lds.S b/arch/h8300/kernel/vmlinux.lds.S index a2e72d495551..43a87b9085b6 100644 --- a/arch/h8300/kernel/vmlinux.lds.S +++ b/arch/h8300/kernel/vmlinux.lds.S @@ -110,9 +110,9 @@ SECTIONS . 
= ALIGN(0x4) ; ___init_begin = .; __sinittext = .; - *(.init.text) + INIT_TEXT __einittext = .; - *(.init.data) + INIT_DATA . = ALIGN(0x4) ; ___setup_start = .; *(.init.setup) @@ -124,8 +124,8 @@ SECTIONS ___con_initcall_start = .; *(.con_initcall.init) ___con_initcall_end = .; - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA #if defined(CONFIG_BLK_DEV_INITRD) . = ALIGN(4); ___initramfs_start = .; diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 757e419ebcf8..80622acc95de 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -27,8 +27,8 @@ SECTIONS { /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) *(.IA_64.unwind.exit.text) *(.IA_64.unwind_info.exit.text) @@ -119,12 +119,12 @@ SECTIONS .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) - { *(.init.data) } + { INIT_DATA } #ifdef CONFIG_BLK_DEV_INITRD .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) diff --git a/arch/m32r/kernel/vmlinux.lds.S b/arch/m32r/kernel/vmlinux.lds.S index 942a8c7a4417..41b07854fcc6 100644 --- a/arch/m32r/kernel/vmlinux.lds.S +++ b/arch/m32r/kernel/vmlinux.lds.S @@ -76,10 +76,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -100,8 +100,8 @@ SECTIONS .altinstr_replacement : { *(.altinstr_replacement) } /* .exit.text is discard at runtime, not link time, to deal with references from .altinstructions and .eh_frame */ - .exit.text : { *(.exit.text) } - .exit.data : { *(.exit.data) } + .exit.text : { EXIT_TEXT } + .exit.data : { EXIT_DATA } #ifdef CONFIG_BLK_DEV_INITRD . = ALIGN(4096); @@ -124,8 +124,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds index 59fe285865ec..7537cc5e6159 100644 --- a/arch/m68k/kernel/vmlinux-std.lds +++ b/arch/m68k/kernel/vmlinux-std.lds @@ -45,10 +45,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -82,8 +82,8 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds index 4adffefb5c48..cdc313e7c299 100644 --- a/arch/m68k/kernel/vmlinux-sun3.lds +++ b/arch/m68k/kernel/vmlinux-sun3.lds @@ -38,10 +38,10 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; .init.setup : { *(.init.setup) } @@ -77,8 +77,8 @@ __init_begin = .; /* Sections to be discarded */ /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S index 07a0055602f4..b44edb08e212 100644 --- a/arch/m68knommu/kernel/vmlinux.lds.S +++ b/arch/m68knommu/kernel/vmlinux.lds.S @@ -143,9 +143,9 @@ SECTIONS { . 
= ALIGN(4096); __init_begin = .; _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; - *(.init.data) + INIT_DATA . = ALIGN(16); __setup_start = .; *(.init.setup) @@ -170,8 +170,8 @@ SECTIONS { } > INIT /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 5fc2398bdb76..b5470ceb418b 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -114,11 +114,11 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); .init.setup : { @@ -144,10 +144,10 @@ SECTIONS * references from .rodata */ .exit.text : { - *(.exit.text) + EXIT_TEXT } .exit.data : { - *(.exit.data) + EXIT_DATA } #if defined(CONFIG_BLK_DEV_INITRD) . = ALIGN(_PAGE_SIZE); diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index 40d0ff9b81ab..50b4a3a25d0a 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -172,11 +172,11 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); .init.setup : { @@ -215,10 +215,10 @@ SECTIONS * from .altinstructions and .eh_frame */ .exit.text : { - *(.exit.text) + EXIT_TEXT } .exit.data : { - *(.exit.data) + EXIT_DATA } #ifdef CONFIG_BLK_DEV_INITRD . = ALIGN(PAGE_SIZE); diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index f66fa5d966b0..0afb9e31d2a0 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -23,7 +23,7 @@ SECTIONS /* Sections to be discarded. */ /DISCARD/ : { *(.exitcall.exit) - *(.exit.data) + EXIT_DATA } . = KERNELBASE; @@ -76,17 +76,19 @@ SECTIONS .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } /* .exit.text is discarded at runtime, not link time, * to deal with references from __bug_table */ - .exit.text : { *(.exit.text) } + .exit.text : { + EXIT_TEXT + } .init.data : { - *(.init.data); + INIT_DATA __vtop_table_begin = .; *(.vtop_fixup); __vtop_table_end = .; diff --git a/arch/ppc/kernel/vmlinux.lds.S b/arch/ppc/kernel/vmlinux.lds.S index 98c1212674f6..52b64fcbdfc5 100644 --- a/arch/ppc/kernel/vmlinux.lds.S +++ b/arch/ppc/kernel/vmlinux.lds.S @@ -97,14 +97,14 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } /* .exit.text is discarded at runtime, not link time, to deal with references from __bug_table */ - .exit.text : { *(.exit.text) } + .exit.text : { EXIT_TEXT } .init.data : { - *(.init.data); + INIT_DATA __vtop_table_begin = .; *(.vtop_fixup); __vtop_table_end = .; @@ -164,6 +164,6 @@ SECTIONS /* Sections to be discarded. */ /DISCARD/ : { *(.exitcall.exit) - *(.exit.data) + EXIT_DATA } } diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 936159199346..7d43c3cd3ef3 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S @@ -97,7 +97,7 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } /* @@ -105,11 +105,11 @@ SECTIONS * to deal with references from __bug_table */ .exit.text : { - *(.exit.text) + EXIT_TEXT } .init.data : { - *(.init.data) + INIT_DATA } . 
= ALIGN(0x100); .init.setup : { @@ -156,7 +156,7 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.data) + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S index d549fac6d3e7..c7113786ecd4 100644 --- a/arch/sh/kernel/vmlinux_32.lds.S +++ b/arch/sh/kernel/vmlinux_32.lds.S @@ -84,9 +84,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; _sinittext = .; - .init.text : { *(.init.text) } + .init.text : { INIT_TEXT } _einittext = .; - .init.data : { *(.init.data) } + .init.data : { INIT_DATA } . = ALIGN(16); __setup_start = .; @@ -122,8 +122,8 @@ SECTIONS * .exit.text is discarded at runtime, not link time, to deal with * references from __bug_table */ - .exit.text : { *(.exit.text) } - .exit.data : { *(.exit.data) } + .exit.text : { EXIT_TEXT } + .exit.data : { EXIT_DATA } . = ALIGN(PAGE_SIZE); .bss : { diff --git a/arch/sh/kernel/vmlinux_64.lds.S b/arch/sh/kernel/vmlinux_64.lds.S index 2fd0f7401484..3f1bd6392bb3 100644 --- a/arch/sh/kernel/vmlinux_64.lds.S +++ b/arch/sh/kernel/vmlinux_64.lds.S @@ -96,9 +96,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); /* Init code and data */ __init_begin = .; _sinittext = .; - .init.text : C_PHYS(.init.text) { *(.init.text) } + .init.text : C_PHYS(.init.text) { INIT_TEXT } _einittext = .; - .init.data : C_PHYS(.init.data) { *(.init.data) } + .init.data : C_PHYS(.init.data) { INIT_DATA } . = ALIGN(L1_CACHE_BYTES); /* Better if Cache Line aligned */ __setup_start = .; .init.setup : C_PHYS(.init.setup) { *(.init.setup) } @@ -134,8 +134,8 @@ SECTIONS * .exit.text is discarded at runtime, not link time, to deal with * references from __bug_table */ - .exit.text : C_PHYS(.exit.text) { *(.exit.text) } - .exit.data : C_PHYS(.exit.data) { *(.exit.data) } + .exit.text : C_PHYS(.exit.text) { EXIT_TEXT } + .exit.data : C_PHYS(.exit.data) { EXIT_DATA } . = ALIGN(PAGE_SIZE); .bss : C_PHYS(.bss) { diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index a8b4200f9cc3..216147d6e61f 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -48,12 +48,12 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } __init_text_end = .; .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); .init.setup : { @@ -102,8 +102,8 @@ SECTIONS _end = . ; PROVIDE (end = .); /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S index 9fcd503bc04a..01f809617e5e 100644 --- a/arch/sparc64/kernel/vmlinux.lds.S +++ b/arch/sparc64/kernel/vmlinux.lds.S @@ -56,11 +56,11 @@ SECTIONS .init.text : { __init_begin = .; _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA } . = ALIGN(16); .init.setup : { @@ -137,8 +137,8 @@ SECTIONS PROVIDE (end = .); /DISCARD/ : { - *(.exit.text) - *(.exit.data) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/arch/um/kernel/dyn.lds.S b/arch/um/kernel/dyn.lds.S index 3866f4960f04..26090b7f323e 100644 --- a/arch/um/kernel/dyn.lds.S +++ b/arch/um/kernel/dyn.lds.S @@ -17,7 +17,7 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } @@ -84,7 +84,7 @@ SECTIONS #include "asm/common.lds.S" - init.data : { *(.init.data) } + init.data : { INIT_DATA } /* Ensure the __preinit_array_start label is properly aligned. 
We could instead move the label definition inside the section, but diff --git a/arch/um/kernel/uml.lds.S b/arch/um/kernel/uml.lds.S index 13df191e2b41..5828c1d54505 100644 --- a/arch/um/kernel/uml.lds.S +++ b/arch/um/kernel/uml.lds.S @@ -23,7 +23,7 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } . = ALIGN(4096); @@ -48,7 +48,7 @@ SECTIONS #include "asm/common.lds.S" - init.data : { *(init.data) } + init.data : { INIT_DATA } .data : { . = ALIGN(KERNEL_STACK_SIZE); /* init_task */ diff --git a/arch/v850/kernel/vmlinux.lds.S b/arch/v850/kernel/vmlinux.lds.S index 6172599b4ce2..d08cd1d27f27 100644 --- a/arch/v850/kernel/vmlinux.lds.S +++ b/arch/v850/kernel/vmlinux.lds.S @@ -114,7 +114,7 @@ #define DATA_CONTENTS \ __sdata = . ; \ DATA_DATA \ - *(.exit.data) /* 2.5 convention */ \ + EXIT_DATA /* 2.5 convention */ \ *(.data.exit) /* 2.4 convention */ \ . = ALIGN (16) ; \ *(.data.cacheline_aligned) \ @@ -157,9 +157,9 @@ . = ALIGN (4096) ; \ __init_start = . ; \ __sinittext = .; \ - *(.init.text) /* 2.5 convention */ \ + INIT_TEXT /* 2.5 convention */ \ __einittext = .; \ - *(.init.data) \ + INIT_DATA \ *(.text.init) /* 2.4 convention */ \ *(.data.init) \ INITCALL_CONTENTS \ @@ -170,7 +170,7 @@ #define ROMK_INIT_RAM_CONTENTS \ . = ALIGN (4096) ; \ __init_start = . ; \ - *(.init.data) /* 2.5 convention */ \ + INIT_DATA /* 2.5 convention */ \ *(.data.init) /* 2.4 convention */ \ __init_end = . ; \ . = ALIGN (4096) ; @@ -179,7 +179,7 @@ should go into ROM. */ #define ROMK_INIT_ROM_CONTENTS \ _sinittext = .; \ - *(.init.text) /* 2.5 convention */ \ + INIT_TEXT /* 2.5 convention */ \ _einittext = .; \ *(.text.init) /* 2.4 convention */ \ INITCALL_CONTENTS \ diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S index 7d72cce00529..84c913f38f98 100644 --- a/arch/x86/kernel/vmlinux_32.lds.S +++ b/arch/x86/kernel/vmlinux_32.lds.S @@ -131,10 +131,12 @@ SECTIONS .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { __init_begin = .; _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) } + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { + INIT_DATA + } . = ALIGN(16); .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { __setup_start = .; @@ -169,8 +171,12 @@ SECTIONS } /* .exit.text is discard at runtime, not link time, to deal with references from .altinstructions and .eh_frame */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) } - .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { + EXIT_TEXT + } + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } #if defined(CONFIG_BLK_DEV_INITRD) . = ALIGN(4096); .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S index ba8ea97abd21..ea5386944e67 100644 --- a/arch/x86/kernel/vmlinux_64.lds.S +++ b/arch/x86/kernel/vmlinux_64.lds.S @@ -155,12 +155,15 @@ SECTIONS __init_begin = .; .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { _sinittext = .; - *(.init.text) + INIT_TEXT _einittext = .; } - __initdata_begin = .; - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { *(.init.data) } - __initdata_end = .; + .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { + __initdata_begin = .; + INIT_DATA + __initdata_end = .; + } + . 
= ALIGN(16); __setup_start = .; .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { *(.init.setup) } @@ -187,8 +190,12 @@ SECTIONS } /* .exit.text is discard at runtime, not link time, to deal with references from .altinstructions and .eh_frame */ - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { *(.exit.text) } - .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { *(.exit.data) } + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { + EXIT_TEXT + } + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } /* vdso blob that is mapped into user space */ vdso_start = . ; diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index ac4ed52034db..7d0f55a4982d 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -136,13 +136,13 @@ SECTIONS __init_begin = .; .init.text : { _sinittext = .; - *(.init.literal) *(.init.text) + *(.init.literal) INIT_TEXT _einittext = .; } .init.data : { - *(.init.data) + INIT_DATA . = ALIGN(0x4); __tagtable_begin = .; *(.taglist) @@ -278,8 +278,9 @@ SECTIONS /* Sections to be discarded */ /DISCARD/ : { - *(.exit.literal .exit.text) - *(.exit.data) + *(.exit.literal) + EXIT_TEXT + EXIT_DATA *(.exitcall.exit) } diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 9f584cc5c5fb..ae0166e83490 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -183,6 +183,13 @@ *(.kprobes.text) \ VMLINUX_SYMBOL(__kprobes_text_end) = .; +/* init and exit section handling */ +#define INIT_TEXT *(.init.text) +#define INIT_DATA *(.init.data) +#define EXIT_TEXT *(.exit.text) +#define EXIT_DATA *(.exit.data) + + /* DWARF debug sections. Symbols in the DWARF debugging sections are relative to the beginning of the section so we begin them at 0. */ -- cgit v1.2.3 From 2b22c034d04d3632a339d14d5803c8f94e412608 Mon Sep 17 00:00:00 2001 From: Alejandro Martinez Ruiz Date: Mon, 22 Oct 2007 21:36:44 +0200 Subject: [MIPS] Converting most array size calculations to use ARRAY_SIZE(). 
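For illustration of the pattern being applied (a sketch only; the table and macro names below are hypothetical and are not taken from the files touched by this patch), ARRAY_SIZE() from include/linux/kernel.h replaces the open-coded sizeof division:

	#include <linux/kernel.h>	/* ARRAY_SIZE() */
	#include <linux/ioport.h>	/* struct resource */

	/* Hypothetical example table. */
	static struct resource demo_io_resources[] = {
		{ .name = "timer", .start = 0x40, .end = 0x5f },
		{ .name = "rtc",   .start = 0x70, .end = 0x7f },
	};

	/* Open-coded count of the kind this patch removes: */
	#define DEMO_NR_RESOURCES_OLD \
		(sizeof(demo_io_resources) / sizeof(struct resource))

	/* Equivalent, preferred form: the element type is never spelled
	 * out a second time, so the count stays correct even if the
	 * array's element type later changes. */
	#define DEMO_NR_RESOURCES_NEW	ARRAY_SIZE(demo_io_resources)
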
Signed-off-by: Alejandro Martinez Ruiz Signed-off-by: Ralf Baechle --- arch/mips/au1000/common/dbdma.c | 2 +- arch/mips/kernel/kspd.c | 3 +-- arch/mips/philips/pnx8550/common/setup.c | 2 +- arch/mips/vr41xx/nec-cmbvr4133/setup.c | 2 +- 4 files changed, 4 insertions(+), 5 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/au1000/common/dbdma.c b/arch/mips/au1000/common/dbdma.c index edf91f41a786..428ed275a0f6 100644 --- a/arch/mips/au1000/common/dbdma.c +++ b/arch/mips/au1000/common/dbdma.c @@ -179,7 +179,7 @@ static dbdev_tab_t dbdev_tab[] = { { 0, 0, 0, 0, 0, 0, 0 }, }; -#define DBDEV_TAB_SIZE (sizeof(dbdev_tab) / sizeof(dbdev_tab_t)) +#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab) static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS]; diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c index d2c2e00e5864..f6704ab16306 100644 --- a/arch/mips/kernel/kspd.c +++ b/arch/mips/kernel/kspd.c @@ -161,8 +161,7 @@ static unsigned int translate_open_flags(int flags) int i; unsigned int ret = 0; - for (i = 0; i < (sizeof(open_flags_table) / sizeof(struct apsp_table)); - i++) { + for (i = 0; i < ARRAY_SIZE(open_flags_table); i++) { if( (flags & open_flags_table[i].sp) ) { ret |= open_flags_table[i].ap; } diff --git a/arch/mips/philips/pnx8550/common/setup.c b/arch/mips/philips/pnx8550/common/setup.c index 2ce298f4d19a..92d764c97701 100644 --- a/arch/mips/philips/pnx8550/common/setup.c +++ b/arch/mips/philips/pnx8550/common/setup.c @@ -74,7 +74,7 @@ struct resource standard_io_resources[] = { }, }; -#define STANDARD_IO_RESOURCES (sizeof(standard_io_resources)/sizeof(struct resource)) +#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources) extern struct resource pci_io_resource; extern struct resource pci_mem_resource; diff --git a/arch/mips/vr41xx/nec-cmbvr4133/setup.c b/arch/mips/vr41xx/nec-cmbvr4133/setup.c index 58e47686b499..f14bea8daf90 100644 --- a/arch/mips/vr41xx/nec-cmbvr4133/setup.c +++ b/arch/mips/vr41xx/nec-cmbvr4133/setup.c @@ -50,7 +50,7 @@ static struct mtd_partition cmbvr4133_mtd_parts[] = { } }; -#define number_partitions (sizeof(cmbvr4133_mtd_parts)/sizeof(struct mtd_partition)) +#define number_partitions ARRAY_SIZE(cmbvr4133_mtd_parts) #endif extern void i8259_init(void); -- cgit v1.2.3 From 20d60d9973c3b441902b0a3f4f6f7e7ade08f77d Mon Sep 17 00:00:00 2001 From: "Maciej W. Rozycki" Date: Tue, 23 Oct 2007 12:43:11 +0100 Subject: [MIPS] R4000/R4400 errata workarounds This is the gereric part of R4000/R4400 errata workarounds. They include compiler and assembler support as well as some source code modifications to address the problems with some combinations of multiply/divide+shift instructions as well as the daddi and daddiu instructions. Changes included are as follows: 1. New Kconfig options to select workarounds by platforms as necessary. 2. Arch top-level Makefile to pass necessary options to the compiler; also incompatible configurations are detected (-mno-sym32 unsupported as horribly intrusive for little gain). 3. Bug detection updated and shuffled -- the multiply/divide+shift problem is lethal enough that if not worked around it makes the kernel crash in time_init() because of a division by zero; the daddiu erratum might also trigger early potentially, though I have not observed it. On the other hand the daddi detection code requires the exception subsystem to have been initialised (and is there mainly for information). 4. 
r4k_daddiu_bug() added so that the existence of the erratum can be queried by code at the run time as necessary; useful for generated code like TLB fault and copy/clear page handlers. 5. __udelay() updated as it uses multiplication in inline assembly. Note that -mdaddi requires modified toolchain (which has been maintained by myself and available from my site for ~4years now -- versions covered are GCC 2.95.4 - 4.1.2 and binutils from 2.13 onwards). The -mfix-r4000 and -mfix-r4400 have been standard for a while though. Signed-off-by: Maciej W. Rozycki Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 16 +++++++++++ arch/mips/Makefile | 12 ++++++--- arch/mips/kernel/cpu-bugs64.c | 47 ++++++++++++++++---------------- arch/mips/kernel/setup.c | 4 ++- include/asm-mips/bugs.h | 25 +++++++++++++++++ include/asm-mips/delay.h | 12 +++++++-- include/asm-mips/war.h | 62 +++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 148 insertions(+), 30 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 12bf96334174..11bc17ce0ebf 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -91,6 +91,9 @@ config MACH_DECSTATION select BOOT_ELF32 select CEVT_R4K select CSRC_R4K + select CPU_DADDI_WORKAROUNDS if 64BIT + select CPU_R4000_WORKAROUNDS if 64BIT + select CPU_R4400_WORKAROUNDS if 64BIT select DMA_NONCOHERENT select NO_IOPORT select IRQ_CPU @@ -1605,6 +1608,19 @@ config CPU_HAS_SYNC config GENERIC_CLOCKEVENTS_BROADCAST bool +# +# CPU non-features +# +config CPU_DADDI_WORKAROUNDS + bool + +config CPU_R4000_WORKAROUNDS + bool + select CPU_R4400_WORKAROUNDS + +config CPU_R4400_WORKAROUNDS + bool + # # Use the generic interrupt handling code in kernel/irq/: # diff --git a/arch/mips/Makefile b/arch/mips/Makefile index a1f8d8b96b03..a49127af33c8 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -141,6 +141,10 @@ cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \ -Wa,--trap +cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,) +cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) +cflags-$(CONFIG_CPU_DADDI_WORKAROUNDS) += $(call cc-option,-mno-daddi,) + ifdef CONFIG_CPU_SB1 ifdef CONFIG_SB1_PASS_1_WORKAROUNDS MODFLAGS += -msb1-pass1-workarounds @@ -602,9 +606,11 @@ ifdef CONFIG_64BIT endif endif - ifeq ($(KBUILD_SYM32), y) - ifeq ($(call cc-option-yn,-msym32), y) - cflags-y += -msym32 -DKBUILD_64BIT_SYM32 + ifeq ($(KBUILD_SYM32)$(call cc-option-yn,-msym32), yy) + cflags-y += -msym32 -DKBUILD_64BIT_SYM32 + else + ifeq ($(CONFIG_CPU_DADDI_WORKAROUNDS), y) + $(error CONFIG_CPU_DADDI_WORKAROUNDS unsupported without -msym32) endif endif endif diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index af78456d4138..417bb3e336ac 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -18,6 +18,15 @@ #include #include +static char bug64hit[] __initdata = + "reliable operation impossible!\n%s"; +static char nowar[] __initdata = + "Please report to ."; +static char r4kwar[] __initdata = + "Enable CPU_R4000_WORKAROUNDS to rectify."; +static char daddiwar[] __initdata = + "Enable CPU_DADDI_WORKAROUNDS to rectify."; + static inline void align_mod(const int align, const int mod) { asm volatile( @@ -155,13 +164,7 @@ static inline void check_mult_sh(void) } printk("no.\n"); - panic("Reliable operation impossible!\n" -#ifndef CONFIG_CPU_R4000 - "Configure for R4000 to enable the workaround." 
-#else - "Please report to ." -#endif - ); + panic(bug64hit, !R4000_WAR ? r4kwar : nowar); } static volatile int daddi_ov __initdata = 0; @@ -233,15 +236,11 @@ static inline void check_daddi(void) } printk("no.\n"); - panic("Reliable operation impossible!\n" -#if !defined(CONFIG_CPU_R4000) && !defined(CONFIG_CPU_R4400) - "Configure for R4000 or R4400 to enable the workaround." -#else - "Please report to ." -#endif - ); + panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); } +int daddiu_bug __initdata = -1; + static inline void check_daddiu(void) { long v, w, tmp; @@ -281,7 +280,9 @@ static inline void check_daddiu(void) : "=&r" (v), "=&r" (w), "=&r" (tmp) : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); - if (v == w) { + daddiu_bug = v != w; + + if (!daddiu_bug) { printk("no.\n"); return; } @@ -303,18 +304,16 @@ static inline void check_daddiu(void) } printk("no.\n"); - panic("Reliable operation impossible!\n" -#if !defined(CONFIG_CPU_R4000) && !defined(CONFIG_CPU_R4400) - "Configure for R4000 or R4400 to enable the workaround." -#else - "Please report to ." -#endif - ); + panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); } -void __init check_bugs64(void) +void __init check_bugs64_early(void) { check_mult_sh(); - check_daddi(); check_daddiu(); } + +void __init check_bugs64(void) +{ + check_daddi(); +} diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index f8a535afce39..7b4418dd5857 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -8,7 +8,7 @@ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle * Copyright (C) 1996 Stoned Elipot * Copyright (C) 1999 Silicon Graphics, Inc. - * Copyright (C) 2000 2001, 2002 Maciej W. Rozycki + * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki */ #include #include @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -561,6 +562,7 @@ void __init setup_arch(char **cmdline_p) } #endif cpu_report(); + check_bugs_early(); #if defined(CONFIG_VT) #if defined(CONFIG_VGA_CONSOLE) diff --git a/include/asm-mips/bugs.h b/include/asm-mips/bugs.h index 0d7f9c1f5546..9dc10df32078 100644 --- a/include/asm-mips/bugs.h +++ b/include/asm-mips/bugs.h @@ -1,19 +1,34 @@ /* * This is included by init/main.c to check for architecture-dependent bugs. * + * Copyright (C) 2007 Maciej W. Rozycki + * * Needs: * void check_bugs(void); */ #ifndef _ASM_BUGS_H #define _ASM_BUGS_H +#include #include + #include #include +extern int daddiu_bug; + +extern void check_bugs64_early(void); + extern void check_bugs32(void); extern void check_bugs64(void); +static inline void check_bugs_early(void) +{ +#ifdef CONFIG_64BIT + check_bugs64_early(); +#endif +} + static inline void check_bugs(void) { unsigned int cpu = smp_processor_id(); @@ -25,4 +40,14 @@ static inline void check_bugs(void) #endif } +static inline int r4k_daddiu_bug(void) +{ +#ifdef CONFIG_64BIT + WARN_ON(daddiu_bug < 0); + return daddiu_bug != 0; +#else + return 0; +#endif +} + #endif /* _ASM_BUGS_H */ diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h index fab32131e9b4..de5105d05f1e 100644 --- a/include/asm-mips/delay.h +++ b/include/asm-mips/delay.h @@ -6,13 +6,16 @@ * Copyright (C) 1994 by Waldorf Electronics * Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. 
Rozycki */ #ifndef _ASM_DELAY_H #define _ASM_DELAY_H #include #include + #include +#include static inline void __delay(unsigned long loops) { @@ -50,7 +53,7 @@ static inline void __delay(unsigned long loops) static inline void __udelay(unsigned long usecs, unsigned long lpj) { - unsigned long lo; + unsigned long hi, lo; /* * The rates of 128 is rounded wrongly by the catchall case @@ -70,11 +73,16 @@ static inline void __udelay(unsigned long usecs, unsigned long lpj) : "=h" (usecs), "=l" (lo) : "r" (usecs), "r" (lpj) : GCC_REG_ACCUM); - else if (sizeof(long) == 8) + else if (sizeof(long) == 8 && !R4000_WAR) __asm__("dmultu\t%2, %3" : "=h" (usecs), "=l" (lo) : "r" (usecs), "r" (lpj) : GCC_REG_ACCUM); + else if (sizeof(long) == 8 && R4000_WAR) + __asm__("dmultu\t%3, %4\n\tmfhi\t%0" + : "=r" (usecs), "=h" (hi), "=l" (lo) + : "r" (usecs), "r" (lpj) + : GCC_REG_ACCUM); __delay(usecs); } diff --git a/include/asm-mips/war.h b/include/asm-mips/war.h index d2808edfd4e9..22361d5e3bf0 100644 --- a/include/asm-mips/war.h +++ b/include/asm-mips/war.h @@ -4,12 +4,74 @@ * for more details. * * Copyright (C) 2002, 2004, 2007 by Ralf Baechle + * Copyright (C) 2007 Maciej W. Rozycki */ #ifndef _ASM_WAR_H #define _ASM_WAR_H #include +/* + * Work around certain R4000 CPU errata (as implemented by GCC): + * + * - A double-word or a variable shift may give an incorrect result + * if executed immediately after starting an integer division: + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * erratum #28 + * "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0", erratum + * #19 + * + * - A double-word or a variable shift may give an incorrect result + * if executed while an integer multiplication is in progress: + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * errata #16 & #28 + * + * - An integer division may give an incorrect result if started in + * a delay slot of a taken branch or a jump: + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * erratum #52 + */ +#ifdef CONFIG_CPU_R4000_WORKAROUNDS +#define R4000_WAR 1 +#else +#define R4000_WAR 0 +#endif + +/* + * Work around certain R4400 CPU errata (as implemented by GCC): + * + * - A double-word or a variable shift may give an incorrect result + * if executed immediately after starting an integer division: + * "MIPS R4400MC Errata, Processor Revision 1.0", erratum #10 + * "MIPS R4400MC Errata, Processor Revision 2.0 & 3.0", erratum #4 + */ +#ifdef CONFIG_CPU_R4400_WORKAROUNDS +#define R4400_WAR 1 +#else +#define R4400_WAR 0 +#endif + +/* + * Work around the "daddi" and "daddiu" CPU errata: + * + * - The `daddi' instruction fails to trap on overflow. + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * erratum #23 + * + * - The `daddiu' instruction can produce an incorrect result. + * "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0", + * erratum #41 + * "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0", erratum + * #15 + * "MIPS R4400PC/SC Errata, Processor Revision 1.0", erratum #7 + * "MIPS R4400MC Errata, Processor Revision 1.0", erratum #5 + */ +#ifdef CONFIG_CPU_DADDI_WORKAROUNDS +#define DADDI_WAR 1 +#else +#define DADDI_WAR 0 +#endif + /* * Another R4600 erratum. Due to the lack of errata information the exact * technical details aren't known. I've experimentally found that disabling -- cgit v1.2.3 From 619b6e18fce20e4b2d0082cde989f37e1be7b3e1 Mon Sep 17 00:00:00 2001 From: "Maciej W. 
Rozycki" Date: Tue, 23 Oct 2007 12:43:25 +0100 Subject: [MIPS] R4000/R4400 daddiu erratum workaround This complements the generic R4000/R4400 errata workaround code and adds bits for the daddiu problem. In most places it just modifies handwritten assembly code so that the assembler is allowed to use a temporary register as daddiu may now be treated as a macro that expands to a sequence of li and daddu. It is the AT register or, where AT is unavailable or used explicitly for another purpose, an explicitly-named register is selected, using the .set at= feature added recently to gas. This feature is only used if CONFIG_CPU_DADDI_WORKAROUNDS has been set, so if the workaround remains disabled, the required version of binutils stays unchanged. Similarly, daddiu instructions put in branch delay slots in noreorder fragments are now taken out of them and the assembler is allowed to reorder them itself as possible (which it does making the whole idea of scheduling them into delay slots manually questionable). Also in the very few places where such a simple conversion was not possible, a handcoded longer sequence is implemented. Other than that there are changes to code responsible for building the TLB fault and page clear/copy handlers to avoid daddiu as appropriate. These are only effective if the erratum is verified to be present at the run time. Finally there is a trivial update to __delay(), because it uses daddiu in a branch delay slot. Signed-off-by: Maciej W. Rozycki Signed-off-by: Ralf Baechle --- arch/mips/kernel/genex.S | 8 ++++- arch/mips/lib/csum_partial.S | 61 ++++++++++++++++++++++++++++++------- arch/mips/lib/memcpy-inatomic.S | 25 ++++++++++++---- arch/mips/lib/memcpy.S | 60 +++++++++++++++++++++++++++++-------- arch/mips/lib/memset.S | 11 ++++++- arch/mips/lib/strncpy_user.S | 4 +-- arch/mips/mm/pg-r4k.c | 66 +++++++++++++++++++---------------------- arch/mips/mm/tlbex.c | 42 ++++++++++++++++++-------- include/asm-mips/delay.h | 11 ++++++- include/asm-mips/stackframe.h | 9 ++++++ include/asm-mips/uaccess.h | 13 ++++++-- 11 files changed, 227 insertions(+), 83 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index e76a76bf0b3d..c6ada98ee042 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -6,7 +6,7 @@ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2001 MIPS Technologies, Inc. - * Copyright (C) 2002 Maciej W. Rozycki + * Copyright (C) 2002, 2007 Maciej W. Rozycki */ #include @@ -471,7 +471,13 @@ NESTED(nmi_handler, PT_SIZE, sp) jr k0 rfe #else +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS LONG_ADDIU k0, 4 /* stall on $k0 */ +#else + .set at=v1 + LONG_ADDIU k0, 4 + .set noat +#endif MTC0 k0, CP0_EPC /* I hope three instructions between MTC0 and ERET are enough... */ ori k1, _THREAD_MASK diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S index c0a77fe038be..957a82484e3e 100644 --- a/arch/mips/lib/csum_partial.S +++ b/arch/mips/lib/csum_partial.S @@ -7,6 +7,7 @@ * * Copyright (C) 1998, 1999 Ralf Baechle * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. 
Rozycki */ #include #include @@ -52,9 +53,12 @@ #define UNIT(unit) ((unit)*NBYTES) #define ADDC(sum,reg) \ + .set push; \ + .set noat; \ ADD sum, reg; \ sltu v1, sum, reg; \ - ADD sum, v1 + ADD sum, v1; \ + .set pop #define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ LOAD _t0, (offset + UNIT(0))(src); \ @@ -178,8 +182,10 @@ move_128bytes: CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4) CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4) LONG_SUBU t8, t8, 0x01 + .set reorder /* DADDI_WAR */ + PTR_ADDU src, src, 0x80 bnez t8, move_128bytes - PTR_ADDU src, src, 0x80 + .set noreorder 1: beqz t2, 1f @@ -208,8 +214,10 @@ end_words: lw t0, (src) LONG_SUBU t8, t8, 0x1 ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + PTR_ADDU src, src, 0x4 bnez t8, end_words - PTR_ADDU src, src, 0x4 + .set noreorder /* unknown src alignment and < 8 bytes to go */ small_csumcpy: @@ -246,6 +254,8 @@ small_csumcpy: 1: ADDC(sum, t1) /* fold checksum */ + .set push + .set noat #ifdef USE_DOUBLE dsll32 v1, sum, 0 daddu sum, v1 @@ -266,6 +276,7 @@ small_csumcpy: srl sum, sum, 8 or sum, v1 andi sum, 0xffff + .set pop 1: .set reorder /* Add the passed partial csum. */ @@ -373,7 +384,11 @@ small_csumcpy: #define ADDRMASK (NBYTES-1) +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS .set noat +#else + .set at=v1 +#endif LEAF(__csum_partial_copy_user) PTR_ADDU AT, src, len /* See (1) above. */ @@ -441,8 +456,10 @@ EXC( STORE t6, UNIT(6)(dst), s_exc) ADDC(sum, t6) EXC( STORE t7, UNIT(7)(dst), s_exc) ADDC(sum, t7) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 8*NBYTES bgez len, 1b - ADD dst, dst, 8*NBYTES + .set noreorder ADD len, 8*NBYTES # revert len (see above) /* @@ -471,8 +488,10 @@ EXC( STORE t2, UNIT(2)(dst), s_exc) ADDC(sum, t2) EXC( STORE t3, UNIT(3)(dst), s_exc) ADDC(sum, t3) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES beqz len, done - ADD dst, dst, 4*NBYTES + .set noreorder less_than_4units: /* * rem = len % NBYTES @@ -485,8 +504,10 @@ EXC( LOAD t0, 0(src), l_exc) SUB len, len, NBYTES EXC( STORE t0, 0(dst), s_exc) ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES bne rem, len, 1b - ADD dst, dst, NBYTES + .set noreorder /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) @@ -572,8 +593,10 @@ EXC( STORE t2, UNIT(2)(dst), s_exc) ADDC(sum, t2) EXC( STORE t3, UNIT(3)(dst), s_exc) ADDC(sum, t3) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES bne len, rem, 1b - ADD dst, dst, 4*NBYTES + .set noreorder cleanup_src_unaligned: beqz len, done @@ -587,8 +610,10 @@ EXC( LDREST t0, REST(0)(src), l_exc_copy) SUB len, len, NBYTES EXC( STORE t0, 0(dst), s_exc) ADDC(sum, t0) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES bne len, rem, 1b - ADD dst, dst, NBYTES + .set noreorder copy_bytes_checklen: beqz len, done @@ -631,6 +656,8 @@ copy_bytes_done: ADDC(sum, t2) done: /* fold checksum */ + .set push + .set noat #ifdef USE_DOUBLE dsll32 v1, sum, 0 daddu sum, v1 @@ -651,6 +678,7 @@ done: srl sum, sum, 8 or sum, v1 andi sum, 0xffff + .set pop 1: .set reorder ADDC(sum, psum) @@ -678,8 +706,10 @@ EXC( lbu t1, 0(src), l_exc) SLLV t1, t1, t2 addu t2, SHIFT_INC ADDC(sum, t1) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 1 bne src, t0, 1b - ADD dst, dst, 1 + .set noreorder l_exc: LOAD t0, TI_TASK($28) nop @@ -697,12 +727,22 @@ l_exc: * Clear len bytes starting at dst. Can't call __bzero because it * might modify len. An inefficient loop for these rare times... 
*/ + .set reorder /* DADDI_WAR */ + SUB src, len, 1 beqz len, done - SUB src, len, 1 + .set noreorder 1: sb zero, 0(dst) ADD dst, dst, 1 + .set push + .set noat +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS bnez src, 1b SUB src, src, 1 +#else + li v1, 1 + bnez src, 1b + SUB src, src, v1 +#endif li v1, -EFAULT b done sw v1, (errptr) @@ -712,4 +752,5 @@ s_exc: li v1, -EFAULT jr ra sw v1, (errptr) + .set pop END(__csum_partial_copy_user) diff --git a/arch/mips/lib/memcpy-inatomic.S b/arch/mips/lib/memcpy-inatomic.S index 3a534b2baa0f..d1b08f5d6860 100644 --- a/arch/mips/lib/memcpy-inatomic.S +++ b/arch/mips/lib/memcpy-inatomic.S @@ -9,6 +9,7 @@ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. * Copyright (C) 2002 Broadcom, Inc. * memcpy/copy_user author: Mark Vandevoorde + * Copyright (C) 2007 Maciej W. Rozycki * * Mnemonic names for arguments to memcpy/__copy_user */ @@ -175,7 +176,11 @@ .text .set noreorder +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS .set noat +#else + .set at=v1 +#endif /* * A combined memcpy/__copy_user @@ -268,8 +273,10 @@ EXC( LOAD t3, UNIT(3)(src), l_exc_copy) STORE t1, UNIT(1)(dst) STORE t2, UNIT(2)(dst) STORE t3, UNIT(3)(dst) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES beqz len, done - ADD dst, dst, 4*NBYTES + .set noreorder less_than_4units: /* * rem = len % NBYTES @@ -281,8 +288,10 @@ EXC( LOAD t0, 0(src), l_exc) ADD src, src, NBYTES SUB len, len, NBYTES STORE t0, 0(dst) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES bne rem, len, 1b - ADD dst, dst, NBYTES + .set noreorder /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) @@ -361,8 +370,10 @@ EXC( LDREST t3, REST(3)(src), l_exc_copy) STORE t2, UNIT(2)(dst) STORE t3, UNIT(3)(dst) PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES bne len, rem, 1b - ADD dst, dst, 4*NBYTES + .set noreorder cleanup_src_unaligned: beqz len, done @@ -375,8 +386,10 @@ EXC( LDREST t0, REST(0)(src), l_exc_copy) ADD src, src, NBYTES SUB len, len, NBYTES STORE t0, 0(dst) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES bne len, rem, 1b - ADD dst, dst, NBYTES + .set noreorder copy_bytes_checklen: beqz len, done @@ -424,8 +437,10 @@ l_exc_copy: EXC( lb t1, 0(src), l_exc) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user + .set reorder /* DADDI_WAR */ + ADD dst, dst, 1 bne src, t0, 1b - ADD dst, dst, 1 + .set noreorder l_exc: LOAD t0, TI_TASK($28) nop diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index a526c62cb76a..aded7b159052 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -9,6 +9,7 @@ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc. * Copyright (C) 2002 Broadcom, Inc. * memcpy/copy_user author: Mark Vandevoorde + * Copyright (C) 2007 Maciej W. 
Rozycki * * Mnemonic names for arguments to memcpy/__copy_user */ @@ -175,7 +176,11 @@ .text .set noreorder +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS .set noat +#else + .set at=v1 +#endif /* * A combined memcpy/__copy_user @@ -271,8 +276,10 @@ EXC( STORE t0, UNIT(0)(dst), s_exc_p4u) EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES beqz len, done - ADD dst, dst, 4*NBYTES + .set noreorder less_than_4units: /* * rem = len % NBYTES @@ -284,8 +291,10 @@ EXC( LOAD t0, 0(src), l_exc) ADD src, src, NBYTES SUB len, len, NBYTES EXC( STORE t0, 0(dst), s_exc_p1u) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES bne rem, len, 1b - ADD dst, dst, NBYTES + .set noreorder /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) @@ -364,8 +373,10 @@ EXC( STORE t1, UNIT(1)(dst), s_exc_p3u) EXC( STORE t2, UNIT(2)(dst), s_exc_p2u) EXC( STORE t3, UNIT(3)(dst), s_exc_p1u) PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed) + .set reorder /* DADDI_WAR */ + ADD dst, dst, 4*NBYTES bne len, rem, 1b - ADD dst, dst, 4*NBYTES + .set noreorder cleanup_src_unaligned: beqz len, done @@ -378,8 +389,10 @@ EXC( LDREST t0, REST(0)(src), l_exc_copy) ADD src, src, NBYTES SUB len, len, NBYTES EXC( STORE t0, 0(dst), s_exc_p1u) + .set reorder /* DADDI_WAR */ + ADD dst, dst, NBYTES bne len, rem, 1b - ADD dst, dst, NBYTES + .set noreorder copy_bytes_checklen: beqz len, done @@ -427,8 +440,10 @@ l_exc_copy: EXC( lb t1, 0(src), l_exc) ADD src, src, 1 sb t1, 0(dst) # can't fault -- we're copy_from_user + .set reorder /* DADDI_WAR */ + ADD dst, dst, 1 bne src, t0, 1b - ADD dst, dst, 1 + .set noreorder l_exc: LOAD t0, TI_TASK($28) nop @@ -446,20 +461,33 @@ l_exc: * Clear len bytes starting at dst. Can't call __bzero because it * might modify len. An inefficient loop for these rare times... */ + .set reorder /* DADDI_WAR */ + SUB src, len, 1 beqz len, done - SUB src, len, 1 + .set noreorder 1: sb zero, 0(dst) ADD dst, dst, 1 +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS bnez src, 1b SUB src, src, 1 +#else + .set push + .set noat + li v1, 1 + bnez src, 1b + SUB src, src, v1 + .set pop +#endif jr ra nop -#define SEXC(n) \ -s_exc_p ## n ## u: \ - jr ra; \ - ADD len, len, n*NBYTES +#define SEXC(n) \ + .set reorder; /* DADDI_WAR */ \ +s_exc_p ## n ## u: \ + ADD len, len, n*NBYTES; \ + jr ra; \ + .set noreorder SEXC(8) SEXC(7) @@ -471,8 +499,10 @@ SEXC(2) SEXC(1) s_exc_p1: + .set reorder /* DADDI_WAR */ + ADD len, len, 1 jr ra - ADD len, len, 1 + .set noreorder s_exc: jr ra nop @@ -502,8 +532,10 @@ r_end_bytes: SUB a2, a2, 0x1 sb t0, -1(a0) SUB a1, a1, 0x1 + .set reorder /* DADDI_WAR */ + SUB a0, a0, 0x1 bnez a2, r_end_bytes - SUB a0, a0, 0x1 + .set noreorder r_out: jr ra @@ -514,8 +546,10 @@ r_end_bytes_up: SUB a2, a2, 0x1 sb t0, (a0) ADD a1, a1, 0x1 + .set reorder /* DADDI_WAR */ + ADD a0, a0, 0x1 bnez a2, r_end_bytes_up - ADD a0, a0, 0x1 + .set noreorder jr ra move a2, zero diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index 3f8b8b3d0b23..3bf38422342f 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -5,6 +5,7 @@ * * Copyright (C) 1998, 1999, 2000 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki */ #include #include @@ -74,8 +75,16 @@ FEXPORT(__bzero) bnez t0, small_memset andi t0, a0, LONGMASK /* aligned? 
*/ +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS beqz t0, 1f PTR_SUBU t0, LONGSIZE /* alignment in bytes */ +#else + .set noat + li AT, LONGSIZE + beqz t0, 1f + PTR_SUBU t0, AT /* alignment in bytes */ + .set at +#endif #ifdef __MIPSEB__ EX(LONG_S_L, a1, (a0), first_fixup) /* make word/dword aligned */ @@ -106,7 +115,7 @@ memset_partial: .set noat LONG_SRL AT, t0, 1 PTR_SUBU t1, AT - .set noat + .set at #endif jr t1 PTR_ADDU a0, t0 /* dest ptr */ diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S index d16c76fbfac7..5c8fb9d6b7f9 100644 --- a/arch/mips/lib/strncpy_user.S +++ b/arch/mips/lib/strncpy_user.S @@ -41,9 +41,9 @@ FEXPORT(__strncpy_from_user_nocheck_asm) beqz t0, 2f sb t0, (a0) PTR_ADDIU v0, 1 - bne v0, a2, 1b - PTR_ADDIU a0, 1 .set reorder + PTR_ADDIU a0, 1 + bne v0, a2, 1b 2: PTR_ADDU t0, a1, v0 xor t0, a1 bltz t0, fault diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c index 4f770ac885ce..9185fbf37c0d 100644 --- a/arch/mips/mm/pg-r4k.c +++ b/arch/mips/mm/pg-r4k.c @@ -4,6 +4,7 @@ * for more details. * * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2007 Maciej W. Rozycki */ #include #include @@ -12,6 +13,7 @@ #include #include +#include #include #include #include @@ -255,64 +257,58 @@ static inline void build_store_reg(int reg) __build_store_reg(reg); } -static inline void build_addiu_a2_a0(unsigned long offset) +static inline void build_addiu_rt_rs(unsigned int rt, unsigned int rs, + unsigned long offset) { union mips_instruction mi; BUG_ON(offset > 0x7fff); - mi.i_format.opcode = cpu_has_64bit_gp_regs ? daddiu_op : addiu_op; - mi.i_format.rs = 4; /* $a0 */ - mi.i_format.rt = 6; /* $a2 */ - mi.i_format.simmediate = offset; + if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) { + mi.i_format.opcode = addiu_op; + mi.i_format.rs = 0; /* $zero */ + mi.i_format.rt = 25; /* $t9 */ + mi.i_format.simmediate = offset; + emit_instruction(mi); + mi.r_format.opcode = spec_op; + mi.r_format.rs = rs; + mi.r_format.rt = 25; /* $t9 */ + mi.r_format.rd = rt; + mi.r_format.re = 0; + mi.r_format.func = daddu_op; + } else { + mi.i_format.opcode = cpu_has_64bit_gp_regs ? + daddiu_op : addiu_op; + mi.i_format.rs = rs; + mi.i_format.rt = rt; + mi.i_format.simmediate = offset; + } emit_instruction(mi); } -static inline void build_addiu_a2(unsigned long offset) +static inline void build_addiu_a2_a0(unsigned long offset) { - union mips_instruction mi; - - BUG_ON(offset > 0x7fff); - - mi.i_format.opcode = cpu_has_64bit_gp_regs ? daddiu_op : addiu_op; - mi.i_format.rs = 6; /* $a2 */ - mi.i_format.rt = 6; /* $a2 */ - mi.i_format.simmediate = offset; + build_addiu_rt_rs(6, 4, offset); /* $a2, $a0, offset */ +} - emit_instruction(mi); +static inline void build_addiu_a2(unsigned long offset) +{ + build_addiu_rt_rs(6, 6, offset); /* $a2, $a2, offset */ } static inline void build_addiu_a1(unsigned long offset) { - union mips_instruction mi; - - BUG_ON(offset > 0x7fff); - - mi.i_format.opcode = cpu_has_64bit_gp_regs ? daddiu_op : addiu_op; - mi.i_format.rs = 5; /* $a1 */ - mi.i_format.rt = 5; /* $a1 */ - mi.i_format.simmediate = offset; + build_addiu_rt_rs(5, 5, offset); /* $a1, $a1, offset */ load_offset -= offset; - - emit_instruction(mi); } static inline void build_addiu_a0(unsigned long offset) { - union mips_instruction mi; - - BUG_ON(offset > 0x7fff); - - mi.i_format.opcode = cpu_has_64bit_gp_regs ? 
daddiu_op : addiu_op; - mi.i_format.rs = 4; /* $a0 */ - mi.i_format.rt = 4; /* $a0 */ - mi.i_format.simmediate = offset; + build_addiu_rt_rs(4, 4, offset); /* $a0, $a0, offset */ store_offset -= offset; - - emit_instruction(mi); } static inline void build_bne(unsigned int *dest) diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 511107f92d9c..f8925ba0b39e 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -6,7 +6,7 @@ * Synthesize TLB refill handlers at runtime. * * Copyright (C) 2004,2005,2006 by Thiemo Seufer - * Copyright (C) 2005 Maciej W. Rozycki + * Copyright (C) 2005, 2007 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) * * ... and the days got worse and worse and now you see @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -293,7 +294,7 @@ static void __init build_insn(u32 **buf, enum opcode opc, ...) break; } - if (!ip) + if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) panic("Unsupported TLB synthesizer instruction %d", opc); op = ip->match; @@ -525,23 +526,33 @@ L_LA(_r3000_write_probe_fail) #define i_ssnop(buf) i_sll(buf, 0, 0, 1) #define i_ehb(buf) i_sll(buf, 0, 0, 3) -#ifdef CONFIG_64BIT static __init int __maybe_unused in_compat_space_p(long addr) { /* Is this address in 32bit compat space? */ +#ifdef CONFIG_64BIT return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L); +#else + return 1; +#endif } static __init int __maybe_unused rel_highest(long val) { +#ifdef CONFIG_64BIT return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000; +#else + return 0; +#endif } static __init int __maybe_unused rel_higher(long val) { +#ifdef CONFIG_64BIT return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000; -} +#else + return 0; #endif +} static __init int rel_hi(long val) { @@ -555,7 +566,6 @@ static __init int rel_lo(long val) static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr) { -#ifdef CONFIG_64BIT if (!in_compat_space_p(addr)) { i_lui(buf, rs, rel_highest(addr)); if (rel_higher(addr)) @@ -567,16 +577,18 @@ static __init void i_LA_mostly(u32 **buf, unsigned int rs, long addr) } else i_dsll32(buf, rs, rs, 0); } else -#endif i_lui(buf, rs, rel_hi(addr)); } -static __init void __maybe_unused i_LA(u32 **buf, unsigned int rs, - long addr) +static __init void __maybe_unused i_LA(u32 **buf, unsigned int rs, long addr) { i_LA_mostly(buf, rs, addr); - if (rel_lo(addr)) - i_ADDIU(buf, rs, rs, rel_lo(addr)); + if (rel_lo(addr)) { + if (!in_compat_space_p(addr)) + i_daddiu(buf, rs, rs, rel_lo(addr)); + else + i_addiu(buf, rs, rs, rel_lo(addr)); + } } /* @@ -1085,7 +1097,10 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r, } else { i_LA_mostly(p, ptr, modd); il_b(p, r, label_vmalloc_done); - i_daddiu(p, ptr, ptr, rel_lo(modd)); + if (in_compat_space_p(modd)) + i_addiu(p, ptr, ptr, rel_lo(modd)); + else + i_daddiu(p, ptr, ptr, rel_lo(modd)); } l_vmalloc(l, *p); @@ -1106,7 +1121,10 @@ build_get_pgd_vmalloc64(u32 **p, struct label **l, struct reloc **r, } else { i_LA_mostly(p, ptr, swpd); il_b(p, r, label_vmalloc_done); - i_daddiu(p, ptr, ptr, rel_lo(swpd)); + if (in_compat_space_p(swpd)) + i_addiu(p, ptr, ptr, rel_lo(swpd)); + else + i_daddiu(p, ptr, ptr, rel_lo(swpd)); } } diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h index de5105d05f1e..b0bccd2c4ed5 100644 --- a/include/asm-mips/delay.h +++ b/include/asm-mips/delay.h @@ -28,7 +28,7 @@ static inline void __delay(unsigned long loops) " .set reorder \n" : "=r" (loops) : "0" (loops)); 
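/*
 * The DADDI_WAR variant added below passes the constant 1 in a register
 * ("r" (1)) so the loop decrement stays a single true dsubu.  Per the
 * commit message, the original 64-bit loop put a daddiu in the branch
 * delay slot, which no longer works once daddiu has to be avoided (or
 * expanded into a multi-instruction sequence) on affected R4000/R4400
 * parts.
 */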
- else if (sizeof(long) == 8) + else if (sizeof(long) == 8 && !DADDI_WAR) __asm__ __volatile__ ( " .set noreorder \n" " .align 3 \n" @@ -37,6 +37,15 @@ static inline void __delay(unsigned long loops) " .set reorder \n" : "=r" (loops) : "0" (loops)); + else if (sizeof(long) == 8 && DADDI_WAR) + __asm__ __volatile__ ( + " .set noreorder \n" + " .align 3 \n" + "1: bnez %0, 1b \n" + " dsubu %0, %2 \n" + " .set reorder \n" + : "=r" (loops) + : "0" (loops), "r" (1)); } diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index fb41a8d76392..051e1af0bb95 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h @@ -6,6 +6,7 @@ * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle * Copyright (C) 1994, 1995, 1996 Paul M. Antoine. * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki */ #ifndef _ASM_STACKFRAME_H #define _ASM_STACKFRAME_H @@ -145,8 +146,16 @@ .set reorder /* Called from user mode, new stack. */ get_saved_sp +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS 8: move k0, sp PTR_SUBU sp, k1, PT_SIZE +#else + .set at=k0 +8: PTR_SUBU k1, PT_SIZE + .set noat + move k0, sp + move sp, k1 +#endif LONG_S k0, PT_R29(sp) LONG_S $3, PT_R3(sp) /* diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h index c30c718994c9..66523d610950 100644 --- a/include/asm-mips/uaccess.h +++ b/include/asm-mips/uaccess.h @@ -5,6 +5,7 @@ * * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2007 Maciej W. Rozycki */ #ifndef _ASM_UACCESS_H #define _ASM_UACCESS_H @@ -387,6 +388,12 @@ extern void __put_user_unknown(void); "jal\t" #destination "\n\t" #endif +#ifndef CONFIG_CPU_DADDI_WORKAROUNDS +#define DADDI_SCRATCH "$0" +#else +#define DADDI_SCRATCH "$3" +#endif + extern size_t __copy_user(void *__to, const void *__from, size_t __n); #define __invoke_copy_to_user(to, from, n) \ @@ -403,7 +410,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ - "memory"); \ + DADDI_SCRATCH, "memory"); \ __cu_len_r; \ }) @@ -512,7 +519,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ - "memory"); \ + DADDI_SCRATCH, "memory"); \ __cu_len_r; \ }) @@ -535,7 +542,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \ - "memory"); \ + DADDI_SCRATCH, "memory"); \ __cu_len_r; \ }) -- cgit v1.2.3 From adfb8998669a77c7942f220adf1c1fe8ced9deb6 Mon Sep 17 00:00:00 2001 From: Lucas Woods Date: Tue, 6 Nov 2007 07:13:47 +1100 Subject: [MIPS] Remove duplicate includes. 
Signed-off-by: Lucas Woods Signed-off-by: Ralf Baechle --- arch/mips/kernel/rtlx.c | 1 - arch/mips/kernel/vpe.c | 1 - 2 files changed, 2 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 1ba00c15505b..0233798f7155 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index c06eb812a95e..eed2dc4273e0 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -53,7 +53,6 @@ #include #include #include -#include typedef void *vpe_handle; -- cgit v1.2.3 From 0ab7aefc4d43a6dee26c891b41ef9c7a67d2379b Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 2 Mar 2007 20:42:04 +0000 Subject: [MIPS] MT: Scheduler support for SMT Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 14 ++++++++++++++ arch/mips/kernel/proc.c | 1 + arch/mips/kernel/smp-mt.c | 6 +++++- arch/mips/kernel/smp.c | 30 ++++++++++++++++++++++++++++++ include/asm-mips/cpu-info.h | 5 ++++- include/asm-mips/smp.h | 3 +++ include/asm-mips/topology.h | 16 ++++++++++++++++ 7 files changed, 73 insertions(+), 2 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 11bc17ce0ebf..d5a89f3fdfd3 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1442,6 +1442,7 @@ config MIPS_MT_SMP select MIPS_MT select NR_CPUS_DEFAULT_2 select SMP + select SYS_SUPPORTS_SCHED_SMT if SMP select SYS_SUPPORTS_SMP help This is a kernel model which is also known a VSMP or lately @@ -1468,6 +1469,19 @@ endchoice config MIPS_MT bool +config SCHED_SMT + bool "SMT (multithreading) scheduler support" + depends on SYS_SUPPORTS_SCHED_SMT + default n + help + SMT scheduler support improves the CPU scheduler's decision making + when dealing with MIPS MT enabled cores at a cost of slightly + increased overhead in some places. If unsure say N here. + +config SYS_SUPPORTS_SCHED_SMT + bool + + config SYS_SUPPORTS_MULTITHREADING bool diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 6e6e947cce1e..34dd22838fdb 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -62,6 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) ); seq_printf(m, "shadow register sets\t: %d\n", cpu_data[n].srsets); + seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", cpu_has_vce ? 
"%u" : "not available"); diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 94e210cc6cb6..2ab0b7eeaa7e 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -30,7 +31,6 @@ #include #include #include -#include #include #include #include @@ -223,6 +223,7 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0) void __init plat_smp_setup(void) { unsigned int mvpconf0, ntc, tc, ncpu = 0; + unsigned int nvpe; #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ @@ -242,6 +243,9 @@ void __init plat_smp_setup(void) mvpconf0 = read_c0_mvpconf0(); ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT; + nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; + smp_num_siblings = nvpe; + /* we'll always have more TC's than VPE's, so loop setting everything to a sensible state */ for (tc = 0; tc <= ntc; tc++) { diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 63989e9df4f9..335be9bcf0dc 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -56,6 +56,34 @@ EXPORT_SYMBOL(cpu_online_map); extern void __init calibrate_delay(void); extern void cpu_idle(void); +/* Number of TCs (or siblings in Intel speak) per CPU core */ +int smp_num_siblings = 1; +EXPORT_SYMBOL(smp_num_siblings); + +/* representing the TCs (or siblings in Intel speak) of each logical CPU */ +cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(cpu_sibling_map); + +/* representing cpus for which sibling maps can be computed */ +static cpumask_t cpu_sibling_setup_map; + +static inline void set_cpu_sibling_map(int cpu) +{ + int i; + + cpu_set(cpu, cpu_sibling_setup_map); + + if (smp_num_siblings > 1) { + for_each_cpu_mask(i, cpu_sibling_setup_map) { + if (cpu_data[cpu].core == cpu_data[i].core) { + cpu_set(i, cpu_sibling_map[cpu]); + cpu_set(cpu, cpu_sibling_map[i]); + } + } + } else + cpu_set(cpu, cpu_sibling_map[cpu]); +} + /* * First C code run on the secondary CPUs after being started up by * the master. @@ -85,6 +113,7 @@ asmlinkage __cpuinit void start_secondary(void) cpu_data[cpu].udelay_val = loops_per_jiffy; prom_smp_finish(); + set_cpu_sibling_map(cpu); cpu_set(cpu, cpu_callin_map); @@ -258,6 +287,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) init_new_context(current, &init_mm); current_thread_info()->cpu = 0; plat_prepare_cpus(max_cpus); + set_cpu_sibling_map(0); #ifndef CONFIG_HOTPLUG_CPU cpu_present_map = cpu_possible_map; #endif diff --git a/include/asm-mips/cpu-info.h b/include/asm-mips/cpu-info.h index ed5c02c6afbb..0c5a358863f3 100644 --- a/include/asm-mips/cpu-info.h +++ b/include/asm-mips/cpu-info.h @@ -55,6 +55,7 @@ struct cpuinfo_mips { struct cache_desc scache; /* Secondary cache */ struct cache_desc tcache; /* Tertiary/split secondary cache */ int srsets; /* Shadow register sets */ + int core; /* physical core number */ #if defined(CONFIG_MIPS_MT_SMTC) /* * In the MIPS MT "SMTC" model, each TC is considered @@ -63,8 +64,10 @@ struct cpuinfo_mips { * to all TCs within the same VPE. 
*/ int vpe_id; /* Virtual Processor number */ - int tc_id; /* Thread Context number */ #endif /* CONFIG_MIPS_MT */ +#ifdef CONFIG_MIPS_MT_SMTC + int tc_id; /* Thread Context number */ +#endif void *data; /* Additional data */ } __attribute__((aligned(SMP_CACHE_BYTES))); diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h index dc770025a9b0..23265879cee9 100644 --- a/include/asm-mips/smp.h +++ b/include/asm-mips/smp.h @@ -20,6 +20,9 @@ #include #include +extern int smp_num_siblings; +extern cpumask_t cpu_sibling_map[]; + #define raw_smp_processor_id() (current_thread_info()->cpu) /* Map from cpu id to sequential logical cpu number. This will only diff --git a/include/asm-mips/topology.h b/include/asm-mips/topology.h index 0440fb9f2180..259145e07e97 100644 --- a/include/asm-mips/topology.h +++ b/include/asm-mips/topology.h @@ -1 +1,17 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2007 by Ralf Baechle + */ +#ifndef __ASM_TOPOLOGY_H +#define __ASM_TOPOLOGY_H + #include + +#ifdef CONFIG_SMP +#define smt_capable() (smp_num_siblings > 1) +#endif + +#endif /* __ASM_TOPOLOGY_H */ -- cgit v1.2.3 From 19388fb092d89e179575bd0b44f51b57e175edf5 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 29 Jan 2008 10:14:57 +0000 Subject: [MIPS] Cleanup pcspeaker platform device registration. Move registration into the actual platform code instead of making a desparate attempt at sharing the hand full of likes of code in pcspeaker.c. Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 6 ------ arch/mips/jazz/setup.c | 7 +++++++ arch/mips/kernel/pcspeaker.c | 28 ---------------------------- arch/mips/qemu/q-setup.c | 15 +++++++++++++++ arch/mips/sni/pcit.c | 7 +++++++ 5 files changed, 29 insertions(+), 34 deletions(-) delete mode 100644 arch/mips/kernel/pcspeaker.c (limited to 'arch/mips/kernel') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index d5a89f3fdfd3..8cbdfd255670 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -132,7 +132,6 @@ config MACH_JAZZ select I8253 select I8259 select ISA - select PCSPEAKER select SYS_HAS_CPU_R4X00 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL @@ -378,7 +377,6 @@ config QEMU select I8259 select IRQ_CPU select ISA - select PCSPEAKER select SWAP_IO_SPACE select SYS_HAS_CPU_MIPS32_R1 select SYS_HAS_EARLY_PRINTK @@ -590,7 +588,6 @@ config SNI_RM select I8253 select I8259 select ISA - select PCSPEAKER select SWAP_IO_SPACE if CPU_BIG_ENDIAN select SYS_HAS_CPU_R4X00 select SYS_HAS_CPU_R5000 @@ -1995,9 +1992,6 @@ config MMU config I8253 bool -config PCSPEAKER - bool - config ZONE_DMA32 bool diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c index a7857973ca03..a7947199c99b 100644 --- a/arch/mips/jazz/setup.c +++ b/arch/mips/jazz/setup.c @@ -200,12 +200,19 @@ static struct platform_device jazz_cmos_pdev = { .resource = jazz_cmos_rsrc }; +static struct platform_device pcspeaker_pdev = { + .name = "pcspkr", + .id = -1, +}; + static int __init jazz_setup_devinit(void) { platform_device_register(&jazz_serial8250_device); platform_device_register(&jazz_esp_pdev); platform_device_register(&jazz_sonic_pdev); platform_device_register(&jazz_cmos_pdev); + platform_device_register(&pcspeaker_pdev); + return 0; } diff --git a/arch/mips/kernel/pcspeaker.c b/arch/mips/kernel/pcspeaker.c deleted file mode 100644 index 475df6904219..000000000000 --- 
a/arch/mips/kernel/pcspeaker.c +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (C) 2006 IBM Corporation - * - * Implements device information for i8253 timer chip - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation - */ - -#include - -static __init int add_pcspkr(void) -{ - struct platform_device *pd; - int ret; - - pd = platform_device_alloc("pcspkr", -1); - if (!pd) - return -ENOMEM; - - ret = platform_device_add(pd); - if (ret) - platform_device_put(pd); - - return ret; -} -device_initcall(add_pcspkr); diff --git a/arch/mips/qemu/q-setup.c b/arch/mips/qemu/q-setup.c index 969cedc8d8b9..9f0c2d3c67ea 100644 --- a/arch/mips/qemu/q-setup.c +++ b/arch/mips/qemu/q-setup.c @@ -1,4 +1,5 @@ #include +#include #include #include @@ -20,3 +21,17 @@ void __init plat_mem_setup(void) { qemu_reboot_setup(); } + +static struct platform_device pcspeaker_pdev = { + .name = "pcspkr", + .id = -1, +}; + +static int __init qemu_platform_devinit(void) +{ + platform_device_register(&pcspeaker_pdev); + + return 0; +} + +device_initcall(qemu_platform_devinit); diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c index 416f397c768b..e5f12cf96e8e 100644 --- a/arch/mips/sni/pcit.c +++ b/arch/mips/sni/pcit.c @@ -76,6 +76,11 @@ static struct platform_device pcit_cmos_device = { .resource = pcit_cmos_rsrc }; +static struct platform_device pcit_pcspeaker_pdev = { + .name = "pcspkr", + .id = -1, +}; + static struct resource sni_io_resource = { .start = 0x00000000UL, .end = 0x03bfffffUL, @@ -277,11 +282,13 @@ static int __init snirm_pcit_setup_devinit(void) case SNI_BRD_PCI_TOWER: platform_device_register(&pcit_serial8250_device); platform_device_register(&pcit_cmos_device); + platform_device_register(&pcit_pcspeaker_pdev); break; case SNI_BRD_PCI_TOWER_CPLUS: platform_device_register(&pcit_cplus_serial8250_device); platform_device_register(&pcit_cmos_device); + platform_device_register(&pcit_pcspeaker_pdev); break; } return 0; -- cgit v1.2.3 From 87353d8ac39c52784da605ecbe965ecdfad609ad Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Mon, 19 Nov 2007 12:23:51 +0000 Subject: [MIPS] SMP: Call platform methods via ops structure. 
Signed-off-by: Ralf Baechle --- arch/mips/Kconfig | 5 + arch/mips/fw/arc/init.c | 8 ++ arch/mips/kernel/mips-mt.c | 1 - arch/mips/kernel/setup.c | 3 +- arch/mips/kernel/smp-mt.c | 193 +++++++++++++++++-------------- arch/mips/kernel/smp.c | 23 ++-- arch/mips/kernel/smtc-proc.c | 1 - arch/mips/kernel/smtc.c | 1 - arch/mips/mips-boards/generic/init.c | 8 ++ arch/mips/mips-boards/malta/malta_smtc.c | 66 ++++++----- arch/mips/mipssim/Makefile | 2 +- arch/mips/mipssim/sim_setup.c | 16 ++- arch/mips/mipssim/sim_smp.c | 123 -------------------- arch/mips/mipssim/sim_smtc.c | 117 +++++++++++++++++++ arch/mips/pmc-sierra/yosemite/prom.c | 5 + arch/mips/pmc-sierra/yosemite/smp.c | 149 +++++++++++++----------- arch/mips/qemu/q-smp.c | 40 ++++--- arch/mips/sgi-ip27/ip27-init.c | 1 - arch/mips/sgi-ip27/ip27-klnuma.c | 1 - arch/mips/sgi-ip27/ip27-smp.c | 109 ++++++++++------- arch/mips/sibyte/bcm1480/smp.c | 101 ++++++++++++++-- arch/mips/sibyte/cfe/Makefile | 1 - arch/mips/sibyte/cfe/setup.c | 11 ++ arch/mips/sibyte/cfe/smp.c | 110 ------------------ arch/mips/sibyte/sb1250/smp.c | 100 ++++++++++++++-- include/asm-mips/sibyte/sb1250.h | 2 - include/asm-mips/smp-ops.h | 56 +++++++++ include/asm-mips/smp.h | 61 +--------- 28 files changed, 755 insertions(+), 559 deletions(-) delete mode 100644 arch/mips/mipssim/sim_smp.c create mode 100644 arch/mips/mipssim/sim_smtc.c delete mode 100644 arch/mips/sibyte/cfe/smp.c create mode 100644 include/asm-mips/smp-ops.h (limited to 'arch/mips/kernel') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 8cbdfd255670..b211e7961f28 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1441,6 +1441,7 @@ config MIPS_MT_SMP select SMP select SYS_SUPPORTS_SCHED_SMT if SMP select SYS_SUPPORTS_SMP + select SMP_UP help This is a kernel model which is also known a VSMP or lately has been marketesed into SMVP. @@ -1457,6 +1458,7 @@ config MIPS_MT_SMTC select NR_CPUS_DEFAULT_8 select SMP select SYS_SUPPORTS_SMP + select SMP_UP help This is a kernel model which is known a SMTC or lately has been marketesed into SMVP. @@ -1735,6 +1737,9 @@ config SMP If you don't know what to do here, say N. 
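# SMP_UP, added just below, is selected by the MIPS_MT_SMP and MIPS_MT_SMTC
# models above; later hunks in this series register the generic up_smp_ops
# (arch/mips/qemu/q-smp.c) when an MT-enabled kernel finds itself on a core
# without the MT ASE, which appears to be what this symbol exists to support.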
+config SMP_UP + bool + config SYS_SUPPORTS_SMP bool diff --git a/arch/mips/fw/arc/init.c b/arch/mips/fw/arc/init.c index e2f75b13312f..3ad8788b6eaa 100644 --- a/arch/mips/fw/arc/init.c +++ b/arch/mips/fw/arc/init.c @@ -12,6 +12,7 @@ #include #include +#include #undef DEBUG_PROM_INIT @@ -48,4 +49,11 @@ void __init prom_init(void) ArcRead(0, &c, 1, &cnt); ArcEnterInteractiveMode(); #endif +#ifdef CONFIG_SGI_IP27 + { + extern struct plat_smp_ops ip27_smp_ops; + + register_smp_ops(&ip27_smp_ops); + } +#endif } diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c index 3d6b1ec1f328..640fb0cc6e39 100644 --- a/arch/mips/kernel/mips-mt.c +++ b/arch/mips/kernel/mips-mt.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 7b4418dd5857..269c252d956f 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -29,6 +29,7 @@ #include #include #include +#include #include struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; @@ -575,9 +576,7 @@ void __init setup_arch(char **cmdline_p) arch_mem_init(cmdline_p); resource_init(); -#ifdef CONFIG_SMP plat_smp_setup(); -#endif } static int __init fpu_disable(char *s) diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index 2ab0b7eeaa7e..89e6f6aa5166 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -215,72 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0) write_tc_c0_tchalt(TCHALT_H); } -/* - * Common setup before any secondaries are started - * Make sure all CPU's are in a sensible state before we boot any of the - * secondarys - */ -void __init plat_smp_setup(void) +static void vsmp_send_ipi_single(int cpu, unsigned int action) { - unsigned int mvpconf0, ntc, tc, ncpu = 0; - unsigned int nvpe; + int i; + unsigned long flags; + int vpflags; -#ifdef CONFIG_MIPS_MT_FPAFF - /* If we have an FPU, enroll ourselves in the FPU-full mask */ - if (cpu_has_fpu) - cpu_set(0, mt_fpu_cpumask); -#endif /* CONFIG_MIPS_MT_FPAFF */ - if (!cpu_has_mipsmt) - return; + local_irq_save(flags); - /* disable MT so we can configure */ - dvpe(); - dmt(); + vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ - /* Put MVPE's into 'configuration state' */ - set_c0_mvpcontrol(MVPCONTROL_VPC); + switch (action) { + case SMP_CALL_FUNCTION: + i = C_SW1; + break; - mvpconf0 = read_c0_mvpconf0(); - ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT; + case SMP_RESCHEDULE_YOURSELF: + default: + i = C_SW0; + break; + } - nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; - smp_num_siblings = nvpe; + /* 1:1 mapping of vpe and tc... 
*/ + settc(cpu); + write_vpe_c0_cause(read_vpe_c0_cause() | i); + evpe(vpflags); - /* we'll always have more TC's than VPE's, so loop setting everything - to a sensible state */ - for (tc = 0; tc <= ntc; tc++) { - settc(tc); + local_irq_restore(flags); +} - smp_tc_init(tc, mvpconf0); - ncpu = smp_vpe_init(tc, mvpconf0, ncpu); - } +static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action) +{ + unsigned int i; - /* Release config state */ - clear_c0_mvpcontrol(MVPCONTROL_VPC); + for_each_cpu_mask(i, mask) + vsmp_send_ipi_single(i, action); +} - /* We'll wait until starting the secondaries before starting MVPE */ +static void __cpuinit vsmp_init_secondary(void) +{ + /* Enable per-cpu interrupts */ - printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); + /* This is Malta specific: IPI,performance and timer inetrrupts */ + write_c0_status((read_c0_status() & ~ST0_IM ) | + (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7)); } -void __init plat_prepare_cpus(unsigned int max_cpus) +static void __cpuinit vsmp_smp_finish(void) { - mips_mt_set_cpuoptions(); - - /* set up ipi interrupts */ - if (cpu_has_vint) { - set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch); - set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); - } + write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); - cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ; - cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ; +#ifdef CONFIG_MIPS_MT_FPAFF + /* If we have an FPU, enroll ourselves in the FPU-full mask */ + if (cpu_has_fpu) + cpu_set(smp_processor_id(), mt_fpu_cpumask); +#endif /* CONFIG_MIPS_MT_FPAFF */ - setup_irq(cpu_ipi_resched_irq, &irq_resched); - setup_irq(cpu_ipi_call_irq, &irq_call); + local_irq_enable(); +} - set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq); - set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq); +static void vsmp_cpus_done(void) +{ } /* @@ -291,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus) * (unsigned long)idle->thread_info the gp * assumes a 1:1 mapping of TC => VPE */ -void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) +static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle) { struct thread_info *gp = task_thread_info(idle); dvpe(); @@ -325,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) evpe(EVPE_ENABLE); } -void __cpuinit prom_init_secondary(void) -{ - /* Enable per-cpu interrupts */ - - /* This is Malta specific: IPI,performance and timer inetrrupts */ - write_c0_status((read_c0_status() & ~ST0_IM ) | - (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7)); -} - -void __cpuinit prom_smp_finish(void) +/* + * Common setup before any secondaries are started + * Make sure all CPU's are in a sensible state before we boot any of the + * secondarys + */ +static void __init vsmp_smp_setup(void) { - write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ)); + unsigned int mvpconf0, ntc, tc, ncpu = 0; + unsigned int nvpe; #ifdef CONFIG_MIPS_MT_FPAFF /* If we have an FPU, enroll ourselves in the FPU-full mask */ if (cpu_has_fpu) - cpu_set(smp_processor_id(), mt_fpu_cpumask); + cpu_set(0, mt_fpu_cpumask); #endif /* CONFIG_MIPS_MT_FPAFF */ + if (!cpu_has_mipsmt) + return; - local_irq_enable(); -} + /* disable MT so we can configure */ + dvpe(); + dmt(); -void prom_cpus_done(void) -{ -} + /* Put MVPE's into 'configuration state' */ + set_c0_mvpcontrol(MVPCONTROL_VPC); -void core_send_ipi(int cpu, unsigned int action) -{ - 
int i; - unsigned long flags; - int vpflags; + mvpconf0 = read_c0_mvpconf0(); + ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT; - local_irq_save(flags); + nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; + smp_num_siblings = nvpe; - vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ + /* we'll always have more TC's than VPE's, so loop setting everything + to a sensible state */ + for (tc = 0; tc <= ntc; tc++) { + settc(tc); - switch (action) { - case SMP_CALL_FUNCTION: - i = C_SW1; - break; + smp_tc_init(tc, mvpconf0); + ncpu = smp_vpe_init(tc, mvpconf0, ncpu); + } - case SMP_RESCHEDULE_YOURSELF: - default: - i = C_SW0; - break; + /* Release config state */ + clear_c0_mvpcontrol(MVPCONTROL_VPC); + + /* We'll wait until starting the secondaries before starting MVPE */ + + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); +} + +static void __init vsmp_prepare_cpus(unsigned int max_cpus) +{ + mips_mt_set_cpuoptions(); + + /* set up ipi interrupts */ + if (cpu_has_vint) { + set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch); + set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch); } - /* 1:1 mapping of vpe and tc... */ - settc(cpu); - write_vpe_c0_cause(read_vpe_c0_cause() | i); - evpe(vpflags); + cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ; + cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ; - local_irq_restore(flags); + setup_irq(cpu_ipi_resched_irq, &irq_resched); + setup_irq(cpu_ipi_call_irq, &irq_call); + + set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq); + set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq); } + +struct plat_smp_ops vsmp_smp_ops = { + .send_ipi_single = vsmp_send_ipi_single, + .send_ipi_mask = vsmp_send_ipi_mask, + .init_secondary = vsmp_init_secondary, + .smp_finish = vsmp_smp_finish, + .cpus_done = vsmp_cpus_done, + .boot_secondary = vsmp_boot_secondary, + .smp_setup = vsmp_smp_setup, + .prepare_cpus = vsmp_prepare_cpus, +}; diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 335be9bcf0dc..1e5dfc28294a 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -37,7 +37,6 @@ #include #include #include -#include #include #ifdef CONFIG_MIPS_MT_SMTC @@ -84,6 +83,16 @@ static inline void set_cpu_sibling_map(int cpu) cpu_set(cpu, cpu_sibling_map[cpu]); } +struct plat_smp_ops *mp_ops; + +__cpuinit void register_smp_ops(struct plat_smp_ops *ops) +{ + if (ops) + printk(KERN_WARNING "Overriding previous set SMP ops\n"); + + mp_ops = ops; +} + /* * First C code run on the secondary CPUs after being started up by * the master. 
@@ -100,7 +109,7 @@ asmlinkage __cpuinit void start_secondary(void) cpu_report(); per_cpu_trap_init(); mips_clockevent_init(); - prom_init_secondary(); + mp_ops->init_secondary(); /* * XXX parity protection should be folded in here when it's converted @@ -112,7 +121,7 @@ asmlinkage __cpuinit void start_secondary(void) cpu = smp_processor_id(); cpu_data[cpu].udelay_val = loops_per_jiffy; - prom_smp_finish(); + mp_ops->smp_finish(); set_cpu_sibling_map(cpu); cpu_set(cpu, cpu_callin_map); @@ -184,7 +193,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), smp_mb(); /* Send a message to all other CPUs and wait for them to respond */ - core_send_ipi_mask(mask, SMP_CALL_FUNCTION); + mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); /* Wait for response */ /* FIXME: lock-up detection, backtrace on lock-up */ @@ -278,7 +287,7 @@ void smp_send_stop(void) void __init smp_cpus_done(unsigned int max_cpus) { - prom_cpus_done(); + mp_ops->cpus_done(); } /* called from main before smp_init() */ @@ -286,7 +295,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { init_new_context(current, &init_mm); current_thread_info()->cpu = 0; - plat_prepare_cpus(max_cpus); + mp_ops->prepare_cpus(max_cpus); set_cpu_sibling_map(0); #ifndef CONFIG_HOTPLUG_CPU cpu_present_map = cpu_possible_map; @@ -325,7 +334,7 @@ int __cpuinit __cpu_up(unsigned int cpu) if (IS_ERR(idle)) panic(KERN_ERR "Fork failed for CPU %d", cpu); - prom_boot_secondary(cpu, idle); + mp_ops->boot_secondary(cpu, idle); /* * Trust is futile. We should really have timeouts ... diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c index 6f3709996172..fe256559c997 100644 --- a/arch/mips/kernel/smtc-proc.c +++ b/arch/mips/kernel/smtc-proc.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 9c92d42996cb..85f700e58131 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c index 30f1f54cb68b..1695dca5506b 100644 --- a/arch/mips/mips-boards/generic/init.c +++ b/arch/mips/mips-boards/generic/init.c @@ -250,6 +250,8 @@ void __init mips_ejtag_setup(void) flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); } +extern struct plat_smp_ops msmtc_smp_ops; + void __init prom_init(void) { prom_argc = fw_arg0; @@ -416,4 +418,10 @@ void __init prom_init(void) #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif +#ifdef CONFIG_MIPS_MT_SMP + register_smp_ops(&vsmp_smp_ops); +#endif +#ifdef CONFIG_MIPS_MT_SMTC + register_smp_ops(&msmtc_smp_ops); +#endif } diff --git a/arch/mips/mips-boards/malta/malta_smtc.c b/arch/mips/mips-boards/malta/malta_smtc.c index 5c980f4a48fe..6f051ca243fa 100644 --- a/arch/mips/mips-boards/malta/malta_smtc.c +++ b/arch/mips/mips-boards/malta/malta_smtc.c @@ -15,26 +15,24 @@ * Cause the specified action to be performed on a targeted "CPU" */ -void core_send_ipi(int cpu, unsigned int action) +static void msmtc_send_ipi_single(int cpu, unsigned int action) { /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ smtc_send_ipi(cpu, LINUX_SMP_IPI, action); } -/* - * Platform "CPU" startup hook - */ - -void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) +static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action) { - smtc_boot_secondary(cpu, idle); + unsigned int i; + + 
for_each_cpu_mask(i, mask) + msmtc_send_ipi_single(i, action); } /* * Post-config but pre-boot cleanup entry point */ - -void __cpuinit prom_init_secondary(void) +static void __cpuinit msmtc_init_secondary(void) { void smtc_init_secondary(void); int myvpe; @@ -50,45 +48,61 @@ void __cpuinit prom_init_secondary(void) set_c0_status(0x100 << cp0_perfcount_irq); } - smtc_init_secondary(); + smtc_init_secondary(); } /* - * Platform SMP pre-initialization - * - * As noted above, we can assume a single CPU for now - * but it may be multithreaded. + * Platform "CPU" startup hook */ - -void __cpuinit plat_smp_setup(void) +static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle) { - if (read_c0_config3() & (1<<2)) - mipsmt_build_cpu_map(0); + smtc_boot_secondary(cpu, idle); } -void __init plat_prepare_cpus(unsigned int max_cpus) +/* + * SMP initialization finalization entry point + */ +static void __cpuinit msmtc_smp_finish(void) { - if (read_c0_config3() & (1<<2)) - mipsmt_prepare_cpus(); + smtc_smp_finish(); } /* - * SMP initialization finalization entry point + * Hook for after all CPUs are online */ -void __cpuinit prom_smp_finish(void) +static void msmtc_cpus_done(void) { - smtc_smp_finish(); } /* - * Hook for after all CPUs are online + * Platform SMP pre-initialization + * + * As noted above, we can assume a single CPU for now + * but it may be multithreaded. */ -void prom_cpus_done(void) +static void __init msmtc_smp_setup(void) { + mipsmt_build_cpu_map(0); } +static void __init msmtc_prepare_cpus(unsigned int max_cpus) +{ + mipsmt_prepare_cpus(); +} + +struct plat_smp_ops msmtc_smp_ops = { + .send_ipi_single = msmtc_send_ipi_single, + .send_ipi_mask = msmtc_send_ipi_mask, + .init_secondary = msmtc_init_secondary, + .smp_finish = msmtc_smp_finish, + .cpus_done = msmtc_cpus_done, + .boot_secondary = msmtc_boot_secondary, + .smp_setup = msmtc_smp_setup, + .prepare_cpus = msmtc_prepare_cpus, +}; + #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF /* * IRQ affinity hook diff --git a/arch/mips/mipssim/Makefile b/arch/mips/mipssim/Makefile index 75568b584df4..57f43c1c7882 100644 --- a/arch/mips/mipssim/Makefile +++ b/arch/mips/mipssim/Makefile @@ -21,6 +21,6 @@ obj-y := sim_platform.o sim_setup.o sim_mem.o sim_time.o sim_int.o \ sim_cmdline.o obj-$(CONFIG_EARLY_PRINTK) += sim_console.o -obj-$(CONFIG_SMP) += sim_smp.o +obj-$(CONFIG_MIPS_MT_SMTC) += sim_smtc.o EXTRA_CFLAGS += -Werror diff --git a/arch/mips/mipssim/sim_setup.c b/arch/mips/mipssim/sim_setup.c index 452c129d02c1..d49fe73426b7 100644 --- a/arch/mips/mipssim/sim_setup.c +++ b/arch/mips/mipssim/sim_setup.c @@ -60,6 +60,8 @@ void __init plat_mem_setup(void) #endif } +extern struct plat_smp_ops ssmtc_smp_ops; + void __init prom_init(void) { set_io_port_base(0xbfd00000); @@ -67,8 +69,20 @@ void __init prom_init(void) pr_info("\nLINUX started...\n"); prom_init_cmdline(); prom_meminit(); -} +#ifdef CONFIG_MIPS_MT_SMP + if (cpu_has_mipsmt) + register_smp_ops(&vsmp_smp_ops); + else + register_smp_ops(&up_smp_ops); +#endif +#ifdef CONFIG_MIPS_MT_SMTC + if (cpu_has_mipsmt) + register_smp_ops(&ssmtc_smp_ops); + else + register_smp_ops(&up_smp_ops); +#endif +} static void __init serial_init(void) { diff --git a/arch/mips/mipssim/sim_smp.c b/arch/mips/mipssim/sim_smp.c deleted file mode 100644 index ccbbccac23ef..000000000000 --- a/arch/mips/mipssim/sim_smp.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. 
- * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - */ -/* - * Simulator Platform-specific hooks for SMP operation - */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#ifdef CONFIG_MIPS_MT_SMTC -#include -#endif /* CONFIG_MIPS_MT_SMTC */ - -/* VPE/SMP Prototype implements platform interfaces directly */ -#if !defined(CONFIG_MIPS_MT_SMP) - -/* - * Cause the specified action to be performed on a targeted "CPU" - */ - -void core_send_ipi(int cpu, unsigned int action) -{ -#ifdef CONFIG_MIPS_MT_SMTC - smtc_send_ipi(cpu, LINUX_SMP_IPI, action); -#endif /* CONFIG_MIPS_MT_SMTC */ -/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ - -} - -/* - * Platform "CPU" startup hook - */ - -void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) -{ -#ifdef CONFIG_MIPS_MT_SMTC - smtc_boot_secondary(cpu, idle); -#endif /* CONFIG_MIPS_MT_SMTC */ -} - -/* - * Post-config but pre-boot cleanup entry point - */ - -void __cpuinit prom_init_secondary(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - void smtc_init_secondary(void); - - smtc_init_secondary(); -#endif /* CONFIG_MIPS_MT_SMTC */ -} - -void plat_smp_setup(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - if (read_c0_config3() & (1 << 2)) - mipsmt_build_cpu_map(0); -#endif /* CONFIG_MIPS_MT_SMTC */ -} - -/* - * Platform SMP pre-initialization - */ - -void plat_prepare_cpus(unsigned int max_cpus) -{ -#ifdef CONFIG_MIPS_MT_SMTC - /* - * As noted above, we can assume a single CPU for now - * but it may be multithreaded. - */ - - if (read_c0_config3() & (1 << 2)) { - mipsmt_prepare_cpus(); - } -#endif /* CONFIG_MIPS_MT_SMTC */ -} - -/* - * SMP initialization finalization entry point - */ - -void __cpuinit prom_smp_finish(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - smtc_smp_finish(); -#endif /* CONFIG_MIPS_MT_SMTC */ -} - -/* - * Hook for after all CPUs are online - */ - -void prom_cpus_done(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - -#endif /* CONFIG_MIPS_MT_SMTC */ -} -#endif /* CONFIG_MIPS32R2_MT_SMP */ diff --git a/arch/mips/mipssim/sim_smtc.c b/arch/mips/mipssim/sim_smtc.c new file mode 100644 index 000000000000..d6e4f656ad14 --- /dev/null +++ b/arch/mips/mipssim/sim_smtc.c @@ -0,0 +1,117 @@ +/* + * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. + * + * This program is free software; you can distribute it and/or modify it + * under the terms of the GNU General Public License (Version 2) as + * published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * + */ +/* + * Simulator Platform-specific hooks for SMTC operation + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +/* VPE/SMP Prototype implements platform interfaces directly */ + +/* + * Cause the specified action to be performed on a targeted "CPU" + */ + +static void ssmtc_send_ipi_single(int cpu, unsigned int action) +{ + smtc_send_ipi(cpu, LINUX_SMP_IPI, action); + /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */ +} + +static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu_mask(i, mask) + ssmtc_send_ipi_single(i, action); +} + +/* + * Post-config but pre-boot cleanup entry point + */ +static void __cpuinit ssmtc_init_secondary(void) +{ + void smtc_init_secondary(void); + + smtc_init_secondary(); +} + +/* + * SMP initialization finalization entry point + */ +static void __cpuinit ssmtc_smp_finish(void) +{ + smtc_smp_finish(); +} + +/* + * Hook for after all CPUs are online + */ +static void ssmtc_cpus_done(void) +{ +} + +/* + * Platform "CPU" startup hook + */ +static void __cpuinit ssmtc_boot_secondary(int cpu, struct task_struct *idle) +{ + smtc_boot_secondary(cpu, idle); +} + +static void __init ssmtc_smp_setup(void) +{ + if (read_c0_config3() & (1 << 2)) + mipsmt_build_cpu_map(0); +} + +/* + * Platform SMP pre-initialization + */ +static void ssmtc_prepare_cpus(unsigned int max_cpus) +{ + /* + * As noted above, we can assume a single CPU for now + * but it may be multithreaded. + */ + + if (read_c0_config3() & (1 << 2)) { + mipsmt_prepare_cpus(); + } +} + +struct plat_smp_ops ssmtc_smp_ops = { + .send_ipi_single = ssmtc_send_ipi_single, + .send_ipi_mask = ssmtc_send_ipi_mask, + .init_secondary = ssmtc_init_secondary, + .smp_finish = ssmtc_smp_finish, + .cpus_done = ssmtc_cpus_done, + .boot_secondary = ssmtc_boot_secondary, + .smp_setup = ssmtc_smp_setup, + .prepare_cpus = ssmtc_prepare_cpus, +}; diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c index 96d3ff051d3d..35dc435846a6 100644 --- a/arch/mips/pmc-sierra/yosemite/prom.c +++ b/arch/mips/pmc-sierra/yosemite/prom.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -78,6 +79,8 @@ static void prom_halt(void) __asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0"); } +extern struct plat_smp_ops yos_smp_ops; + /* * Init routine which accepts the variables from PMON */ @@ -127,6 +130,8 @@ void __init prom_init(void) } prom_grab_secondary(); + + register_smp_ops(&yos_smp_ops); } void __init prom_free_prom_memory(void) diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c index b0f12cd2968a..653f3ec61cab 100644 --- a/arch/mips/pmc-sierra/yosemite/smp.c +++ b/arch/mips/pmc-sierra/yosemite/smp.c @@ -42,70 +42,6 @@ void __init prom_grab_secondary(void) launchstack + LAUNCHSTACK_SIZE, 0); } -/* - * Detect available CPUs, populate phys_cpu_present_map before smp_init - * - * We don't want to start the secondary CPU yet nor do we have a nice probing - * feature in PMON so we just assume presence of the secondary core. 
- */ -void __init plat_smp_setup(void) -{ - int i; - - cpus_clear(phys_cpu_present_map); - - for (i = 0; i < 2; i++) { - cpu_set(i, phys_cpu_present_map); - __cpu_number_map[i] = i; - __cpu_logical_map[i] = i; - } -} - -void __init plat_prepare_cpus(unsigned int max_cpus) -{ - /* - * Be paranoid. Enable the IPI only if we're really about to go SMP. - */ - if (cpus_weight(cpu_possible_map)) - set_c0_status(STATUSF_IP5); -} - -/* - * Firmware CPU startup hook - * Complicated by PMON's weird interface which tries to minimic the UNIX fork. - * It launches the next * available CPU and copies some information on the - * stack so the first thing we do is throw away that stuff and load useful - * values into the registers ... - */ -void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) -{ - unsigned long gp = (unsigned long) task_thread_info(idle); - unsigned long sp = __KSTK_TOS(idle); - - secondary_sp = sp; - secondary_gp = gp; - - spin_unlock(&launch_lock); -} - -/* Hook for after all CPUs are online */ -void prom_cpus_done(void) -{ -} - -/* - * After we've done initial boot, this function is called to allow the - * board code to clean up state, if needed - */ -void __cpuinit prom_init_secondary(void) -{ - set_c0_status(ST0_CO | ST0_IE | ST0_IM); -} - -void __cpuinit prom_smp_finish(void) -{ -} - void titan_mailbox_irq(void) { int cpu = smp_processor_id(); @@ -133,7 +69,7 @@ void titan_mailbox_irq(void) /* * Send inter-processor interrupt */ -void core_send_ipi(int cpu, unsigned int action) +static void yos_send_ipi_single(int cpu, unsigned int action) { /* * Generate an INTMSG so that it can be sent over to the @@ -159,3 +95,86 @@ void core_send_ipi(int cpu, unsigned int action) break; } } + +static void yos_send_ipi_mask(cpumask_t mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu_mask(i, mask) + yos_send_ipi_single(i, action); +} + +/* + * After we've done initial boot, this function is called to allow the + * board code to clean up state, if needed + */ +static void __cpuinit yos_init_secondary(void) +{ + set_c0_status(ST0_CO | ST0_IE | ST0_IM); +} + +static void __cpuinit yos_smp_finish(void) +{ +} + +/* Hook for after all CPUs are online */ +static void yos_cpus_done(void) +{ +} + +/* + * Firmware CPU startup hook + * Complicated by PMON's weird interface which tries to minimic the UNIX fork. + * It launches the next * available CPU and copies some information on the + * stack so the first thing we do is throw away that stuff and load useful + * values into the registers ... + */ +static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle) +{ + unsigned long gp = (unsigned long) task_thread_info(idle); + unsigned long sp = __KSTK_TOS(idle); + + secondary_sp = sp; + secondary_gp = gp; + + spin_unlock(&launch_lock); +} + +/* + * Detect available CPUs, populate phys_cpu_present_map before smp_init + * + * We don't want to start the secondary CPU yet nor do we have a nice probing + * feature in PMON so we just assume presence of the secondary core. + */ +static void __init yos_smp_setup(void) +{ + int i; + + cpus_clear(phys_cpu_present_map); + + for (i = 0; i < 2; i++) { + cpu_set(i, phys_cpu_present_map); + __cpu_number_map[i] = i; + __cpu_logical_map[i] = i; + } +} + +static void __init yos_prepare_cpus(unsigned int max_cpus) +{ + /* + * Be paranoid. Enable the IPI only if we're really about to go SMP. 
+ */ + if (cpus_weight(cpu_possible_map)) + set_c0_status(STATUSF_IP5); +} + +struct plat_smp_ops yos_smp_ops = { + .send_ipi_single = yos_send_ipi_single, + .send_ipi_mask = yos_send_ipi_mask, + .init_secondary = yos_init_secondary, + .smp_finish = yos_smp_finish, + .cpus_done = yos_cpus_done, + .boot_secondary = yos_boot_secondary, + .smp_setup = yos_smp_setup, + .prepare_cpus = yos_prepare_cpus, +}; diff --git a/arch/mips/qemu/q-smp.c b/arch/mips/qemu/q-smp.c index 4b0178d0df0b..ead6c30eeb14 100644 --- a/arch/mips/qemu/q-smp.c +++ b/arch/mips/qemu/q-smp.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org) * * Symmetric Uniprocessor (TM) Support */ @@ -13,43 +13,55 @@ /* * Send inter-processor interrupt */ -void core_send_ipi(int cpu, unsigned int action) +void up_send_ipi_single(int cpu, unsigned int action) { - panic(KERN_ERR "%s called", __FUNCTION__); + panic(KERN_ERR "%s called", __func__); +} + +static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action) +{ + panic(KERN_ERR "%s called", __func__); } /* * After we've done initial boot, this function is called to allow the * board code to clean up state, if needed */ -void __cpuinit prom_init_secondary(void) +void __cpuinit up_init_secondary(void) { } -void __cpuinit prom_smp_finish(void) +void __cpuinit up_smp_finish(void) { } /* Hook for after all CPUs are online */ -void prom_cpus_done(void) -{ -} - -void __init prom_prepare_cpus(unsigned int max_cpus) +void up_cpus_done(void) { - cpus_clear(phys_cpu_present_map); } /* * Firmware CPU startup hook */ -void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) +void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle) { } -void __init plat_smp_setup(void) +void __init up_smp_setup(void) { } -void __init plat_prepare_cpus(unsigned int max_cpus) + +void __init up_prepare_cpus(unsigned int max_cpus) { } + +struct plat_smp_ops up_smp_ops = { + .send_ipi_single = up_send_ipi_single, + .send_ipi_mask = up_send_ipi_mask, + .init_secondary = up_init_secondary, + .smp_finish = up_smp_finish, + .cpus_done = up_cpus_done, + .boot_secondary = up_boot_secondary, + .smp_setup = up_smp_setup, + .prepare_cpus = up_prepare_cpus, +}; diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c index 3305fa9ae66d..a49e7c85f724 100644 --- a/arch/mips/sgi-ip27/ip27-init.c +++ b/arch/mips/sgi-ip27/ip27-init.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c index f10d9839006d..48932ce1d730 100644 --- a/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/arch/mips/sgi-ip27/ip27-klnuma.c @@ -11,7 +11,6 @@ #include #include -#include #include #include #include diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index a70656d42191..f15fc93d6b35 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -140,30 +140,51 @@ static __init void intr_clear_all(nasid_t nasid) REMOTE_HUB_CLR_INTR(nasid, i); } -void __init plat_smp_setup(void) +static void ip27_send_ipi_single(int destid, unsigned int action) { - cnodeid_t cnode; + int irq; - for_each_online_node(cnode) { - if (cnode == 0) - continue; - intr_clear_all(COMPACT_TO_NASID_NODEID(cnode)); + switch (action) { + case SMP_RESCHEDULE_YOURSELF: + irq = CPU_RESCHED_A_IRQ; + break; + 
case SMP_CALL_FUNCTION: + irq = CPU_CALL_A_IRQ; + break; + default: + panic("sendintr"); } - replicate_kernel_text(); + irq += cputoslice(destid); /* - * Assumption to be fixed: we're always booted on logical / physical - * processor 0. While we're always running on logical processor 0 - * this still means this is physical processor zero; it might for - * example be disabled in the firwware. + * Convert the compact hub number to the NASID to get the correct + * part of the address space. Then set the interrupt bit associated + * with the CPU we want to send the interrupt to. */ - alloc_cpupda(0, 0); + REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq); } -void __init plat_prepare_cpus(unsigned int max_cpus) +static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu_mask(i, mask) + ip27_send_ipi_single(i, action); +} + +static void __cpuinit ip27_init_secondary(void) +{ + per_cpu_init(); + local_irq_enable(); +} + +static void __cpuinit ip27_smp_finish(void) +{ +} + +static void __init ip27_cpus_done(void) { - /* We already did everything necessary earlier */ } /* @@ -171,7 +192,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus) * set sp to the kernel stack of the newly created idle process, gp to the proc * struct so that current_thread_info() will work. */ -void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) +static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle) { unsigned long gp = (unsigned long)task_thread_info(idle); unsigned long sp = __KSTK_TOS(idle); @@ -181,41 +202,39 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) 0, (void *) sp, (void *) gp); } -void __cpuinit prom_init_secondary(void) +static void __init ip27_smp_setup(void) { - per_cpu_init(); - local_irq_enable(); -} - -void __init prom_cpus_done(void) -{ -} - -void __cpuinit prom_smp_finish(void) -{ -} - -void core_send_ipi(int destid, unsigned int action) -{ - int irq; + cnodeid_t cnode; - switch (action) { - case SMP_RESCHEDULE_YOURSELF: - irq = CPU_RESCHED_A_IRQ; - break; - case SMP_CALL_FUNCTION: - irq = CPU_CALL_A_IRQ; - break; - default: - panic("sendintr"); + for_each_online_node(cnode) { + if (cnode == 0) + continue; + intr_clear_all(COMPACT_TO_NASID_NODEID(cnode)); } - irq += cputoslice(destid); + replicate_kernel_text(); /* - * Convert the compact hub number to the NASID to get the correct - * part of the address space. Then set the interrupt bit associated - * with the CPU we want to send the interrupt to. + * Assumption to be fixed: we're always booted on logical / physical + * processor 0. While we're always running on logical processor 0 + * this still means this is physical processor zero; it might for + * example be disabled in the firwware. 
*/ - REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq); + alloc_cpupda(0, 0); } + +static void __init ip27_prepare_cpus(unsigned int max_cpus) +{ + /* We already did everything necessary earlier */ +} + +struct plat_smp_ops ip27_smp_ops = { + .send_ipi_single = ip27_send_ipi_single, + .send_ipi_mask = ip27_send_ipi_mask, + .init_secondary = ip27_init_secondary, + .smp_finish = ip27_smp_finish, + .cpus_done = ip27_cpus_done, + .boot_secondary = ip27_boot_secondary, + .smp_setup = ip27_smp_setup, + .prepare_cpus = ip27_prepare_cpus, +}; diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c index 436ba78359ab..183c460b9ca1 100644 --- a/arch/mips/sibyte/bcm1480/smp.c +++ b/arch/mips/sibyte/bcm1480/smp.c @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -67,28 +68,114 @@ void __cpuinit bcm1480_smp_init(void) change_c0_status(ST0_IM, imask); } -void __cpuinit bcm1480_smp_finish(void) +/* + * These are routines for dealing with the sb1250 smp capabilities + * independent of board/firmware + */ + +/* + * Simple enough; everything is set up, so just poke the appropriate mailbox + * register, and we should be set + */ +static void bcm1480_send_ipi_single(int cpu, unsigned int action) +{ + __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]); +} + +static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu_mask(i, mask) + bcm1480_send_ipi_single(i, action); +} + +/* + * Code to run on secondary just after probing the CPU + */ +static void __cpuinit bcm1480_init_secondary(void) +{ + extern void bcm1480_smp_init(void); + + bcm1480_smp_init(); +} + +/* + * Do any tidying up before marking online and running the idle + * loop + */ +static void __cpuinit bcm1480_smp_finish(void) { extern void sb1480_clockevent_init(void); sb1480_clockevent_init(); local_irq_enable(); + bcm1480_smp_finish(); } /* - * These are routines for dealing with the sb1250 smp capabilities - * independent of board/firmware + * Final cleanup after all secondaries booted */ +static void bcm1480_cpus_done(void) +{ +} /* - * Simple enough; everything is set up, so just poke the appropriate mailbox - * register, and we should be set + * Setup the PC, SP, and GP of a secondary processor and start it + * running! */ -void core_send_ipi(int cpu, unsigned int action) +static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle) { - __raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]); + int retval; + + retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap, + __KSTK_TOS(idle), + (unsigned long)task_thread_info(idle), 0); + if (retval != 0) + printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); } +/* + * Use CFE to find out how many CPUs are available, setting up + * phys_cpu_present_map and the logical/physical mappings. + * XXXKW will the boot CPU ever not be physical 0? 
+ * + * Common setup before any secondaries are started + */ +static void __init bcm1480_smp_setup(void) +{ + int i, num; + + cpus_clear(phys_cpu_present_map); + cpu_set(0, phys_cpu_present_map); + __cpu_number_map[0] = 0; + __cpu_logical_map[0] = 0; + + for (i = 1, num = 0; i < NR_CPUS; i++) { + if (cfe_cpu_stop(i) == 0) { + cpu_set(i, phys_cpu_present_map); + __cpu_number_map[i] = ++num; + __cpu_logical_map[num] = i; + } + } + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); +} + +static void __init bcm1480_prepare_cpus(unsigned int max_cpus) +{ +} + +struct plat_smp_ops bcm1480_smp_ops = { + .send_ipi_single = bcm1480_send_ipi_single, + .send_ipi_mask = bcm1480_send_ipi_mask, + .init_secondary = bcm1480_init_secondary, + .smp_finish = bcm1480_smp_finish, + .cpus_done = bcm1480_cpus_done, + .boot_secondary = bcm1480_boot_secondary, + .smp_setup = bcm1480_smp_setup, + .prepare_cpus = bcm1480_prepare_cpus, +}; + void bcm1480_mailbox_interrupt(void) { int cpu = smp_processor_id(); diff --git a/arch/mips/sibyte/cfe/Makefile b/arch/mips/sibyte/cfe/Makefile index a1214937b705..02b32e142adf 100644 --- a/arch/mips/sibyte/cfe/Makefile +++ b/arch/mips/sibyte/cfe/Makefile @@ -1,3 +1,2 @@ lib-y = setup.o -lib-$(CONFIG_SMP) += smp.o lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c index dbd6e6fdd3f9..50d7c05e15b8 100644 --- a/arch/mips/sibyte/cfe/setup.c +++ b/arch/mips/sibyte/cfe/setup.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -232,6 +233,9 @@ static int __init initrd_setup(char *str) #endif +extern struct plat_smp_ops sb_smp_ops; +extern struct plat_smp_ops bcm1480_smp_ops; + /* * prom_init is called just after the cpu type is determined, from setup_arch() */ @@ -340,6 +344,13 @@ void __init prom_init(void) arcs_cmdline[CL_SIZE-1] = 0; prom_meminit(); + +#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250) + register_smp_ops(&sb_smp_ops); +#endif +#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) + register_smp_ops(&bcm1480_smp_ops); +#endif } void __init prom_free_prom_memory(void) diff --git a/arch/mips/sibyte/cfe/smp.c b/arch/mips/sibyte/cfe/smp.c deleted file mode 100644 index 534a62912f21..000000000000 --- a/arch/mips/sibyte/cfe/smp.c +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - */ - -#include -#include -#include -#include - -#include -#include - -/* - * Use CFE to find out how many CPUs are available, setting up - * phys_cpu_present_map and the logical/physical mappings. - * XXXKW will the boot CPU ever not be physical 0? 
- * - * Common setup before any secondaries are started - */ -void __init plat_smp_setup(void) -{ - int i, num; - - cpus_clear(phys_cpu_present_map); - cpu_set(0, phys_cpu_present_map); - __cpu_number_map[0] = 0; - __cpu_logical_map[0] = 0; - - for (i = 1, num = 0; i < NR_CPUS; i++) { - if (cfe_cpu_stop(i) == 0) { - cpu_set(i, phys_cpu_present_map); - __cpu_number_map[i] = ++num; - __cpu_logical_map[num] = i; - } - } - printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); -} - -void __init plat_prepare_cpus(unsigned int max_cpus) -{ -} - -/* - * Setup the PC, SP, and GP of a secondary processor and start it - * running! - */ -void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle) -{ - int retval; - - retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap, - __KSTK_TOS(idle), - (unsigned long)task_thread_info(idle), 0); - if (retval != 0) - printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); -} - -/* - * Code to run on secondary just after probing the CPU - */ -void __cpuinit prom_init_secondary(void) -{ -#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) - extern void bcm1480_smp_init(void); - bcm1480_smp_init(); -#elif defined(CONFIG_SIBYTE_SB1250) - extern void sb1250_smp_init(void); - sb1250_smp_init(); -#else -#error invalid SMP configuration -#endif -} - -/* - * Do any tidying up before marking online and running the idle - * loop - */ -void __cpuinit prom_smp_finish(void) -{ -#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) - extern void bcm1480_smp_finish(void); - bcm1480_smp_finish(); -#elif defined(CONFIG_SIBYTE_SB1250) - extern void sb1250_smp_finish(void); - sb1250_smp_finish(); -#else -#error invalid SMP configuration -#endif -} - -/* - * Final cleanup after all secondaries booted - */ -void prom_cpus_done(void) -{ -} diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c index 3f52c95a4eb8..0734b933e969 100644 --- a/arch/mips/sibyte/sb1250/smp.c +++ b/arch/mips/sibyte/sb1250/smp.c @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -55,7 +56,43 @@ void __cpuinit sb1250_smp_init(void) change_c0_status(ST0_IM, imask); } -void __cpuinit sb1250_smp_finish(void) +/* + * These are routines for dealing with the sb1250 smp capabilities + * independent of board/firmware + */ + +/* + * Simple enough; everything is set up, so just poke the appropriate mailbox + * register, and we should be set + */ +static void sb1250_send_ipi_single(int cpu, unsigned int action) +{ + __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]); +} + +static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action) +{ + unsigned int i; + + for_each_cpu_mask(i, mask) + sb1250_send_ipi_single(i, action); +} + +/* + * Code to run on secondary just after probing the CPU + */ +static void __cpuinit sb1250_init_secondary(void) +{ + extern void sb1250_smp_init(void); + + sb1250_smp_init(); +} + +/* + * Do any tidying up before marking online and running the idle + * loop + */ +static void __cpuinit sb1250_smp_finish(void) { extern void sb1250_clockevent_init(void); @@ -64,19 +101,68 @@ void __cpuinit sb1250_smp_finish(void) } /* - * These are routines for dealing with the sb1250 smp capabilities - * independent of board/firmware + * Final cleanup after all secondaries booted */ +static void sb1250_cpus_done(void) +{ +} /* - * Simple enough; everything is set up, so just poke the appropriate mailbox - * register, and we should be set + * Setup the PC, SP, and GP of a secondary 
processor and start it + * running! */ -void core_send_ipi(int cpu, unsigned int action) +static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle) { - __raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]); + int retval; + + retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap, + __KSTK_TOS(idle), + (unsigned long)task_thread_info(idle), 0); + if (retval != 0) + printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval); } +/* + * Use CFE to find out how many CPUs are available, setting up + * phys_cpu_present_map and the logical/physical mappings. + * XXXKW will the boot CPU ever not be physical 0? + * + * Common setup before any secondaries are started + */ +static void __init sb1250_smp_setup(void) +{ + int i, num; + + cpus_clear(phys_cpu_present_map); + cpu_set(0, phys_cpu_present_map); + __cpu_number_map[0] = 0; + __cpu_logical_map[0] = 0; + + for (i = 1, num = 0; i < NR_CPUS; i++) { + if (cfe_cpu_stop(i) == 0) { + cpu_set(i, phys_cpu_present_map); + __cpu_number_map[i] = ++num; + __cpu_logical_map[num] = i; + } + } + printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); +} + +static void __init sb1250_prepare_cpus(unsigned int max_cpus) +{ +} + +struct plat_smp_ops sb_smp_ops = { + .send_ipi_single = sb1250_send_ipi_single, + .send_ipi_mask = sb1250_send_ipi_mask, + .init_secondary = sb1250_init_secondary, + .smp_finish = sb1250_smp_finish, + .cpus_done = sb1250_cpus_done, + .boot_secondary = sb1250_boot_secondary, + .smp_setup = sb1250_smp_setup, + .prepare_cpus = sb1250_prepare_cpus, +}; + void sb1250_mailbox_interrupt(void) { int cpu = smp_processor_id(); diff --git a/include/asm-mips/sibyte/sb1250.h b/include/asm-mips/sibyte/sb1250.h index 0dad844a3b5b..80c1a052662a 100644 --- a/include/asm-mips/sibyte/sb1250.h +++ b/include/asm-mips/sibyte/sb1250.h @@ -48,12 +48,10 @@ extern unsigned int zbbus_mhz; extern void sb1250_time_init(void); extern void sb1250_mask_irq(int cpu, int irq); extern void sb1250_unmask_irq(int cpu, int irq); -extern void sb1250_smp_finish(void); extern void bcm1480_time_init(void); extern void bcm1480_mask_irq(int cpu, int irq); extern void bcm1480_unmask_irq(int cpu, int irq); -extern void bcm1480_smp_finish(void); #define AT_spin \ __asm__ __volatile__ ( \ diff --git a/include/asm-mips/smp-ops.h b/include/asm-mips/smp-ops.h new file mode 100644 index 000000000000..b17fdfb5d818 --- /dev/null +++ b/include/asm-mips/smp-ops.h @@ -0,0 +1,56 @@ +/* + * This file is subject to the terms and conditions of the GNU General + * Public License. See the file "COPYING" in the main directory of this + * archive for more details. + * + * Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com) + * Copyright (C) 2000 - 2001 by Silicon Graphics, Inc. 
+ * Copyright (C) 2000, 2001, 2002 Ralf Baechle + * Copyright (C) 2000, 2001 Broadcom Corporation + */ +#ifndef __ASM_SMP_OPS_H +#define __ASM_SMP_OPS_H + +#ifdef CONFIG_SMP + +#include + +struct plat_smp_ops { + void (*send_ipi_single)(int cpu, unsigned int action); + void (*send_ipi_mask)(cpumask_t mask, unsigned int action); + void (*init_secondary)(void); + void (*smp_finish)(void); + void (*cpus_done)(void); + void (*boot_secondary)(int cpu, struct task_struct *idle); + void (*smp_setup)(void); + void (*prepare_cpus)(unsigned int max_cpus); +}; + +extern void register_smp_ops(struct plat_smp_ops *ops); + +static inline void plat_smp_setup(void) +{ + extern struct plat_smp_ops *mp_ops; /* private */ + + mp_ops->smp_setup(); +} + +#else /* !CONFIG_SMP */ + +struct plat_smp_ops; + +static inline void plat_smp_setup(void) +{ + /* UP, nothing to do ... */ +} + +static inline void register_smp_ops(struct plat_smp_ops *ops) +{ +} + +#endif /* !CONFIG_SMP */ + +extern struct plat_smp_ops up_smp_ops; +extern struct plat_smp_ops vsmp_smp_ops; + +#endif /* __ASM_SMP_OPS_H */ diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h index 23265879cee9..84fef1aeec0c 100644 --- a/include/asm-mips/smp.h +++ b/include/asm-mips/smp.h @@ -11,14 +11,13 @@ #ifndef __ASM_SMP_H #define __ASM_SMP_H - -#ifdef CONFIG_SMP - #include #include #include #include + #include +#include extern int smp_num_siblings; extern cpumask_t cpu_sibling_map[]; @@ -52,56 +51,6 @@ extern struct call_data_struct *call_data; extern cpumask_t phys_cpu_present_map; #define cpu_possible_map phys_cpu_present_map -/* - * These are defined by the board-specific code. - */ - -/* - * Cause the function described by call_data to be executed on the passed - * cpu. When the function has finished, increment the finished field of - * call_data. - */ -extern void core_send_ipi(int cpu, unsigned int action); - -static inline void core_send_ipi_mask(cpumask_t mask, unsigned int action) -{ - unsigned int i; - - for_each_cpu_mask(i, mask) - core_send_ipi(i, action); -} - - -/* - * Firmware CPU startup hook - */ -extern void prom_boot_secondary(int cpu, struct task_struct *idle); - -/* - * After we've done initial boot, this function is called to allow the - * board code to clean up state, if needed - */ -extern void prom_init_secondary(void); - -/* - * Populate cpu_possible_map before smp_init, called from setup_arch. - */ -extern void plat_smp_setup(void); - -/* - * Called in smp_prepare_cpus. - */ -extern void plat_prepare_cpus(unsigned int max_cpus); - -/* - * Last chance for the board code to finish SMP initialization before - * the CPU is "online". - */ -extern void prom_smp_finish(void); - -/* Hook for after all CPUs are online */ -extern void prom_cpus_done(void); - extern void asmlinkage smp_bootstrap(void); /* @@ -111,11 +60,11 @@ extern void asmlinkage smp_bootstrap(void); */ static inline void smp_send_reschedule(int cpu) { - core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF); + extern struct plat_smp_ops *mp_ops; /* private */ + + mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF); } extern asmlinkage void smp_call_function_interrupt(void); -#endif /* CONFIG_SMP */ - #endif /* __ASM_SMP_H */ -- cgit v1.2.3 From 237cfee1db66147aef4457f02b56a41e6f84bfd3 Mon Sep 17 00:00:00 2001 From: Manuel Lauss Date: Thu, 6 Dec 2007 09:07:55 +0100 Subject: [MIPS] Alchemy: Au1210/Au1250 CPU support This patch adds IDs for new Au1200 variants: Au1210 and Au1250. 
They are essentially identical to the Au1200 except for the Au1210 which has a different SoC-ID in the PRId register [bits 31:24]. The Au1250 is a "Au1200 V0.2". Signed-off-by: Manuel Lauss Signed-off-by: Ralf Baechle --- arch/mips/kernel/cpu-probe.c | 9 +++++++++ arch/mips/mm/c-r4k.c | 2 ++ arch/mips/mm/tlbex.c | 2 ++ include/asm-mips/cpu.h | 4 ++-- 4 files changed, 15 insertions(+), 2 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 5c2794391bf5..5861a432a52f 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -188,6 +188,8 @@ static inline void check_wait(void) case CPU_AU1500: case CPU_AU1550: case CPU_AU1200: + case CPU_AU1210: + case CPU_AU1250: if (allow_au1k_wait) cpu_wait = au1k_wait; break; @@ -733,6 +735,11 @@ static inline void cpu_probe_alchemy(struct cpuinfo_mips *c) break; case 4: c->cputype = CPU_AU1200; + if (2 == (c->processor_id & 0xff)) + c->cputype = CPU_AU1250; + break; + case 5: + c->cputype = CPU_AU1210; break; default: panic("Unknown Au Core!"); @@ -858,6 +865,8 @@ static __init const char *cpu_to_name(struct cpuinfo_mips *c) case CPU_AU1100: name = "Au1100"; break; case CPU_AU1550: name = "Au1550"; break; case CPU_AU1200: name = "Au1200"; break; + case CPU_AU1210: name = "Au1210"; break; + case CPU_AU1250: name = "Au1250"; break; case CPU_4KEC: name = "MIPS 4KEc"; break; case CPU_4KSC: name = "MIPS 4KSc"; break; case CPU_VR41XX: name = "NEC Vr41xx"; break; diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 06074948450d..02bd180f0e02 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -980,6 +980,8 @@ static void __init probe_pcache(void) case CPU_AU1100: case CPU_AU1550: case CPU_AU1200: + case CPU_AU1210: + case CPU_AU1250: c->icache.flags |= MIPS_CACHE_IC_F_DC; break; } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index c298344fcb71..d026302e0ecc 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -917,6 +917,8 @@ static void __init build_tlb_write_entry(u32 **p, struct label **l, case CPU_AU1500: case CPU_AU1550: case CPU_AU1200: + case CPU_AU1210: + case CPU_AU1250: case CPU_PR4450: i_nop(p); tlbw(p); diff --git a/include/asm-mips/cpu.h b/include/asm-mips/cpu.h index 54fc18a4e5a8..bf5bbc78a9f7 100644 --- a/include/asm-mips/cpu.h +++ b/include/asm-mips/cpu.h @@ -195,8 +195,8 @@ enum cpu_type_enum { * MIPS32 class processors */ CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_74K, CPU_AU1000, - CPU_AU1100, CPU_AU1200, CPU_AU1500, CPU_AU1550, CPU_PR4450, - CPU_BCM3302, CPU_BCM4710, + CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500, CPU_AU1550, + CPU_PR4450, CPU_BCM3302, CPU_BCM4710, /* * MIPS64 class processors -- cgit v1.2.3 From c9662341f8a09cffd7234a89cc066c342fe343bf Mon Sep 17 00:00:00 2001 From: Yoichi Yuasa Date: Sun, 9 Dec 2007 21:19:36 +0900 Subject: [MIPS] time: remove unused mips_timer_state() Signed-off-by: Yoichi Yuasa Signed-off-by: Ralf Baechle --- arch/mips/dec/time.c | 1 - arch/mips/kernel/time.c | 2 -- include/asm-mips/time.h | 9 +-------- 3 files changed, 1 insertion(+), 11 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c index 820e5331205f..60349062595a 100644 --- a/arch/mips/dec/time.c +++ b/arch/mips/dec/time.c @@ -161,7 +161,6 @@ static cycle_t dec_ioasic_hpt_read(void) void __init plat_time_init(void) { - mips_timer_state = dec_timer_state; mips_timer_ack = dec_timer_ack; if (!cpu_has_counter && IOASIC) diff --git 
a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index 2995be1ab3ca..9f85d4cecc5b 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -50,8 +50,6 @@ int update_persistent_clock(struct timespec now) return rtc_mips_set_mmss(now.tv_sec); } -int (*mips_timer_state)(void); - int null_perf_irq(void) { return 0; diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h index 7717934f94c3..a8fd16e1981f 100644 --- a/include/asm-mips/time.h +++ b/include/asm-mips/time.h @@ -30,12 +30,6 @@ extern spinlock_t rtc_lock; extern int rtc_mips_set_time(unsigned long); extern int rtc_mips_set_mmss(unsigned long); -/* - * Timer interrupt functions. - * mips_timer_state is needed for high precision timer calibration. - */ -extern int (*mips_timer_state)(void); - /* * board specific routines required by time_init(). */ @@ -43,8 +37,7 @@ extern void plat_time_init(void); /* * mips_hpt_frequency - must be set if you intend to use an R4k-compatible - * counter as a timer interrupt source; otherwise it can be set up - * automagically with an aid of mips_timer_state. + * counter as a timer interrupt source. */ extern unsigned int mips_hpt_frequency; -- cgit v1.2.3 From d4e9cffa161da806cb4bc9df4a4e49d287156bfa Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Tue, 29 Jan 2008 10:15:02 +0000 Subject: [MIPS] compat: handle argument endianess of sys32_(f)truncate64 with merge_64 Signed-off-by: Ralf Baechle --- arch/mips/kernel/linux32.c | 30 +++++------------------------- 1 file changed, 5 insertions(+), 25 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 2b8ec1102e86..65af3cc90abb 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -174,36 +174,16 @@ struct rlimit32 { int rlim_max; }; -#ifdef __MIPSEB__ -asmlinkage long sys32_truncate64(const char __user * path, unsigned long __dummy, - int length_hi, int length_lo) -#endif -#ifdef __MIPSEL__ -asmlinkage long sys32_truncate64(const char __user * path, unsigned long __dummy, - int length_lo, int length_hi) -#endif +asmlinkage long sys32_truncate64(const char __user * path, + unsigned long __dummy, int a2, int a3) { - loff_t length; - - length = ((unsigned long) length_hi << 32) | (unsigned int) length_lo; - - return sys_truncate(path, length); + return sys_truncate(path, merge_64(a2, a3)); } -#ifdef __MIPSEB__ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy, - int length_hi, int length_lo) -#endif -#ifdef __MIPSEL__ -asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy, - int length_lo, int length_hi) -#endif + int a2, int a3) { - loff_t length; - - length = ((unsigned long) length_hi << 32) | (unsigned int) length_lo; - - return sys_ftruncate(fd, length); + return sys_ftruncate(fd, merge_64(a2, a3)); } static inline long -- cgit v1.2.3 From 12323cacca2014dcf517d1988fcdb8e44a1f497b Mon Sep 17 00:00:00 2001 From: Jan Engelhardt Date: Tue, 22 Jan 2008 20:42:33 +0100 Subject: [MIPS]: constify function pointer tables Signed-off-by: Jan Engelhardt Signed-off-by: Ralf Baechle --- arch/mips/basler/excite/excite_iodev.c | 2 +- arch/mips/kernel/proc.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/basler/excite/excite_iodev.c b/arch/mips/basler/excite/excite_iodev.c index 6af0b21ebc32..476d20e08d0e 100644 --- a/arch/mips/basler/excite/excite_iodev.c +++ b/arch/mips/basler/excite/excite_iodev.c @@ -48,7 +48,7 @@ static DECLARE_WAIT_QUEUE_HEAD(wq); 
-static struct file_operations fops = +static const struct file_operations fops = { .owner = THIS_MODULE, .open = iodev_open, diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 34dd22838fdb..36f065398243 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -90,7 +90,7 @@ static void c_stop(struct seq_file *m, void *v) { } -struct seq_operations cpuinfo_op = { +const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, -- cgit v1.2.3 From 5f627f8e122a163ce53908d55e088247db31f1d7 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 30 Jan 2008 13:30:47 +0100 Subject: mips, x86: optimize the i8259 code a bit The timer code always calls the clock_event_device set_net_event and set_mode methods with interrupts disabled, so no need to use spin_lock_irqsave / spin_unlock_irqrestore for those. Signed-off-by: Ralf Baechle Acked-by:Thomas Gleixner Signed-off-by: Andrew Morton Signed-off-by: Ingo Molnar Signed-off-by: Thomas Gleixner --- arch/mips/kernel/i8253.c | 12 ++++-------- arch/x86/kernel/i8253.c | 12 ++++-------- 2 files changed, 8 insertions(+), 16 deletions(-) (limited to 'arch/mips/kernel') diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index c2d497ceffdd..fc4aa07b6d35 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -24,9 +24,7 @@ DEFINE_SPINLOCK(i8253_lock); static void init_pit_timer(enum clock_event_mode mode, struct clock_event_device *evt) { - unsigned long flags; - - spin_lock_irqsave(&i8253_lock, flags); + spin_lock(&i8253_lock); switch(mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -55,7 +53,7 @@ static void init_pit_timer(enum clock_event_mode mode, /* Nothing to do here */ break; } - spin_unlock_irqrestore(&i8253_lock, flags); + spin_unlock(&i8253_lock); } /* @@ -65,12 +63,10 @@ static void init_pit_timer(enum clock_event_mode mode, */ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) { - unsigned long flags; - - spin_lock_irqsave(&i8253_lock, flags); + spin_lock(&i8253_lock); outb_p(delta & 0xff , PIT_CH0); /* LSB */ outb(delta >> 8 , PIT_CH0); /* MSB */ - spin_unlock_irqrestore(&i8253_lock, flags); + spin_unlock(&i8253_lock); return 0; } diff --git a/arch/x86/kernel/i8253.c b/arch/x86/kernel/i8253.c index 377c3f8411f8..c76fef1ce355 100644 --- a/arch/x86/kernel/i8253.c +++ b/arch/x86/kernel/i8253.c @@ -38,9 +38,7 @@ struct clock_event_device *global_clock_event; static void init_pit_timer(enum clock_event_mode mode, struct clock_event_device *evt) { - unsigned long flags; - - spin_lock_irqsave(&i8253_lock, flags); + spin_lock(&i8253_lock); switch(mode) { case CLOCK_EVT_MODE_PERIODIC: @@ -71,7 +69,7 @@ static void init_pit_timer(enum clock_event_mode mode, /* Nothing to do here */ break; } - spin_unlock_irqrestore(&i8253_lock, flags); + spin_unlock(&i8253_lock); } /* @@ -81,12 +79,10 @@ static void init_pit_timer(enum clock_event_mode mode, */ static int pit_next_event(unsigned long delta, struct clock_event_device *evt) { - unsigned long flags; - - spin_lock_irqsave(&i8253_lock, flags); + spin_lock(&i8253_lock); outb_p(delta & 0xff , PIT_CH0); /* LSB */ outb(delta >> 8 , PIT_CH0); /* MSB */ - spin_unlock_irqrestore(&i8253_lock, flags); + spin_unlock(&i8253_lock); return 0; } -- cgit v1.2.3
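
To make the plat_smp_ops conversion in the patches above easier to follow, here is a minimal, self-contained user-space sketch of the function-pointer "ops table" pattern they introduce. The field names mirror the kernel's struct plat_smp_ops from smp-ops.h, but the registration and dispatch code below is a simplified illustration under that assumption, not the actual arch/mips implementation.

#include <stdio.h>

struct plat_smp_ops {
	void (*send_ipi_single)(int cpu, unsigned int action);
	void (*smp_setup)(void);
	void (*prepare_cpus)(unsigned int max_cpus);
};

/* Each platform supplies its own implementations of the hooks ... */
static void demo_send_ipi_single(int cpu, unsigned int action)
{
	printf("demo: IPI action %u -> cpu %d\n", action, cpu);
}

static void demo_smp_setup(void)
{
	printf("demo: probing CPUs, filling the present map\n");
}

static void demo_prepare_cpus(unsigned int max_cpus)
{
	printf("demo: preparing up to %u cpus\n", max_cpus);
}

static struct plat_smp_ops demo_smp_ops = {
	.send_ipi_single = demo_send_ipi_single,
	.smp_setup       = demo_smp_setup,
	.prepare_cpus    = demo_prepare_cpus,
};

/* ... and generic code dispatches through a single registered pointer. */
static struct plat_smp_ops *mp_ops;

static void register_smp_ops(struct plat_smp_ops *ops)
{
	mp_ops = ops;
}

int main(void)
{
	/* In the patches, boards call register_smp_ops() from prom_init(). */
	register_smp_ops(&demo_smp_ops);

	mp_ops->smp_setup();          /* what the plat_smp_setup() wrapper does */
	mp_ops->prepare_cpus(2);
	mp_ops->send_ipi_single(1, 0);/* e.g. SMP_RESCHEDULE_YOURSELF */
	return 0;
}

The design choice is the same one the patches make: board files (Yosemite, IP27, SiByte, qemu UP) export a filled-in ops structure, and the former global hooks such as core_send_ipi() and prom_boot_secondary() become indirect calls through the registered table.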
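
The sys32_truncate64/sys32_ftruncate64 change above replaces two endian-specific argument orderings with a single merge_64() call. The sketch below only illustrates the idea, matching the two #ifdef variants that the patch removes: in the o32 register pair carrying a 64-bit length, the high word sits in the first register on big-endian and in the second on little-endian. The macro shown here is an assumed stand-in for demonstration, not the kernel's actual definition from the MIPS compat headers.

#include <stdio.h>
#include <stdint.h>

/* Illustrative only -- which register holds the high half depends on endianness. */
#if defined(__MIPSEB__)
#define merge_64(hi_first, lo_second) \
	((((uint64_t)(uint32_t)(hi_first)) << 32) | (uint32_t)(lo_second))
#else	/* little-endian layout: the second register carries the high half */
#define merge_64(lo_first, hi_second) \
	((((uint64_t)(uint32_t)(hi_second)) << 32) | (uint32_t)(lo_first))
#endif

int main(void)
{
	/* A 64-bit length split across the two 32-bit syscall arguments
	 * that follow the alignment dummy (a2 and a3 in the patch). */
	uint32_t a2 = 0x00000002;
	uint32_t a3 = 0x00000001;

	printf("merged length: %#llx\n",
	       (unsigned long long)merge_64(a2, a3));
	return 0;
}

With the helper hiding the ordering, the syscall wrappers shrink to a single body, which is exactly what the diff does for both truncate64 and ftruncate64.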