author	Jan Glauber <jan.glauber@de.ibm.com>	2007-04-27 16:01:55 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2007-04-27 16:01:46 +0200
commit	db77aa5f3d01fe6a6cc629dbd37936b1fdd129ba (patch)
tree	49186d1428510ae6aa658099d36133a2a0406068 /arch/s390
parent	131a395c18af43d824841642038e5cc0d48f0bd2 (diff)
[S390] vtime: cleanup per_cpu usage.
Replace per_cpu(... , smp_processor_id()) with __get_cpu_var().

Signed-off-by: Jan Glauber <jan.glauber@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
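A minimal sketch (not part of the patch) of the transformation applied throughout
vtime.c; the per-CPU variable "example_counter" is made up for illustration, and
both forms assume preemption is disabled, as it is in the interrupt and timer
paths touched here:

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, example_counter);	/* hypothetical per-CPU variable */

static void bump_local_counter(void)
{
	/* old form: index the per-CPU area with the current CPU number */
	per_cpu(example_counter, smp_processor_id())++;

	/* equivalent new form: the accessor resolves the local CPU's copy
	 * without an explicit smp_processor_id() lookup */
	__get_cpu_var(example_counter)++;
}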
Diffstat (limited to 'arch/s390')
 arch/s390/kernel/vtime.c | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 9d5b02801b46..1e1a6ee2cac1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,7 +128,7 @@ static inline void set_vtimer(__u64 expires)
S390_lowcore.last_update_timer = expires;
/* store expire time for this CPU timer */
- per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+ __get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#else
static inline void set_vtimer(__u64 expires)
@@ -137,7 +137,7 @@ static inline void set_vtimer(__u64 expires)
asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
/* store expire time for this CPU timer */
- per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+ __get_cpu_var(virt_cpu_timer).to_expire = expires;
}
#endif
@@ -145,7 +145,7 @@ static void start_cpu_timer(void)
{
struct vtimer_queue *vt_list;
- vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+ vt_list = &__get_cpu_var(virt_cpu_timer);
/* CPU timer interrupt is pending, don't reprogramm it */
if (vt_list->idle & 1LL<<63)
@@ -159,7 +159,7 @@ static void stop_cpu_timer(void)
{
struct vtimer_queue *vt_list;
- vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+ vt_list = &__get_cpu_var(virt_cpu_timer);
/* nothing to do */
if (list_empty(&vt_list->list)) {
@@ -219,7 +219,7 @@ static void do_callbacks(struct list_head *cb_list)
if (list_empty(cb_list))
return;
- vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+ vt_list = &__get_cpu_var(virt_cpu_timer);
list_for_each_entry_safe(event, tmp, cb_list, entry) {
fn = event->function;
@@ -244,7 +244,6 @@ static void do_callbacks(struct list_head *cb_list)
*/
static void do_cpu_timer_interrupt(__u16 error_code)
{
- int cpu;
__u64 next, delta;
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
@@ -253,8 +252,7 @@ static void do_cpu_timer_interrupt(__u16 error_code)
struct list_head cb_list;
INIT_LIST_HEAD(&cb_list);
- cpu = smp_processor_id();
- vt_list = &per_cpu(virt_cpu_timer, cpu);
+ vt_list = &__get_cpu_var(virt_cpu_timer);
/* walk timer list, fire all expired events */
spin_lock(&vt_list->lock);
@@ -534,7 +532,7 @@ void init_cpu_vtimer(void)
/* enable cpu timer interrupts */
__ctl_set_bit(0,10);
- vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
+ vt_list = &__get_cpu_var(virt_cpu_timer);
INIT_LIST_HEAD(&vt_list->list);
spin_lock_init(&vt_list->lock);
vt_list->to_expire = 0;