/*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
* Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2002 Ralf Baechle
* Copyright (C) 2000, 2001 Broadcom Corporation
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

#include <linux/bitops.h>
#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <asm/smp-ops.h>

extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
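
/*
 * These masks follow the usual Linux topology conventions (an assumption,
 * not spelled out in this header): cpu_sibling_map[n] holds the CPUs that
 * share a core with CPU n and cpu_core_map[n] the CPUs in the same
 * package, while the MIPS-specific cpu_foreign_map is filled in by
 * calculate_cpu_foreign_map() below.
 */
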
#define raw_smp_processor_id() (current_thread_info()->cpu)

/* Map from cpu id to sequential logical cpu number.  This will only
   be a non-identity mapping when some CPUs failed to come online. */
extern int __cpu_number_map[NR_CPUS];
#define cpu_number_map(cpu)  __cpu_number_map[cpu]

/* The reverse map from sequential logical cpu number to cpu id. */
extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
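
/*
 * Illustrative sketch (not part of the original header): for a CPU that
 * did come online, the two maps above are inverses of each other, so a
 * logical-to-physical translation followed by the reverse lookup should
 * round-trip.  The helper name below is hypothetical.
 *
 *	static inline bool cpu_maps_round_trip(int logical)
 *	{
 *		return cpu_number_map(cpu_logical_map(logical)) == logical;
 *	}
 */
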
#define NO_PROC_ID (-1)

#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
#define SMP_CALL_FUNCTION 0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
/* Used by kexec crashdump to save all cpu's state */
#define SMP_DUMP 0x8
#define SMP_ASK_C0COUNT 0x10
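
/*
 * Note (an observation, not a statement from this header's authors): the
 * SMP_* message types above are distinct single bits, so more than one
 * action can in principle be carried in a single IPI payload, e.g.:
 *
 *	unsigned int action = SMP_RESCHEDULE_YOURSELF | SMP_CALL_FUNCTION;
 */
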
extern cpumask_t cpu_callin_map;

/* Mask of CPUs which are currently definitely operating coherently */
extern cpumask_t cpu_coherent_mask;

extern void asmlinkage smp_bootstrap(void);

extern void calculate_cpu_foreign_map(void);

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything.  Worst case is that we lose a reschedule ...
 */
static inline void smp_send_reschedule(int cpu)
{
	extern struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
}

#ifdef CONFIG_HOTPLUG_CPU
static inline int __cpu_disable(void)
{
	extern struct plat_smp_ops *mp_ops;	/* private */

	return mp_ops->cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	extern struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->cpu_die(cpu);
}

extern void play_dead(void);
#endif
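
/*
 * Rough hotplug flow, sketched from the generic Linux convention rather
 * than from anything stated in this header: __cpu_disable() runs on the
 * CPU being taken offline, __cpu_die() is called from a surviving CPU to
 * wait for it to stop, and play_dead() is what the offlined CPU itself
 * ends up executing from its idle path.
 */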

/*
 * This function will set up the necessary IPIs for Linux to communicate
 * with the CPUs in mask.
 * Return 0 on success.
 */
int mips_smp_ipi_allocate(const struct cpumask *mask);

/*
 * This function frees the IPIs previously allocated with
 * mips_smp_ipi_allocate() for the CPUs in mask, which must be a subset
 * of the CPUs for which IPIs have been configured.
 * Return 0 on success.
 */
int mips_smp_ipi_free(const struct cpumask *mask);
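
/*
 * Hypothetical usage sketch (not taken from this header): a platform
 * bringing a group of CPUs up late might reserve their IPIs first and
 * release them again once the CPUs are gone.  Error handling elided.
 *
 *	if (mips_smp_ipi_allocate(mask))
 *		pr_warn("could not allocate IPIs for late CPUs\n");
 *	...
 *	mips_smp_ipi_free(mask);
 */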

static inline void arch_send_call_function_single_ipi(int cpu)
{
	extern struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_mask(cpumask_of(cpu), SMP_CALL_FUNCTION);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	extern struct plat_smp_ops *mp_ops;	/* private */

	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}

#if defined(CONFIG_KEXEC)
extern void (*dump_ipi_function_ptr)(void *);
void dump_send_ipi(void (*dump_ipi_callback)(void *));
#endif

#endif /* __ASM_SMP_H */