/*
 *  linux/arch/arm/mm/proc-v7m.S
 *
 *  Copyright (C) 2008 ARM Ltd.
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This is the "shell" of the ARMv7-M processor support.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/v7m.h>
#include "proc-macros.S"

ENTRY(cpu_v7m_proc_init)
	ret	lr
ENDPROC(cpu_v7m_proc_init)

ENTRY(cpu_v7m_proc_fin)
	ret	lr
ENDPROC(cpu_v7m_proc_fin)

/*
 *	cpu_v7m_reset(loc)
 *
 *	Perform a soft reset of the system.  Put the CPU into the
 *	same state as it would be if it had been reset, and branch
 *	to what would be the reset vector.
 *
 *	- loc   - location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_v7m_reset)
	ret	r0
ENDPROC(cpu_v7m_reset)

/*
 *	cpu_v7m_do_idle()
 *
 *	Idle the processor (eg, wait for interrupt).
 *
 *	IRQs are already disabled.
 */
ENTRY(cpu_v7m_do_idle)
	wfi
	ret	lr
ENDPROC(cpu_v7m_do_idle)

ENTRY(cpu_v7m_dcache_clean_area)
	ret	lr
ENDPROC(cpu_v7m_dcache_clean_area)

/*
 * There is no MMU, so there is nothing to do here.
 */
ENTRY(cpu_v7m_switch_mm)
	ret	lr
ENDPROC(cpu_v7m_switch_mm)

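/*
 * There is no CPU-specific state to save across suspend, so the suspend
 * buffer size is zero and the suspend/resume hooks below are no-ops.
 */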
.globl	cpu_v7m_suspend_size
.equ	cpu_v7m_suspend_size, 0

#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7m_do_suspend)
	ret	lr
ENDPROC(cpu_v7m_do_suspend)

ENTRY(cpu_v7m_do_resume)
	ret	lr
ENDPROC(cpu_v7m_do_resume)
#endif

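/*
 *	cpu_cm7_dcache_clean_area(addr, size)
 *
 *	Clean the D-cache over the given region by writing each cache line
 *	address to the SCB DCCMVAC register (clean D-cache line by address
 *	to the point of coherency), then complete with a dsb.
 *
 *	- addr  - region start address (r0)
 *	- size  - region size in bytes (r1)
 */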
ENTRY(cpu_cm7_dcache_clean_area)
	dcache_line_size r2, r3
	movw	r3, #:lower16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC
	movt	r3, #:upper16:BASEADDR_V7M_SCB + V7M_SCB_DCCMVAC

1:	str	r0, [r3]		@ clean D entry
	add	r0, r0, r2
	subs	r1, r1, r2
	bhi	1b
	dsb
	ret	lr
ENDPROC(cpu_cm7_dcache_clean_area)

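/*
 *	cpu_cm7_proc_fin()
 *
 *	Disable the Cortex-M7 caches by clearing the DC and IC enable bits
 *	in the SCB Configuration and Control Register (CCR).
 */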
ENTRY(cpu_cm7_proc_fin)
	movw	r2, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
	movt	r2, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
	ldr	r0, [r2]
	bic	r0, r0, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC)
	str	r0, [r2]
	ret	lr
ENDPROC(cpu_cm7_proc_fin)

	.section ".init.text", #alloc, #execinstr

__v7m_cm7_setup:
	mov	r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC | V7M_SCB_CCR_BP)
	b	__v7m_setup_cont
/*
 *	__v7m_setup
 *
 *	This should be able to cover all ARMv7-M cores.
 */
__v7m_setup:
	mov	r8, #0

__v7m_setup_cont:
	@ Configure the vector table base address
	ldr	r0, =BASEADDR_V7M_SCB
	ldr	r12, =vector_table
	str	r12, [r0, V7M_SCB_VTOR]

	@ enable UsageFault, BusFault and MemManage fault.
	ldr	r5, [r0, #V7M_SCB_SHCSR]
	orr	r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA)
	str	r5, [r0, #V7M_SCB_SHCSR]

	@ Lower the priority of the SVC and PendSV exceptions
	mov	r5, #0x80000000
	str	r5, [r0, V7M_SCB_SHPR2]	@ set SVC priority
	mov	r5, #0x00800000
	str	r5, [r0, V7M_SCB_SHPR3]	@ set PendSV priority
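	@ SHPR2[31:24] holds the SVCall priority and SHPR3[23:16] the PendSV
	@ priority, so both writes above program a priority of 0x80.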

	@ SVC to switch to handler mode. Notice that this requires sp to
	@ point to writeable memory because the processor saves
	@ some registers to the stack.
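	@ Vector table entry 11 is the SVCall vector (byte offset 11 * 4);
	@ it is temporarily redirected to the local label 1 below and
	@ restored once the core is running in handler mode.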
	badr	r1, 1f
	ldr	r5, [r12, #11 * 4]	@ read the SVC vector entry
	str	r1, [r12, #11 * 4]	@ write the temporary SVC vector entry
	dsb
	mov	r6, lr			@ save LR
	ldr	sp, =init_thread_union + THREAD_START_SP
	stmia	sp, {r0-r3, r12}
	cpsie	i
	svc	#0
1:	cpsid	i
	ldmia	sp, {r0-r3, r12}
	str	r5, [r12, #11 * 4]	@ restore the original SVC vector entry
	mov	lr, r6			@ restore LR

	@ Special-purpose control register
	mov	r1, #1
	msr	control, r1		@ Thread mode has unprivileged access

	@ Configure caches (if implemented)
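	@ r8 carries the CCR cache enable bits chosen by the part-specific
	@ entry point (DC | IC | BP for Cortex-M7, 0 otherwise), so the L1
	@ cache is only invalidated when it is about to be enabled.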
	teq     r8, #0
	stmneia	sp, {r0-r6, lr}		@ v7m_invalidate_l1 touches r0-r6
	blne	v7m_invalidate_l1
	teq     r8, #0			@ re-evaluate condition
	ldmneia	sp, {r0-r6, lr}

	@ Configure the System Control Register to ensure 8-byte stack alignment
	@ Note the STKALIGN bit is either RW or RAO.
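	@ The updated CCR value is returned in r0 rather than written here,
	@ so that the caller can write it back once this routine returns.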
	ldr	r0, [r0, V7M_SCB_CCR]   @ system control register
	orr	r0, #V7M_SCB_CCR_STKALIGN
	orr	r0, r0, r8

	ret	lr
ENDPROC(__v7m_setup)

/*
 * Cortex-M7 processor functions
 */
	globl_equ	cpu_cm7_proc_init,	cpu_v7m_proc_init
	globl_equ	cpu_cm7_reset,		cpu_v7m_reset
	globl_equ	cpu_cm7_do_idle,	cpu_v7m_do_idle
	globl_equ	cpu_cm7_switch_mm,	cpu_v7m_switch_mm

	define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
	define_processor_functions cm7, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"
	string cpu_arch_name, "armv7m"
	string cpu_elf_name, "v7m"
	string cpu_v7m_name, "ARMv7-M"

	.section ".proc.info.init", #alloc

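/*
 * Emit the body of a struct proc_info_list entry (see asm/procinfo.h).
 * The cpu_val/cpu_mask words precede each use of this macro, and the
 * MMU-related fields (mmu_flags, tlb, user) are zero as ARMv7-M has
 * no MMU.
 */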
.macro __v7m_proc name, initfunc, cache_fns = nop_cache_fns, hwcaps = 0, proc_fns = v7m_processor_functions
	.long	0			/* proc_info_list.__cpu_mm_mmu_flags */
	.long	0			/* proc_info_list.__cpu_io_mmu_flags */
	initfn	\initfunc, \name
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \hwcaps
	.long	cpu_v7m_name
	.long   \proc_fns
	.long	0			/* proc_info_list.tlb */
	.long	0			/* proc_info_list.user */
	.long	\cache_fns
.endm

	/*
	 * Match ARM Cortex-M7 processor.
	 */
	.type	__v7m_cm7_proc_info, #object
__v7m_cm7_proc_info:
	.long	0x410fc270		/* ARM Cortex-M7 0xC27 */
	.long	0xff0ffff0		/* Mask off revision, patch release */
	__v7m_proc __v7m_cm7_proc_info, __v7m_cm7_setup, hwcaps = HWCAP_EDSP, cache_fns = v7m_cache_fns, proc_fns = cm7_processor_functions
	.size	__v7m_cm7_proc_info, . - __v7m_cm7_proc_info

	/*
	 * Match ARM Cortex-M4 processor.
	 */
	.type	__v7m_cm4_proc_info, #object
__v7m_cm4_proc_info:
	.long	0x410fc240		/* ARM Cortex-M4 0xC24 */
	.long	0xff0ffff0		/* Mask off revision, patch release */
	__v7m_proc __v7m_cm4_proc_info, __v7m_setup, hwcaps = HWCAP_EDSP
	.size	__v7m_cm4_proc_info, . - __v7m_cm4_proc_info

	/*
	 * Match ARM Cortex-M3 processor.
	 */
	.type	__v7m_cm3_proc_info, #object
__v7m_cm3_proc_info:
	.long	0x410fc230		/* ARM Cortex-M3 0xC23 */
	.long	0xff0ffff0		/* Mask off revision, patch release */
	__v7m_proc __v7m_cm3_proc_info, __v7m_setup
	.size	__v7m_cm3_proc_info, . - __v7m_cm3_proc_info

	/*
	 * Match any ARMv7-M processor core.
	 */
	.type	__v7m_proc_info, #object
__v7m_proc_info:
	.long	0x000f0000		@ Required ID value
	.long	0x000f0000		@ Mask for ID
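	@ Only the MIDR architecture field (bits 19:16) is compared; 0xF
	@ means the CPUID scheme is implemented, so this entry matches any
	@ ARMv7-M part not caught by the more specific entries above.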
	__v7m_proc __v7m_proc_info, __v7m_setup
	.size	__v7m_proc_info, . - __v7m_proc_info