arch/arm/mach-mvebu/coherency_ll.S
/*
 * Coherency fabric: low level functions
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * This file implements the assembly function to add a CPU to the
 * coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, which is why this
 * function has to be callable from assembly. It can also be called by a
 * primary CPU from C code during its boot.
 */
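/*
 * Illustration only, not part of the original file: a primary CPU
 * could drive the C-callable entry points below roughly as follows.
 * The prototypes are inferred from the assembly (no arguments,
 * ll_enable_coherency() returns 0 in r0), and
 * example_set_cpu_coherent() is a hypothetical helper name used
 * purely for this sketch:
 *
 *	void ll_add_cpu_to_smp_group(void);
 *	int ll_enable_coherency(void);
 *
 *	static int example_set_cpu_coherent(void)
 *	{
 *		ll_add_cpu_to_smp_group();
 *		return ll_enable_coherency();
 *	}
 */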

#include <linux/linkage.h>
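/*
 * Offsets of the coherency fabric control and configuration
 * registers, relative to the base address returned by
 * ll_get_coherency_base().
 */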
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4

#include <asm/assembler.h>
#include <asm/cp15.h>

	.text
/*
 * Returns the coherency base address in r1 (r0 is untouched), or 0 if
 * the coherency fabric is not enabled.
 */
ENTRY(ll_get_coherency_base)
	mrc	p15, 0, r1, c1, c0, 0
	tst	r1, #CR_M @ Check MMU bit enabled
	bne	1f

	/*
	 * MMU is disabled, use the physical address of the coherency
	 * base address. However, if the coherency fabric isn't mapped
	 * (i.e. its virtual address is zero), it means coherency is
	 * not enabled, so we return 0.
	 */
	ldr	r1, =coherency_base
	cmp	r1, #0
	beq	2f
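	/*
	 * Load the value of coherency_phys_base through a
	 * position-independent reference: the literal at 3f holds the
	 * link-time offset from 3f to coherency_phys_base, so adding
	 * it to the run-time address of 3f reaches the variable even
	 * though the kernel's virtual mapping is not active.
	 */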
	adr	r1, 3f
	ldr	r3, [r1]
	ldr	r1, [r1, r3]
	b	2f
1:
	/*
	 * MMU is enabled, use the virtual address of the coherency
	 * base address.
	 */
	ldr	r1, =coherency_base
	ldr	r1, [r1]
2:
	ret	lr
ENDPROC(ll_get_coherency_base)

/*
 * Returns the coherency CPU mask in r3 (r0 is untouched). This
 * coherency CPU mask can be used with the coherency fabric
 * configuration and control registers. Note that the mask is already
 * endian-swapped as appropriate so that the calling functions do not
 * have to care about endianness issues while accessing the coherency
 * fabric registers.
 */
ENTRY(ll_get_coherency_cpumask)
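	/*
	 * Read this CPU's index from MPIDR (CP15 c0, c0, 5) and turn
	 * it into the per-CPU bit used in the fabric registers:
	 * (1 << 24) shifted left by the CPU index, i.e. bit (24 + cpu).
	 */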
	mrc	p15, 0, r3, cr0, cr0, 5
	and	r3, r3, #15
	mov	r2, #(1 << 24)
	lsl	r3, r2, r3
ARM_BE8(rev	r3, r3)
	ret	lr
ENDPROC(ll_get_coherency_cpumask)

/*
 * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
 * ll_disable_coherency() use the strex/ldrex instructions while the
 * MMU can be disabled. The Armada XP SoC has an exclusive monitor
 * that tracks transactions to Device and/or SO memory and thanks to
 * that, exclusive transactions are functional even when the MMU is
 * disabled.
 */

ENTRY(ll_add_cpu_to_smp_group)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov 	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov 	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
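	/* Atomically set this CPU's bit in the fabric configuration register */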
1:
	ldrex	r2, [r0]
	orr	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	ret	lr
ENDPROC(ll_add_cpu_to_smp_group)

ENTRY(ll_enable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
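	/* Atomically set this CPU's bit in the fabric control register */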
1:
	ldrex	r2, [r0]
	orr	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	dsb
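	/* Return 0 (success) to C callers */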
	mov	r0, #0
	ret	lr
ENDPROC(ll_enable_coherency)

ENTRY(ll_disable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov 	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0
	bl	ll_get_coherency_cpumask
	mov 	lr, r0
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
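	/* Atomically clear this CPU's bit in the fabric control register */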
1:
	ldrex	r2, [r0]
	bic	r2, r2, r3
	strex	r1, r2, [r0]
	cmp	r1, #0
	bne	1b
	dsb
	ret	lr
ENDPROC(ll_disable_coherency)

	.align 2
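/*
 * Link-time offset from the location below to coherency_phys_base,
 * consumed by ll_get_coherency_base() when the MMU is disabled.
 */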
3:
	.long	coherency_phys_base - .