/*
* Copyright (C) 2013 - ARM Ltd
* Author: Marc Zyngier <marc.zyngier@arm.com>
*
* Based on code by Carl van Schaik <carl@ok-labs.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <config.h>
#include <asm/gic.h>
#include <asm/psci.h>
#include <asm/arch/cpu.h>
/*
* Memory layout:
*
 * SECURE_RAM to text_end:
* ._secure_text section
* text_end to ALIGN_PAGE(text_end):
* nothing
 * ALIGN_PAGE(text_end) to ALIGN_PAGE(text_end) + 0x1000:
* 1kB of stack per CPU (4 CPUs max).
*/
.pushsection ._secure.text, "ax"
.arch_extension sec
#define ONE_MS (CONFIG_TIMER_CLK_FREQ / 1000)
#define TEN_MS (10 * ONE_MS)
#define GICD_BASE 0x1c81000
#define GICC_BASE 0x1c82000
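/*
 * timer_wait: busy-wait for \ticks ticks of the CP15 physical timer
 * (CNTP), with the timer interrupt masked. Clobbers \reg.
 */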
.macro timer_wait reg, ticks
@ Program CNTP_TVAL
movw \reg, #(\ticks & 0xffff)
movt \reg, #(\ticks >> 16)
mcr p15, 0, \reg, c14, c2, 0
isb
@ Enable physical timer, mask interrupt
mov \reg, #3
mcr p15, 0, \reg, c14, c2, 1
@ Poll physical timer until ISTATUS is on
1: isb
mrc p15, 0, \reg, c14, c2, 1
ands \reg, \reg, #4
beq 1b @ keep polling while ISTATUS is clear
@ Disable timer
mov \reg, #0
mcr p15, 0, \reg, c14, c2, 1
isb
.endm
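/*
 * psci_fiq_enter: secure FIQ handler, run on CPU0. A core that called
 * CPU_OFF raises SGI15 to get here; we acknowledge the SGI, wait for
 * the dying core to reach WFI, then reset, power-gate and clamp it.
 */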
.globl psci_fiq_enter
psci_fiq_enter:
push {r0-r12}
@ Switch to secure
mrc p15, 0, r7, c1, c1, 0
bic r8, r7, #1
mcr p15, 0, r8, c1, c1, 0
isb
@ Validate reason based on IAR and acknowledge
movw r8, #(GICC_BASE & 0xffff)
movt r8, #(GICC_BASE >> 16)
ldr r9, [r8, #GICC_IAR]
movw r10, #0x3ff
movt r10, #0
cmp r9, r10 @ skip spurious interrupt 1023
beq out
movw r10, #0x3fe @ ...and 1022
cmp r9, r10
beq out
str r9, [r8, #GICC_EOIR] @ signal end of interrupt (ack was the IAR read)
dsb
@ Compute CPU number
lsr r9, r9, #10
and r9, r9, #0xf
movw r8, #(SUN7I_CPUCFG_BASE & 0xffff)
movt r8, #(SUN7I_CPUCFG_BASE >> 16)
@ Wait for the core to enter WFI
lsl r11, r9, #6 @ 64 bytes of CPUCFG registers per CPU
add r11, r11, r8
1: ldr r10, [r11, #0x48]
tst r10, #(1 << 2)
bne 2f
timer_wait r10, ONE_MS
b 1b
@ Reset CPU
2: mov r10, #0
str r10, [r11, #0x40]
@ Lock CPU (disable external debug access)
mov r10, #1
lsl r9, r10, r9 @ r9 is now CPU mask
ldr r10, [r8, #0x1e4]
bic r10, r10, r9
str r10, [r8, #0x1e4]
@ Set power gating
ldr r10, [r8, #0x1b4]
orr r10, r10, #1
str r10, [r8, #0x1b4]
timer_wait r10, ONE_MS
@ Activate power clamp
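@ Close the clamp progressively: writes 0x01, 0x03, 0x07, ... 0xff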
mov r10, #1
1: str r10, [r8, #0x1b0]
lsl r10, r10, #1
orr r10, r10, #1
tst r10, #0x100
beq 1b
@ Restore security level
out: mcr p15, 0, r7, c1, c1, 0
pop {r0-r12}
subs pc, lr, #4 @ return from FIQ
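/*
 * psci_cpu_on: power up a secondary core. The entry point is saved in
 * _target_pc and the CPUCFG boot vector is pointed at _sunxi_cpu_entry;
 * the core is then sequenced out of reset: assert reset, release the
 * power clamp, clear power gating, de-assert reset.
 */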
@ r1 = target CPU
@ r2 = target PC
.globl psci_cpu_on
psci_cpu_on:
adr r0, _target_pc
str r2, [r0]
dsb
movw r0, #(SUN7I_CPUCFG_BASE & 0xffff)
movt r0, #(SUN7I_CPUCFG_BASE >> 16)
@ CPU mask
and r1, r1, #3 @ only care about first cluster
mov r4, #1
lsl r4, r4, r1
adr r6, _sunxi_cpu_entry
str r6, [r0, #0x1a4] @ PRIVATE_REG (boot vector)
@ Assert reset on target CPU
mov r6, #0
lsl r5, r1, #6 @ 64 bytes per CPU
add r5, r5, #0x40 @ Offset from base
add r5, r5, r0 @ CPU control block
str r6, [r5] @ Reset CPU
@ Invalidate L1 cache
ldr r6, [r0, #0x184]
bic r6, r6, r4
str r6, [r0, #0x184]
@ Lock CPU (disable external debug access)
ldr r6, [r0, #0x1e4]
bic r6, r6, r4
str r6, [r0, #0x1e4]
@ Release power clamp
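@ Open the clamp progressively: writes 0xff, 0x7f, ... 0x01, 0x00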
movw r6, #0x1ff
movt r6, #0
1: lsrs r6, r6, #1
str r6, [r0, #0x1b0]
bne 1b
timer_wait r1, TEN_MS
@ Clear power gating
ldr r6, [r0, #0x1b4]
bic r6, r6, #1
str r6, [r0, #0x1b4]
@ Deassert reset on target CPU
mov r6, #3
str r6, [r5]
@ Unlock CPU (re-enable external debug access)
ldr r6, [r0, #0x1e4]
orr r6, r6, r4
str r6, [r0, #0x1e4]
mov r0, #ARM_PSCI_RET_SUCCESS @ Return PSCI_RET_SUCCESS
mov pc, lr
_target_pc:
.word 0
/* Imported from Linux kernel */
v7_flush_dcache_all:
dmb @ ensure ordering with previous memory accesses
mrc p15, 1, r0, c0, c0, 1 @ read clidr
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ left align loc bit field
beq finished @ if loc is 0, then no need to clean
mov r10, #0 @ start clean at cache level 0
flush_levels:
add r2, r10, r10, lsr #1 @ work out 3x current cache level
mov r1, r0, lsr r2 @ extract cache type bits from clidr
and r1, r1, #7 @ mask off the bits for current cache only
cmp r1, #2 @ see what cache we have at this level
blt skip @ skip if no cache, or just i-cache
mrs r9, cpsr @ make cssr&csidr read atomic
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
isb @ isb to sync the new cssr&csidr
mrc p15, 1, r1, c0, c0, 0 @ read the new csidr
msr cpsr_c, r9
and r2, r1, #7 @ extract the length of the cache lines
add r2, r2, #4 @ add 4 (line length offset)
ldr r4, =0x3ff
ands r4, r4, r1, lsr #3 @ find maximum way number (associativity - 1)
clz r5, r4 @ find bit position of way size increment
ldr r7, =0x7fff
ands r7, r7, r1, lsr #13 @ extract maximum set number (NumSets - 1)
loop1:
mov r9, r7 @ create working copy of max index
loop2:
orr r11, r10, r4, lsl r5 @ factor way and cache number into r11
orr r11, r11, r9, lsl r2 @ factor index number into r11
mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way
subs r9, r9, #1 @ decrement the index
bge loop2
subs r4, r4, #1 @ decrement the way
bge loop1
skip:
add r10, r10, #2 @ increment cache number
cmp r3, r10
bgt flush_levels
finished:
mov r10, #0 @ switch back to cache level 0
mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr
dsb st
isb
bx lr
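/*
 * _sunxi_cpu_entry: secure-world entry point for a core brought up by
 * psci_cpu_on. Joins the coherency domain (ACTLR.SMP), runs the
 * non-secure/PSCI init, then jumps to the address saved in _target_pc.
 */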
_sunxi_cpu_entry:
@ Set SMP bit
mrc p15, 0, r0, c1, c0, 1
orr r0, r0, #0x40
mcr p15, 0, r0, c1, c0, 1
isb
bl _nonsec_init
bl psci_arch_init
adr r0, _target_pc
ldr r0, [r0]
b _do_nonsec_entry
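/*
 * psci_cpu_off: runs on the core that wants to die. Cleans its caches
 * and leaves coherency, then asks CPU0 (via SGI15) to cut its power
 * while it parks in WFI.
 */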
.globl psci_cpu_off
psci_cpu_off:
mrc p15, 0, r0, c1, c0, 0 @ SCTLR
bic r0, r0, #(1 << 2) @ Clear C bit
mcr p15, 0, r0, c1, c0, 0 @ SCTLR
isb
dsb
bl v7_flush_dcache_all
clrex @ clear local exclusive monitor state
mrc p15, 0, r0, c1, c0, 1 @ ACTLR
bic r0, r0, #(1 << 6) @ Clear SMP bit
mcr p15, 0, r0, c1, c0, 1 @ ACTLR
isb
dsb
@ Ask CPU0 to pull the rug...
movw r0, #(GICD_BASE & 0xffff)
movt r0, #(GICD_BASE >> 16)
movw r1, #15 @ SGI15
movt r1, #1 @ Target is CPU0
str r1, [r0, #GICD_SGIR]
dsb
1: wfi
b 1b
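/*
 * psci_arch_init: per-CPU secure setup. Makes SGI15 a Group-0 (FIQ)
 * interrupt at the highest priority, routes FIQs to monitor mode, and
 * sets up this CPU's 1kB secure stack above text_end.
 */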
.globl psci_arch_init
psci_arch_init:
movw r4, #(GICD_BASE & 0xffff)
movt r4, #(GICD_BASE >> 16)
ldr r5, [r4, #GICD_IGROUPRn]
bic r5, r5, #(1 << 15) @ SGI15 as Group-0
str r5, [r4, #GICD_IGROUPRn]
mov r5, #0 @ Set SGI15 priority to 0 (highest)
strb r5, [r4, #(GICD_IPRIORITYRn + 15)]
add r4, r4, #0x1000 @ GICC address
mov r5, #0xff
str r5, [r4, #GICC_PMR] @ don't mask any interrupt priorities
ldr r5, [r4, #GICC_CTLR]
orr r5, r5, #(1 << 3) @ Switch FIQEn on
str r5, [r4, #GICC_CTLR]
mrc p15, 0, r5, c1, c1, 0 @ Read SCR
orr r5, r5, #4 @ Enable FIQ in monitor mode
bic r5, r5, #1 @ Secure mode
mcr p15, 0, r5, c1, c1, 0 @ Write SCR
isb
mrc p15, 0, r4, c0, c0, 5 @ MPIDR
and r4, r4, #3 @ cpu number in cluster
mov r5, #0x400 @ 1kB of stack per CPU
mul r4, r4, r5
adr r5, text_end @ end of text
add r5, r5, #0x2000 @ Skip two pages
lsr r5, r5, #12 @ Align to start of page
lsl r5, r5, #12
sub sp, r5, r4 @ here's our stack!
bx lr
text_end:
.popsection