/*
 * (C) Copyright 2007
 * Yoshihiro Shimoda <shimoda.yoshihiro@renesas.com>
 *
 * (C) Copyright 2007
 * Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>
#include <command.h>
#include <asm/processor.h>
#include <asm/io.h>
/*
 * Jump to the P2 area.
 * The TLB and the caches must be manipulated from the uncached P2
 * area, so that the code doing the manipulation is itself unaffected.
 */
#define jump_to_P2()						\
	do {							\
		unsigned long __dummy;				\
		__asm__ __volatile__(				\
			"mov.l	1f, %0\n\t"			\
			/* set bit 29: P1 address -> P2 alias */\
			"or	%1, %0\n\t"			\
			"jmp	@%0\n\t"			\
			" nop\n\t"	/* jmp delay slot */	\
			".balign 4\n"				\
			"1:	.long 2f\n"			\
			"2:"					\
			: "=&r" (__dummy)			\
			: "r" (0x20000000));			\
	} while (0)
/*
 * Go back to the P1 area.
 */
#define back_to_P1()						\
	do {							\
		unsigned long __dummy;				\
		/* nops let outstanding accesses settle first */\
		__asm__ __volatile__(				\
			"nop;nop;nop;nop;nop;nop;nop\n\t"	\
			"mov.l	1f, %0\n\t"			\
			"jmp	@%0\n\t"			\
			" nop\n\t"	/* jmp delay slot */	\
			".balign 4\n"				\
			"1:	.long 2f\n"			\
			"2:"					\
			: "=&r" (__dummy));			\
	} while (0)
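/*
 * Illustrative note (an addition, not from the original file): on SH-4,
 * P1 (0x80000000-0x9FFFFFFF) is a cached, untranslated window and P2
 * (0xA0000000-0xBFFFFFFF) is the uncached alias of the same physical
 * memory, so setting bit 29 turns a P1 address into its P2 alias.
 * The hypothetical helper below expresses, for data addresses, the
 * same conversion jump_to_P2() performs on the program counter.
 */
static inline unsigned long to_p2_alias(unsigned long p1_addr)
{
	/* e.g. 0x8C000000 | 0x20000000 == 0xAC000000 (uncached) */
	return p1_addr | 0x20000000;
}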
#define CACHE_VALID	1	/* V bit in an address-array entry */
#define CACHE_UPDATED	2	/* U (dirty) bit in an address-array entry */
static inline void cache_wback_all(void)
{
	unsigned long addr, data, i, j;

	jump_to_P2();
	for (i = 0; i < CACHE_OC_NUM_ENTRIES; i++) {
		for (j = 0; j < CACHE_OC_NUM_WAYS; j++) {
			/* Select entry i of way j in the OC address array */
			addr = CACHE_OC_ADDRESS_ARRAY
				| (j << CACHE_OC_WAY_SHIFT)
				| (i << CACHE_OC_ENTRY_SHIFT);
			data = inl(addr);
			if (data & CACHE_UPDATED) {
				/* Writing back with U cleared flushes the line */
				data &= ~CACHE_UPDATED;
				outl(data, addr);
			}
		}
	}
	back_to_P1();
}
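/*
 * Hypothetical single-entry variant (an illustration, not part of the
 * original file): the same address-array protocol can flush one
 * entry/way pair. Like cache_wback_all(), it must run from P2.
 */
static inline void cache_wback_entry(unsigned long entry, unsigned long way)
{
	unsigned long addr = CACHE_OC_ADDRESS_ARRAY
		| (way << CACHE_OC_WAY_SHIFT)
		| (entry << CACHE_OC_ENTRY_SHIFT);
	unsigned long data = inl(addr);

	/* Clearing U on a dirty entry makes the hardware write it back */
	if (data & CACHE_UPDATED)
		outl(data & ~CACHE_UPDATED, addr);
}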
#define CACHE_ENABLE 0
#define CACHE_DISABLE 1
int cache_control(unsigned int cmd)
{
	unsigned long ccr;

	jump_to_P2();
	ccr = inl(CCR);

	/* Flush dirty lines before the cache is stopped or reinitialized */
	if (ccr & CCR_CACHE_ENABLE)
		cache_wback_all();

	if (cmd == CACHE_DISABLE)
		outl(CCR_CACHE_STOP, CCR);
	else
		outl(CCR_CACHE_INIT, CCR);
	back_to_P1();

	return 0;
}
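/*
 * Usage sketch (added note; the call sites named here are assumptions,
 * not from this file): cpu or board code would typically drive
 * cache_control() like this, e.g. before handing off to a loaded image:
 *
 *	cache_control(CACHE_DISABLE);	flush dirty lines, stop the cache
 *	...
 *	cache_control(CACHE_ENABLE);	reinitialize and enable the cache
 */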