author		Aneesh V <aneesh@ti.com>	2011-06-16 23:30:50 +0000
committer	Albert ARIBAUD <albert.u.boot@aribaud.net>	2011-07-04 10:55:25 +0200
commit		e05f00792b71184428fdb34a303644a1e457f000 (patch)
tree		6795b9334ef974e0f03ba55eb0cca01f4b58b372
parent		c2dd0d45540397704de9b13287417d21049d34c6 (diff)
arm: minor fixes for cache and mmu handling
1. Make sure that page table setup is not done multiple times.
2. flush_dcache_all() is more appropriate when disabling the cache than a range flush over the entire memory (flush_cache()).

Provide a default implementation of flush_dcache_all() for backward compatibility and to avoid build issues.

Signed-off-by: Aneesh V <aneesh@ti.com>
-rw-r--r--	arch/arm/lib/cache-cp15.c	 9
-rw-r--r--	arch/arm/lib/cache.c		11
2 files changed, 18 insertions, 2 deletions
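The patch below gates mmu_setup() on a new mmu_enabled() helper, which simply tests the M (MMU enable) bit of the CP15 control register. As background only, here is a minimal sketch of what that check relies on, assuming the usual U-Boot ARM definitions from asm/system.h (get_cr() reading the control register, CR_M as bit 0, CR_C bit 2, CR_I bit 12); the function name is hypothetical and merely mirrors mmu_enabled():

#include <asm/system.h>	/* get_cr(), CR_M, CR_C, CR_I */

/* Background sketch, not part of the patch: get_cr() reads the CP15
 * control register (SCTLR); bit 0 (CR_M) is set once mmu_setup() has
 * enabled the MMU, so the page table setup can be skipped on later calls.
 */
static int mmu_is_on(void)	/* hypothetical name mirroring mmu_enabled() */
{
	return get_cr() & CR_M;
}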
diff --git a/arch/arm/lib/cache-cp15.c b/arch/arm/lib/cache-cp15.c
index 51831a9..e6c3eae 100644
--- a/arch/arm/lib/cache-cp15.c
+++ b/arch/arm/lib/cache-cp15.c
@@ -92,13 +92,18 @@ static inline void mmu_setup(void)
 	set_cr(reg | CR_M);
 }
 
+static int mmu_enabled(void)
+{
+	return get_cr() & CR_M;
+}
+
 /* cache_bit must be either CR_I or CR_C */
 static void cache_enable(uint32_t cache_bit)
 {
 	uint32_t reg;
 
 	/* The data cache is not active unless the mmu is enabled too */
-	if (cache_bit == CR_C)
+	if ((cache_bit == CR_C) && !mmu_enabled())
 		mmu_setup();
 	reg = get_cr();	/* get control reg. */
 	cp_delay();
@@ -117,7 +122,7 @@ static void cache_disable(uint32_t cache_bit)
 			return;
 		/* if disabling data cache, disable mmu too */
 		cache_bit |= CR_M;
-		flush_cache(0, ~0);
+		flush_dcache_all();
 	}
 	reg = get_cr();
 	cp_delay();
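As a usage note (not part of this patch): these static helpers are normally reached through the public cache API, so the new guard only matters when the data cache is enabled after the MMU has already been set up once. A hedged sketch of how board code typically exercises these paths, assuming the usual dcache_enable()/dcache_disable() wrappers around cache_enable(CR_C)/cache_disable(CR_C); the function below is purely illustrative:

/* Hypothetical board code path, for illustration only */
void board_cache_example(void)
{
	dcache_enable();	/* cache_enable(CR_C): runs mmu_setup() only if CR_M is still clear */

	/* ... work that benefits from the D-cache ... */

	dcache_disable();	/* cache_disable(CR_C): flushes via flush_dcache_all(),
				 * then clears both the C and M bits */
}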
diff --git a/arch/arm/lib/cache.c b/arch/arm/lib/cache.c
index dc3242c..92b61a2 100644
--- a/arch/arm/lib/cache.c
+++ b/arch/arm/lib/cache.c
@@ -42,3 +42,14 @@ void __flush_cache(unsigned long start, unsigned long size)
 }
 void flush_cache(unsigned long start, unsigned long size)
 	__attribute__((weak, alias("__flush_cache")));
+
+/*
+ * Default implementation:
+ * do a range flush for the entire range
+ */
+void __flush_dcache_all(void)
+{
+	flush_cache(0, ~0);
+}
+void flush_dcache_all(void)
+	__attribute__((weak, alias("__flush_dcache_all")));
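The flush_dcache_all() added above is only a weak fallback. For illustration, a CPU- or SoC-specific cache file can provide a strong definition that replaces it at link time; the helper name below is hypothetical and not part of this patch:

/* Hypothetical SoC cache code: a non-weak flush_dcache_all()
 * overrides the weak alias in arch/arm/lib/cache.c at link time.
 */
void flush_dcache_all(void)
{
	/* e.g. clean and invalidate the whole D-cache by set/way,
	 * using whatever mechanism this CPU provides (hypothetical helper)
	 */
	soc_clean_invalidate_dcache_all();
}

Keeping the weak default as a range flush over all of memory preserves the old behaviour on CPUs that only implement flush_cache(), which is why existing boards continue to build without changes.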