Diffstat (limited to 'cpu')
-rw-r--r--  cpu/mpc85xx/Makefile                     |   1
-rw-r--r--  cpu/mpc85xx/cpu.c                        |   2
-rw-r--r--  cpu/mpc85xx/cpu_init.c                   |  13
-rw-r--r--  cpu/mpc85xx/ddr-gen3.c                   |   6
-rw-r--r--  cpu/mpc85xx/fdt.c                        |  73
-rw-r--r--  cpu/mpc85xx/mp.c                         |  26
-rw-r--r--  cpu/mpc85xx/mp.h                         |   4
-rw-r--r--  cpu/mpc85xx/release.S                    |  16
-rw-r--r--  cpu/mpc85xx/start.S                      |  51
-rw-r--r--  cpu/mpc86xx/Makefile                     |  25
-rw-r--r--  cpu/mpc86xx/cpu_init.c                   |   2
-rw-r--r--  cpu/mpc86xx/fdt.c                        |  14
-rw-r--r--  cpu/mpc86xx/mp.c                         |  51
-rw-r--r--  cpu/mpc86xx/mp.h                         |   7
-rw-r--r--  cpu/mpc86xx/release.S                    |   2
-rw-r--r--  cpu/mpc8xxx/ddr/Makefile                 |   4
-rw-r--r--  cpu/mpc8xxx/ddr/ctrl_regs.c              | 383
-rw-r--r--  cpu/mpc8xxx/ddr/ddr3_dimm_params.c       | 314
-rw-r--r--  cpu/mpc8xxx/ddr/lc_common_dimm_params.c  |  74
-rw-r--r--  cpu/mpc8xxx/ddr/options.c                |  28
20 files changed, 972 insertions, 124 deletions
diff --git a/cpu/mpc85xx/Makefile b/cpu/mpc85xx/Makefile
index 99d88a8..8809302 100644
--- a/cpu/mpc85xx/Makefile
+++ b/cpu/mpc85xx/Makefile
@@ -49,6 +49,7 @@ COBJS-$(CONFIG_MPC8544) += ddr-gen2.o
COBJS-$(CONFIG_MPC8572) += ddr-gen3.o
COBJS-$(CONFIG_MPC8536) += ddr-gen3.o
COBJS-$(CONFIG_P2020) += ddr-gen3.o
+COBJS-$(CONFIG_MPC8569) += ddr-gen3.o
COBJS-$(CONFIG_MPC8536) += mpc8536_serdes.o
COBJS = traps.o cpu.o cpu_init.o speed.o interrupts.o tlb.o \
diff --git a/cpu/mpc85xx/cpu.c b/cpu/mpc85xx/cpu.c
index 5b72fe5..ef976a4 100644
--- a/cpu/mpc85xx/cpu.c
+++ b/cpu/mpc85xx/cpu.c
@@ -61,6 +61,8 @@ struct cpu_type cpu_type_list [] = {
CPU_TYPE_ENTRY(8567, 8567_E),
CPU_TYPE_ENTRY(8568, 8568),
CPU_TYPE_ENTRY(8568, 8568_E),
+ CPU_TYPE_ENTRY(8569, 8569),
+ CPU_TYPE_ENTRY(8569, 8569_E),
CPU_TYPE_ENTRY(8572, 8572),
CPU_TYPE_ENTRY(8572, 8572_E),
CPU_TYPE_ENTRY(P2020, P2020),
diff --git a/cpu/mpc85xx/cpu_init.c b/cpu/mpc85xx/cpu_init.c
index 0b7c609..c98dd8d 100644
--- a/cpu/mpc85xx/cpu_init.c
+++ b/cpu/mpc85xx/cpu_init.c
@@ -345,6 +345,19 @@ int cpu_init_r(void)
asm("msync;isync");
puts("enabled\n");
}
+#elif defined(CONFIG_BACKSIDE_L2_CACHE)
+ u32 l2cfg0 = mfspr(SPRN_L2CFG0);
+
+ /* invalidate the L2 cache */
+ mtspr(SPRN_L2CSR0, L2CSR0_L2FI);
+ while (mfspr(SPRN_L2CSR0) & L2CSR0_L2FI)
+ ;
+
+ /* enable the cache */
+ mtspr(SPRN_L2CSR0, CONFIG_SYS_INIT_L2CSR0);
+
+ if (CONFIG_SYS_INIT_L2CSR0 & L2CSR0_L2E)
+ printf("%d KB enabled\n", (l2cfg0 & 0x3fff) * 64);
#else
puts("disabled\n");
#endif
diff --git a/cpu/mpc85xx/ddr-gen3.c b/cpu/mpc85xx/ddr-gen3.c
index 99c325a..8ac3d5f 100644
--- a/cpu/mpc85xx/ddr-gen3.c
+++ b/cpu/mpc85xx/ddr-gen3.c
@@ -98,10 +98,12 @@ void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs,
#endif
/*
- * 200 painful micro-seconds must elapse between
+ * 500 painful micro-seconds must elapse between
* the DDR clock setup and the DDR config enable.
+ * The DDR2 spec requires 200 us and the DDR3 spec requires 500 us,
+ * so we use the larger value, 500 us, for all cases.
*/
- udelay(200);
+ udelay(500);
asm volatile("sync;isync");
/* Let the controller go */
diff --git a/cpu/mpc85xx/fdt.c b/cpu/mpc85xx/fdt.c
index 1fae47c..26a8f48 100644
--- a/cpu/mpc85xx/fdt.c
+++ b/cpu/mpc85xx/fdt.c
@@ -39,13 +39,8 @@ void ft_fixup_cpu(void *blob, u64 memory_limit)
{
int off;
ulong spin_tbl_addr = get_spin_addr();
- u32 bootpg, id = get_my_id();
-
- /* if we have 4G or more of memory, put the boot page at 4Gb-4k */
- if ((u64)gd->ram_size > 0xfffff000)
- bootpg = 0xfffff000;
- else
- bootpg = gd->ram_size - 4096;
+ u32 bootpg = determine_mp_bootpg();
+ u32 id = get_my_id();
off = fdt_node_offset_by_prop_value(blob, -1, "device_type", "cpu", 4);
while (off != -FDT_ERR_NOTFOUND) {
@@ -80,7 +75,9 @@ void ft_fixup_cpu(void *blob, u64 memory_limit)
}
#endif
-#ifdef CONFIG_L2_CACHE
+#define ft_fixup_l3cache(x, y)
+
+#if defined(CONFIG_L2_CACHE)
/* return size in kilobytes */
static inline u32 l2cache_size(void)
{
@@ -157,6 +154,66 @@ static inline void ft_fixup_l2cache(void *blob)
fdt_setprop_cell(blob, off, "cache-sets", num_sets);
fdt_setprop_cell(blob, off, "cache-level", 2);
fdt_setprop(blob, off, "compatible", compat_buf, sizeof(compat_buf));
+
+ /* we don't bother with L3 since no platform of this type has one */
+}
+#elif defined(CONFIG_BACKSIDE_L2_CACHE)
+static inline void ft_fixup_l2cache(void *blob)
+{
+ int off, l2_off, l3_off = -1;
+ u32 *ph;
+ u32 l2cfg0 = mfspr(SPRN_L2CFG0);
+ u32 size, line_size, num_ways, num_sets;
+
+ size = (l2cfg0 & 0x3fff) * 64 * 1024;
+ num_ways = ((l2cfg0 >> 14) & 0x1f) + 1;
+ line_size = (((l2cfg0 >> 23) & 0x3) + 1) * 32;
+ num_sets = size / (line_size * num_ways);
+
+ off = fdt_node_offset_by_prop_value(blob, -1, "device_type", "cpu", 4);
+
+ while (off != -FDT_ERR_NOTFOUND) {
+ ph = (u32 *)fdt_getprop(blob, off, "next-level-cache", 0);
+
+ if (ph == NULL) {
+ debug("no next-level-cache property\n");
+ goto next;
+ }
+
+ l2_off = fdt_node_offset_by_phandle(blob, *ph);
+ if (l2_off < 0) {
+ printf("%s: %s\n", __func__, fdt_strerror(off));
+ goto next;
+ }
+
+ fdt_setprop(blob, l2_off, "cache-unified", NULL, 0);
+ fdt_setprop_cell(blob, l2_off, "cache-block-size", line_size);
+ fdt_setprop_cell(blob, l2_off, "cache-size", size);
+ fdt_setprop_cell(blob, l2_off, "cache-sets", num_sets);
+ fdt_setprop_cell(blob, l2_off, "cache-level", 2);
+ fdt_setprop(blob, l2_off, "compatible", "cache", 6);
+
+ if (l3_off < 0) {
+ ph = (u32 *)fdt_getprop(blob, l2_off, "next-level-cache", 0);
+
+ if (ph == NULL) {
+ debug("no next-level-cache property\n");
+ goto next;
+ }
+ l3_off = *ph;
+ }
+next:
+ off = fdt_node_offset_by_prop_value(blob, off,
+ "device_type", "cpu", 4);
+ }
+ if (l3_off > 0) {
+ l3_off = fdt_node_offset_by_phandle(blob, l3_off);
+ if (l3_off < 0) {
+ printf("%s: %s\n", __func__, fdt_strerror(off));
+ return ;
+ }
+ ft_fixup_l3cache(blob, l3_off);
+ }
}
#else
#define ft_fixup_l2cache(x)
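A minimal host-side sketch (not part of this commit) of the L2CFG0 decode that ft_fixup_l2cache() and cpu_init_r() above rely on; the register value used here is hypothetical, but the shifts and masks are the ones from the patch.

#include <stdio.h>

int main(void)
{
        /* hypothetical L2CFG0 value: 512 KB, 8 ways, 64-byte lines */
        unsigned int l2cfg0 = (1 << 23) | (7 << 14) | 8;
        unsigned int size, line_size, num_ways, num_sets;

        size      = (l2cfg0 & 0x3fff) * 64 * 1024;      /* size field counts 64 KB units */
        num_ways  = ((l2cfg0 >> 14) & 0x1f) + 1;        /* ways field stores ways - 1 */
        line_size = (((l2cfg0 >> 23) & 0x3) + 1) * 32;  /* line size in 32-byte steps */
        num_sets  = size / (line_size * num_ways);

        printf("L2: %u KB, %u ways, %u-byte lines, %u sets\n",
               size / 1024, num_ways, line_size, num_sets);
        return 0;
}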
diff --git a/cpu/mpc85xx/mp.c b/cpu/mpc85xx/mp.c
index 3338c1a..76f02a4 100644
--- a/cpu/mpc85xx/mp.c
+++ b/cpu/mpc85xx/mp.c
@@ -38,6 +38,7 @@ int cpu_reset(int nr)
{
volatile ccsr_pic_t *pic = (void *)(CONFIG_SYS_MPC85xx_PIC_ADDR);
out_be32(&pic->pir, 1 << nr);
+ /* the dummy read works around an erratum on early 85xx MP PICs */
(void)in_be32(&pic->pir);
out_be32(&pic->pir, 0x0);
@@ -112,6 +113,15 @@ int cpu_release(int nr, int argc, char *argv[])
return 0;
}
+u32 determine_mp_bootpg(void)
+{
+ /* if we have 4G or more of memory, put the boot page at 4Gb-4k */
+ if ((u64)gd->ram_size > 0xfffff000)
+ return (0xfffff000);
+
+ return (gd->ram_size - 4096);
+}
+
ulong get_spin_addr(void)
{
extern ulong __secondary_start_page;
@@ -188,13 +198,7 @@ static void pq3_mp_up(unsigned long bootpg)
void cpu_mp_lmb_reserve(struct lmb *lmb)
{
- u32 bootpg;
-
- /* if we have 4G or more of memory, put the boot page at 4Gb-4k */
- if ((u64)gd->ram_size > 0xfffff000)
- bootpg = 0xfffff000;
- else
- bootpg = gd->ram_size - 4096;
+ u32 bootpg = determine_mp_bootpg();
lmb_reserve(lmb, bootpg, 4096);
}
@@ -203,13 +207,7 @@ void setup_mp(void)
{
extern ulong __secondary_start_page;
ulong fixup = (ulong)&__secondary_start_page;
- u32 bootpg;
-
- /* if we have 4G or more of memory, put the boot page at 4Gb-4k */
- if ((u64)gd->ram_size > 0xfffff000)
- bootpg = 0xfffff000;
- else
- bootpg = gd->ram_size - 4096;
+ u32 bootpg = determine_mp_bootpg();
memcpy((void *)bootpg, (void *)fixup, 4096);
flush_cache(bootpg, 4096);
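A minimal host-side sketch (not part of this commit) of the boot-page placement rule that determine_mp_bootpg() now centralizes; gd->ram_size is replaced by a plain argument so the arithmetic can be checked in isolation.

#include <stdio.h>
#include <stdint.h>

static uint32_t bootpg_for(uint64_t ram_size)
{
        /* if we have 4G or more of memory, put the boot page at 4Gb-4k */
        if (ram_size > 0xfffff000ULL)
                return 0xfffff000;
        return (uint32_t)(ram_size - 4096);
}

int main(void)
{
        printf("512 MB -> 0x%08x\n", bootpg_for(512ULL << 20));  /* 0x1ffff000 */
        printf("  4 GB -> 0x%08x\n", bootpg_for(4ULL << 30));    /* 0xfffff000 */
        return 0;
}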
diff --git a/cpu/mpc85xx/mp.h b/cpu/mpc85xx/mp.h
index 4329286..2c2929e 100644
--- a/cpu/mpc85xx/mp.h
+++ b/cpu/mpc85xx/mp.h
@@ -1,10 +1,10 @@
#ifndef __MPC85XX_MP_H_
#define __MPC85XX_MP_H_
+#include <asm/mp.h>
+
ulong get_spin_addr(void);
-void setup_mp(void);
u32 get_my_id(void);
-void cpu_mp_lmb_reserve(struct lmb *lmb);
#define BOOT_ENTRY_ADDR_UPPER 0
#define BOOT_ENTRY_ADDR_LOWER 1
diff --git a/cpu/mpc85xx/release.S b/cpu/mpc85xx/release.S
index 54c936c..fbefc2c 100644
--- a/cpu/mpc85xx/release.S
+++ b/cpu/mpc85xx/release.S
@@ -76,6 +76,22 @@ __secondary_start_page:
slwi r8,r4,5
add r10,r3,r8
+#ifdef CONFIG_BACKSIDE_L2_CACHE
+ /* Enable/invalidate the L2 cache */
+ msync
+ lis r3,L2CSR0_L2FI@h
+ mtspr SPRN_L2CSR0,r3
+1:
+ mfspr r3,SPRN_L2CSR0
+ andis. r1,r3,L2CSR0_L2FI@h
+ bne 1b
+
+ lis r3,CONFIG_SYS_INIT_L2CSR0@h
+ ori r3,r3,CONFIG_SYS_INIT_L2CSR0@l
+ mtspr SPRN_L2CSR0,r3
+ isync
+#endif
+
#define EPAPR_MAGIC (0x45504150)
#define ENTRY_ADDR_UPPER 0
#define ENTRY_ADDR_LOWER 4
diff --git a/cpu/mpc85xx/start.S b/cpu/mpc85xx/start.S
index 80f9677..4f7236f 100644
--- a/cpu/mpc85xx/start.S
+++ b/cpu/mpc85xx/start.S
@@ -161,7 +161,9 @@ _start_e500:
#if defined(CONFIG_ENABLE_36BIT_PHYS)
ori r0,r0,HID0_ENMAS7@l /* Enable MAS7 */
#endif
+#ifndef CONFIG_E500MC
ori r0,r0,HID0_TBEN@l /* Enable Timebase */
+#endif
mtspr HID0,r0
#ifndef CONFIG_E500MC
@@ -184,6 +186,55 @@ _start_e500:
mtspr DBCR0,r0
#endif
+#ifdef CONFIG_MPC8569
+#define CONFIG_SYS_LBC_ADDR (CONFIG_SYS_CCSRBAR_DEFAULT + 0x5000)
+#define CONFIG_SYS_LBCR_ADDR (CONFIG_SYS_LBC_ADDR + 0xd0)
+
+ /* MPC8569 Rev.0 silicon needs bit 13 of LBCR set to allow the eLBC to
+ * use an address space of more than 12 bits, and this must be done in
+ * the 4K boot page. So we set this bit here.
+ */
+
+ /* create a temp mapping TLB0[0] for LBCR */
+ lis r6,FSL_BOOKE_MAS0(0, 0, 0)@h
+ ori r6,r6,FSL_BOOKE_MAS0(0, 0, 0)@l
+
+ lis r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
+ ori r7,r7,FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l
+
+ lis r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@h
+ ori r8,r8,FSL_BOOKE_MAS2(CONFIG_SYS_LBC_ADDR, MAS2_I|MAS2_G)@l
+
+ lis r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
+ (MAS3_SX|MAS3_SW|MAS3_SR))@h
+ ori r9,r9,FSL_BOOKE_MAS3(CONFIG_SYS_LBC_ADDR, 0,
+ (MAS3_SX|MAS3_SW|MAS3_SR))@l
+
+ mtspr MAS0,r6
+ mtspr MAS1,r7
+ mtspr MAS2,r8
+ mtspr MAS3,r9
+ isync
+ msync
+ tlbwe
+
+ /* Set LBCR register */
+ lis r4,CONFIG_SYS_LBCR_ADDR@h
+ ori r4,r4,CONFIG_SYS_LBCR_ADDR@l
+
+ lis r5,CONFIG_SYS_LBC_LBCR@h
+ ori r5,r5,CONFIG_SYS_LBC_LBCR@l
+ stw r5,0(r4)
+ isync
+
+ /* invalidate this temp TLB */
+ lis r4,CONFIG_SYS_LBC_ADDR@h
+ ori r4,r4,CONFIG_SYS_LBC_ADDR@l
+ tlbivax 0,r4
+ isync
+
+#endif /* CONFIG_MPC8569 */
+
/* create a temp mapping in AS=1 to the 4M boot window */
lis r6,FSL_BOOKE_MAS0(1, 15, 0)@h
ori r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l
diff --git a/cpu/mpc86xx/Makefile b/cpu/mpc86xx/Makefile
index 34a9755..daca79a 100644
--- a/cpu/mpc86xx/Makefile
+++ b/cpu/mpc86xx/Makefile
@@ -29,26 +29,23 @@ include $(TOPDIR)/config.mk
LIB = $(obj)lib$(CPU).a
START = start.o
-SOBJS = cache.o
-ifneq ($(CONFIG_NUM_CPUS),1)
-COBJS-y += mp.o
-SOBJS += release.o
-endif
-COBJS-y += traps.o
+SOBJS-y += cache.o
+SOBJS-$(CONFIG_MP) += release.o
+
COBJS-y += cpu.o
COBJS-y += cpu_init.o
-COBJS-y += speed.o
-COBJS-y += interrupts.o
-
-COBJS-$(CONFIG_OF_LIBFDT) += fdt.o
-
-COBJS-$(CONFIG_MPC8641) += ddr-8641.o
# 8610 & 8641 are identical w/regards to DDR
COBJS-$(CONFIG_MPC8610) += ddr-8641.o
+COBJS-$(CONFIG_MPC8641) += ddr-8641.o
+COBJS-$(CONFIG_OF_LIBFDT) += fdt.o
+COBJS-y += interrupts.o
+COBJS-$(CONFIG_MP) += mp.o
+COBJS-y += speed.o
+COBJS-y += traps.o
-SRCS := $(START:.o=.S) $(SOBJS:.o=.S) $(COBJS-y:.o=.c)
-OBJS := $(addprefix $(obj),$(SOBJS) $(COBJS-y))
+SRCS := $(START:.o=.S) $(SOBJS-y:.o=.S) $(COBJS-y:.o=.c)
+OBJS := $(addprefix $(obj),$(SOBJS-y) $(COBJS-y))
START := $(addprefix $(obj),$(START))
all: $(obj).depend $(START) $(LIB)
diff --git a/cpu/mpc86xx/cpu_init.c b/cpu/mpc86xx/cpu_init.c
index 4f29122..49528aa 100644
--- a/cpu/mpc86xx/cpu_init.c
+++ b/cpu/mpc86xx/cpu_init.c
@@ -31,7 +31,7 @@
#include <mpc86xx.h>
#include <asm/mmu.h>
#include <asm/fsl_law.h>
-#include "mp.h"
+#include <asm/mp.h>
void setup_bats(void);
diff --git a/cpu/mpc86xx/fdt.c b/cpu/mpc86xx/fdt.c
index 383b06b..a36ee30 100644
--- a/cpu/mpc86xx/fdt.c
+++ b/cpu/mpc86xx/fdt.c
@@ -9,15 +9,15 @@
#include <common.h>
#include <libfdt.h>
#include <fdt_support.h>
-#include "mp.h"
+#include <asm/mp.h>
DECLARE_GLOBAL_DATA_PTR;
void ft_cpu_setup(void *blob, bd_t *bd)
{
-#if (CONFIG_NUM_CPUS > 1)
+#ifdef CONFIG_MP
int off;
- u32 bootpg;
+ u32 bootpg = determine_mp_bootpg();
#endif
do_fixup_by_prop_u32(blob, "device_type", "cpu", 4,
@@ -48,13 +48,7 @@ void ft_cpu_setup(void *blob, bd_t *bd)
"clock-frequency", CONFIG_SYS_NS16550_CLK, 1);
#endif
-#if (CONFIG_NUM_CPUS > 1)
- /* if we have 4G or more of memory, put the boot page at 4Gb-1M */
- if (gd->ram_size > 0xfffff000)
- bootpg = 0xfff00000;
- else
- bootpg = gd->ram_size - (1024 * 1024);
-
+#ifdef CONFIG_MP
/* Reserve the boot page so OSes dont use it */
off = fdt_add_mem_rsv(blob, bootpg, (u64)4096);
if (off < 0)
diff --git a/cpu/mpc86xx/mp.c b/cpu/mpc86xx/mp.c
index 5014401..2940673 100644
--- a/cpu/mpc86xx/mp.c
+++ b/cpu/mpc86xx/mp.c
@@ -4,20 +4,45 @@
#include <ioports.h>
#include <lmb.h>
#include <asm/io.h>
-#include "mp.h"
+#include <asm/mp.h>
DECLARE_GLOBAL_DATA_PTR;
-#if (CONFIG_NUM_CPUS > 1)
-void cpu_mp_lmb_reserve(struct lmb *lmb)
+int cpu_reset(int nr)
+{
+ /* Dummy function so common/cmd_mp.c will build; it should be
+ * implemented in the future, when cpu_release() is supported.
+ * Be aware there may be a bug similar to the one on MPC85xx,
+ * where the PIC has a timing window associated with resetting
+ * the core. */
+ return 1;
+}
+
+int cpu_status(int nr)
{
- u32 bootpg;
+ /* dummy function so common/cmd_mp.c will build */
+ return 0;
+}
+
+int cpu_release(int nr, int argc, char *argv[])
+{
+ /* dummy function so common/cmd_mp.c will build;
+ * it should be implemented in the future */
+ return 1;
+}
+u32 determine_mp_bootpg(void)
+{
/* if we have 4G or more of memory, put the boot page at 4Gb-1M */
if ((u64)gd->ram_size > 0xfffff000)
- bootpg = 0xfff00000;
- else
- bootpg = gd->ram_size - (1024 * 1024);
+ return (0xfff00000);
+
+ return (gd->ram_size - (1024 * 1024));
+}
+
+void cpu_mp_lmb_reserve(struct lmb *lmb)
+{
+ u32 bootpg = determine_mp_bootpg();
/* tell u-boot we stole a page */
lmb_reserve(lmb, bootpg, 4096);
@@ -31,18 +56,9 @@ void setup_mp(void)
{
extern ulong __secondary_start_page;
ulong fixup = (ulong)&__secondary_start_page;
- u32 bootpg;
+ u32 bootpg = determine_mp_bootpg();
u32 bootpg_va;
- /*
- * If we have 4G or more of memory, put the boot page at 4Gb-1M.
- * Otherwise, put it at the very end of RAM.
- */
- if (gd->ram_size > 0xfffff000)
- bootpg = 0xfff00000;
- else
- bootpg = gd->ram_size - (1024 * 1024);
-
if (bootpg >= CONFIG_SYS_MAX_DDR_BAT_SIZE) {
/* We're not covered by the DDR mapping, set up BAT */
write_bat(DBAT7, CONFIG_SYS_SCRATCH_VA | BATU_BL_128K |
@@ -65,4 +81,3 @@ void setup_mp(void)
out_be32((uint *)(CONFIG_SYS_CCSRBAR + 0x20), 0x80000000 |
(bootpg >> 12));
}
-#endif
diff --git a/cpu/mpc86xx/mp.h b/cpu/mpc86xx/mp.h
deleted file mode 100644
index 886e0c8..0000000
--- a/cpu/mpc86xx/mp.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __MPC86XX_MP_H_
-#define __MPC86XX_MP_H_
-
-void setup_mp(void);
-void cpu_mp_lmb_reserve(struct lmb *lmb);
-
-#endif
diff --git a/cpu/mpc86xx/release.S b/cpu/mpc86xx/release.S
index 95efbb4..67a6f2b 100644
--- a/cpu/mpc86xx/release.S
+++ b/cpu/mpc86xx/release.S
@@ -41,7 +41,6 @@
* Core 0 must copy this to a 1M aligned region and set BPTR
* to point to it.
*/
-#if (CONFIG_NUM_CPUS > 1)
.align 12
.globl __secondary_start_page
__secondary_start_page:
@@ -166,4 +165,3 @@ invl2:
blr
/* Never Returns, Running in Linux Now */
-#endif
diff --git a/cpu/mpc8xxx/ddr/Makefile b/cpu/mpc8xxx/ddr/Makefile
index b7f8d8c..cb7f856 100644
--- a/cpu/mpc8xxx/ddr/Makefile
+++ b/cpu/mpc8xxx/ddr/Makefile
@@ -18,6 +18,10 @@ COBJS-$(CONFIG_FSL_DDR2) += main.o util.o ctrl_regs.o options.o \
lc_common_dimm_params.o
COBJS-$(CONFIG_FSL_DDR2) += ddr2_dimm_params.o
+COBJS-$(CONFIG_FSL_DDR3) += main.o util.o ctrl_regs.o options.o \
+ lc_common_dimm_params.o
+COBJS-$(CONFIG_FSL_DDR3) += ddr3_dimm_params.o
+
SRCS := $(START:.o=.S) $(SOBJS-y:.o=.S) $(COBJS-y:.o=.c)
OBJS := $(addprefix $(obj),$(SOBJS-y) $(COBJS-y))
diff --git a/cpu/mpc8xxx/ddr/ctrl_regs.c b/cpu/mpc8xxx/ddr/ctrl_regs.c
index 292980d..490e3dc 100644
--- a/cpu/mpc8xxx/ddr/ctrl_regs.c
+++ b/cpu/mpc8xxx/ddr/ctrl_regs.c
@@ -23,11 +23,18 @@ extern unsigned int picos_to_mclk(unsigned int picos);
*
* This should likely be either board or controller specific.
*
- * Rtt(nominal):
+ * Rtt(nominal) - DDR2:
* 0 = Rtt disabled
* 1 = 75 ohm
* 2 = 150 ohm
* 3 = 50 ohm
+ * Rtt(nominal) - DDR3:
+ * 0 = Rtt disabled
+ * 1 = 60 ohm
+ * 2 = 120 ohm
+ * 3 = 40 ohm
+ * 4 = 20 ohm
+ * 5 = 30 ohm
*
* FIXME: Apparently 8641 needs a value of 2
* FIXME: Old code says if 667 MHz or higher, use 3 on 8572
@@ -53,12 +60,37 @@ static inline int fsl_ddr_get_rtt(void)
#elif defined(CONFIG_FSL_DDR2)
rtt = 3;
#else
-#error "Need Rtt value for DDR3"
+ rtt = 0;
#endif
return rtt;
}
+/*
+ * compute the CAS write latency according to DDR3 spec
+ * CWL = 5 if tCK >= 2.5ns
+ * 6 if 2.5ns > tCK >= 1.875ns
+ * 7 if 1.875ns > tCK >= 1.5ns
+ * 8 if 1.5ns > tCK >= 1.25ns
+ */
+static inline unsigned int compute_cas_write_latency(void)
+{
+ unsigned int cwl;
+ const unsigned int mclk_ps = get_memory_clk_period_ps();
+
+ if (mclk_ps >= 2500)
+ cwl = 5;
+ else if (mclk_ps >= 1875)
+ cwl = 6;
+ else if (mclk_ps >= 1500)
+ cwl = 7;
+ else if (mclk_ps >= 1250)
+ cwl = 8;
+ else
+ cwl = 8;
+ return cwl;
+}
+
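A minimal host-side sketch (not part of this commit) of the CWL thresholds implemented by compute_cas_write_latency() above, applied to common DDR3 clock periods; the period is passed in directly instead of being read from the controller.

#include <stdio.h>

static unsigned int cwl_for(unsigned int mclk_ps)
{
        if (mclk_ps >= 2500)            /* tCK >= 2.5 ns */
                return 5;
        if (mclk_ps >= 1875)            /* 2.5 ns > tCK >= 1.875 ns */
                return 6;
        if (mclk_ps >= 1500)            /* 1.875 ns > tCK >= 1.5 ns */
                return 7;
        return 8;                       /* tCK < 1.5 ns */
}

int main(void)
{
        printf("DDR3-800  (2500 ps): CWL = %u\n", cwl_for(2500));  /* 5 */
        printf("DDR3-1066 (1875 ps): CWL = %u\n", cwl_for(1875));  /* 6 */
        printf("DDR3-1333 (1500 ps): CWL = %u\n", cwl_for(1500));  /* 7 */
        printf("DDR3-1600 (1250 ps): CWL = %u\n", cwl_for(1250));  /* 8 */
        return 0;
}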
/* Chip Select Configuration (CSn_CONFIG) */
static void set_csn_config(int i, fsl_ddr_cfg_regs_t *ddr,
const memctl_options_t *popts,
@@ -126,7 +158,7 @@ static void set_csn_config_2(int i, fsl_ddr_cfg_regs_t *ddr)
/* -3E = 667 CL5, -25 = CL6 800, -25E = CL5 800 */
-#if defined(CONFIG_FSL_DDR2)
+#if !defined(CONFIG_FSL_DDR1)
/*
* DDR SDRAM Timing Configuration 0 (TIMING_CFG_0)
*
@@ -150,16 +182,32 @@ static void set_timing_cfg_0(fsl_ddr_cfg_regs_t *ddr)
/* Mode register set cycle time (tMRD). */
unsigned char tmrd_mclk;
- /* (tXARD and tXARDS). Empirical? */
- act_pd_exit_mclk = 2;
-
- /* XXX: tXARD = 2, tXARDS = 7 - AL. * Empirical? */
+#if defined(CONFIG_FSL_DDR3)
+ /*
+ * (tXARD and tXARDS). Empirical?
+ * The DDR3 spec does not define tXARD,
+ * so we use tXP instead of it.
+ * tXP = max(3 nCK, 7.5 ns) for DDR3;
+ * we use tXP = 6.
+ * The spec does not define tAXPD either; we use
+ * tAXPD = 8, which the design needs to confirm.
+ */
+ act_pd_exit_mclk = 6;
pre_pd_exit_mclk = 6;
-
- /* FIXME: tXP = 2 on Micron 667 MHz DIMM */
taxpd_mclk = 8;
-
+ tmrd_mclk = 4;
+#else /* CONFIG_FSL_DDR2 */
+ /*
+ * (tXARD and tXARDS). Empirical?
+ * tXARD = 2 for DDR2
+ * tXP=2
+ * tAXPD=8
+ */
+ act_pd_exit_mclk = 2;
+ pre_pd_exit_mclk = 2;
+ taxpd_mclk = 8;
tmrd_mclk = 2;
+#endif
ddr->timing_cfg_0 = (0
| ((trwt_mclk & 0x3) << 30) /* RWT */
@@ -177,7 +225,8 @@ static void set_timing_cfg_0(fsl_ddr_cfg_regs_t *ddr)
/* DDR SDRAM Timing Configuration 3 (TIMING_CFG_3) */
static void set_timing_cfg_3(fsl_ddr_cfg_regs_t *ddr,
- const common_timing_params_t *common_dimm)
+ const common_timing_params_t *common_dimm,
+ unsigned int cas_latency)
{
/* Extended Activate to precharge interval (tRAS) */
unsigned int ext_acttopre = 0;
@@ -190,6 +239,11 @@ static void set_timing_cfg_3(fsl_ddr_cfg_regs_t *ddr,
ext_acttopre = 1;
ext_refrec = (picos_to_mclk(common_dimm->tRFC_ps) - 8) >> 4;
+
+ /* If the CAS latency is more than 8, use the extended mode */
+ if (cas_latency > 8)
+ ext_caslat = 1;
+
ddr->timing_cfg_3 = (0
| ((ext_acttopre & 0x1) << 24)
| ((ext_refrec & 0xF) << 16)
@@ -201,6 +255,7 @@ static void set_timing_cfg_3(fsl_ddr_cfg_regs_t *ddr,
/* DDR SDRAM Timing Configuration 1 (TIMING_CFG_1) */
static void set_timing_cfg_1(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
const common_timing_params_t *common_dimm,
unsigned int cas_latency)
{
@@ -246,13 +301,42 @@ static void set_timing_cfg_1(fsl_ddr_cfg_regs_t *ddr,
#elif defined(CONFIG_FSL_DDR2)
caslat_ctrl = 2 * cas_latency - 1;
#else
-#error "Need CAS Latency help for DDR3 in fsl_ddr_sdram.c"
+ /*
+ * if the CAS latency is more than 8 cycles,
+ * we need to set the extend bit for it in
+ * TIMING_CFG_3[EXT_CASLAT]
+ */
+ if (cas_latency > 8)
+ cas_latency -= 8;
+ caslat_ctrl = 2 * cas_latency - 1;
#endif
refrec_ctrl = picos_to_mclk(common_dimm->tRFC_ps) - 8;
wrrec_mclk = picos_to_mclk(common_dimm->tWR_ps);
+ if (popts->OTF_burst_chop_en)
+ wrrec_mclk += 2;
+
acttoact_mclk = picos_to_mclk(common_dimm->tRRD_ps);
+ /*
+ * JEDEC has a minimum requirement for tRRD
+ */
+#if defined(CONFIG_FSL_DDR3)
+ if (acttoact_mclk < 4)
+ acttoact_mclk = 4;
+#endif
wrtord_mclk = picos_to_mclk(common_dimm->tWTR_ps);
+ /*
+ * JEDEC has some min requirements for tWTR
+ */
+#if defined(CONFIG_FSL_DDR2)
+ if (wrtord_mclk < 2)
+ wrtord_mclk = 2;
+#elif defined(CONFIG_FSL_DDR3)
+ if (wrtord_mclk < 4)
+ wrtord_mclk = 4;
+#endif
+ if (popts->OTF_burst_chop_en)
+ wrtord_mclk += 2;
ddr->timing_cfg_1 = (0
| ((pretoact_mclk & 0x0F) << 28)
@@ -302,12 +386,27 @@ static void set_timing_cfg_2(fsl_ddr_cfg_regs_t *ddr,
*/
wr_lat = 0;
#elif defined(CONFIG_FSL_DDR2)
- wr_lat = cas_latency + additive_latency - 1;
+ wr_lat = cas_latency - 1;
#else
-#error "Fix WR_LAT for DDR3"
+ wr_lat = compute_cas_write_latency();
#endif
rd_to_pre = picos_to_mclk(common_dimm->tRTP_ps);
+ /*
+ * JEDEC has some min requirements for tRTP
+ */
+#if defined(CONFIG_FSL_DDR2)
+ if (rd_to_pre < 2)
+ rd_to_pre = 2;
+#elif defined(CONFIG_FSL_DDR3)
+ if (rd_to_pre < 4)
+ rd_to_pre = 4;
+#endif
+ if (additive_latency)
+ rd_to_pre += additive_latency;
+ if (popts->OTF_burst_chop_en)
+ rd_to_pre += 2; /* according to UM */
+
wr_data_delay = popts->write_data_delay;
cke_pls = picos_to_mclk(popts->tCKE_clock_pulse_width_ps);
four_act = picos_to_mclk(popts->tFAW_window_four_activates_ps);
@@ -316,8 +415,8 @@ static void set_timing_cfg_2(fsl_ddr_cfg_regs_t *ddr,
| ((add_lat_mclk & 0xf) << 28)
| ((cpo & 0x1f) << 23)
| ((wr_lat & 0xf) << 19)
- | ((rd_to_pre & 0x7) << 13)
- | ((wr_data_delay & 0x7) << 10)
+ | ((rd_to_pre & RD_TO_PRE_MASK) << RD_TO_PRE_SHIFT)
+ | ((wr_data_delay & WR_DATA_DELAY_MASK) << WR_DATA_DELAY_SHIFT)
| ((cke_pls & 0x7) << 6)
| ((four_act & 0x3f) << 0)
);
@@ -363,9 +462,19 @@ static void set_ddr_sdram_cfg(fsl_ddr_cfg_regs_t *ddr,
dyn_pwr = popts->dynamic_power;
dbw = popts->data_bus_width;
- /* DDR3 must use 8-beat bursts when using 32-bit bus mode */
- if ((sdram_type == SDRAM_TYPE_DDR3) && (dbw == 0x1))
- eight_be = 1;
+ /* 8-beat burst enable, DDR3 case:
+ * we must clear it when using on-the-fly mode,
+ * and must set it when using 32-bit bus mode.
+ */
+ if (sdram_type == SDRAM_TYPE_DDR3) {
+ if (popts->burst_length == DDR_BL8)
+ eight_be = 1;
+ if (popts->burst_length == DDR_OTF)
+ eight_be = 0;
+ if (dbw == 0x1)
+ eight_be = 1;
+ }
+
threeT_en = popts->threeT_en;
twoT_en = popts->twoT_en;
ba_intlv_ctl = popts->ba_intlv_ctl;
@@ -428,8 +537,12 @@ static void set_ddr_sdram_cfg_2(fsl_ddr_cfg_regs_t *ddr,
* * ({EXT_REFREC || REFREC} + 8 + 2)]}
* << DDR_SDRAM_INTERVAL[REFINT]
*/
+#if defined(CONFIG_FSL_DDR3)
+ obc_cfg = popts->OTF_burst_chop_en;
+#else
+ obc_cfg = 0;
+#endif
- obc_cfg = 0; /* Make this configurable? */
ap_en = 0; /* Make this configurable? */
#if defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
@@ -442,6 +555,9 @@ static void set_ddr_sdram_cfg_2(fsl_ddr_cfg_regs_t *ddr,
d_init = 0;
#endif
+#if defined(CONFIG_FSL_DDR3)
+ md_en = popts->mirrored_dimm;
+#endif
ddr->ddr_sdram_cfg_2 = (0
| ((frc_sr & 0x1) << 31)
| ((sr_ie & 0x1) << 30)
@@ -464,6 +580,20 @@ static void set_ddr_sdram_mode_2(fsl_ddr_cfg_regs_t *ddr)
unsigned short esdmode2 = 0; /* Extended SDRAM mode 2 */
unsigned short esdmode3 = 0; /* Extended SDRAM mode 3 */
+#if defined(CONFIG_FSL_DDR3)
+ unsigned int rtt_wr = 2; /* 120 ohm Rtt_WR */
+ unsigned int srt = 0; /* self-refresh temperature, normal range */
+ unsigned int asr = 0; /* auto self-refresh disable */
+ unsigned int cwl = compute_cas_write_latency() - 5;
+ unsigned int pasr = 0; /* partial array self refresh disable */
+
+ esdmode2 = (0
+ | ((rtt_wr & 0x3) << 9)
+ | ((srt & 0x1) << 7)
+ | ((asr & 0x1) << 6)
+ | ((cwl & 0x7) << 3)
+ | ((pasr & 0x7) << 0));
+#endif
ddr->ddr_sdram_mode_2 = (0
| ((esdmode2 & 0xFFFF) << 16)
| ((esdmode3 & 0xFFFF) << 0)
@@ -491,6 +621,139 @@ static void set_ddr_sdram_interval(fsl_ddr_cfg_regs_t *ddr,
debug("FSLDDR: ddr_sdram_interval = 0x%08x\n", ddr->ddr_sdram_interval);
}
+#if defined(CONFIG_FSL_DDR3)
+/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */
+static void set_ddr_sdram_mode(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const common_timing_params_t *common_dimm,
+ unsigned int cas_latency,
+ unsigned int additive_latency)
+{
+ unsigned short esdmode; /* Extended SDRAM mode */
+ unsigned short sdmode; /* SDRAM mode */
+
+ /* Mode Register - MR1 */
+ unsigned int qoff = 0; /* Output buffer enable 0=yes, 1=no */
+ unsigned int tdqs_en = 0; /* TDQS Enable: 0=no, 1=yes */
+ unsigned int rtt;
+ unsigned int wrlvl_en = 0; /* Write level enable: 0=no, 1=yes */
+ unsigned int al = 0; /* Posted CAS# additive latency (AL) */
+ unsigned int dic = 1; /* Output driver impedance, 34ohm */
+ unsigned int dll_en = 0; /* DLL Enable 0=Enable (Normal),
+ 1=Disable (Test/Debug) */
+
+ /* Mode Register - MR0 */
+ unsigned int dll_on; /* DLL control for precharge PD, 0=off, 1=on */
+ unsigned int wr; /* Write Recovery */
+ unsigned int dll_rst; /* DLL Reset */
+ unsigned int mode; /* Normal=0 or Test=1 */
+ unsigned int caslat = 4; /* CAS# latency, defaults to the encoding for 6 cycles */
+ /* BT: Burst Type (0=Nibble Sequential, 1=Interleaved) */
+ unsigned int bt;
+ unsigned int bl; /* BL: Burst Length */
+
+ unsigned int wr_mclk;
+
+ const unsigned int mclk_ps = get_memory_clk_period_ps();
+
+ rtt = fsl_ddr_get_rtt();
+ if (popts->rtt_override)
+ rtt = popts->rtt_override_value;
+
+ if (additive_latency == (cas_latency - 1))
+ al = 1;
+ if (additive_latency == (cas_latency - 2))
+ al = 2;
+
+ /*
+ * The esdmode value will also be used for writing
+ * MR1 during write leveling for DDR3, although the
+ * bits specifically related to the write leveling
+ * scheme will be handled automatically by the DDR
+ * controller, so we set wrlvl_en = 0 here.
+ */
+ esdmode = (0
+ | ((qoff & 0x1) << 12)
+ | ((tdqs_en & 0x1) << 11)
+ | ((rtt & 0x4) << 9) /* rtt field is split */
+ | ((wrlvl_en & 0x1) << 7)
+ | ((rtt & 0x2) << 6) /* rtt field is split */
+ | ((dic & 0x2) << 5) /* DIC field is split */
+ | ((al & 0x3) << 3)
+ | ((rtt & 0x1) << 2) /* rtt field is split */
+ | ((dic & 0x1) << 1) /* DIC field is split */
+ | ((dll_en & 0x1) << 0)
+ );
+
+ /*
+ * DLL control for precharge PD
+ * 0=slow exit DLL off (tXPDLL)
+ * 1=fast exit DLL on (tXP)
+ */
+ dll_on = 1;
+ wr_mclk = (common_dimm->tWR_ps + mclk_ps - 1) / mclk_ps;
+ if (wr_mclk >= 12)
+ wr = 6;
+ else if (wr_mclk >= 9)
+ wr = 5;
+ else
+ wr = wr_mclk - 4;
+ dll_rst = 0; /* dll no reset */
+ mode = 0; /* normal mode */
+
+ /* look up table to get the cas latency bits */
+ if (cas_latency >= 5 && cas_latency <= 11) {
+ unsigned char cas_latency_table[7] = {
+ 0x2, /* 5 clocks */
+ 0x4, /* 6 clocks */
+ 0x6, /* 7 clocks */
+ 0x8, /* 8 clocks */
+ 0xa, /* 9 clocks */
+ 0xc, /* 10 clocks */
+ 0xe /* 11 clocks */
+ };
+ caslat = cas_latency_table[cas_latency - 5];
+ }
+ bt = 0; /* Nibble sequential */
+
+ switch (popts->burst_length) {
+ case DDR_BL8:
+ bl = 0;
+ break;
+ case DDR_OTF:
+ bl = 1;
+ break;
+ case DDR_BC4:
+ bl = 2;
+ break;
+ default:
+ printf("Error: invalid burst length of %u specified. "
+ " Defaulting to on-the-fly BC4 or BL8 beats.\n",
+ popts->burst_length);
+ bl = 1;
+ break;
+ }
+
+ sdmode = (0
+ | ((dll_on & 0x1) << 12)
+ | ((wr & 0x7) << 9)
+ | ((dll_rst & 0x1) << 8)
+ | ((mode & 0x1) << 7)
+ | (((caslat >> 1) & 0x7) << 4)
+ | ((bt & 0x1) << 3)
+ | ((bl & 0x3) << 0)
+ );
+
+ ddr->ddr_sdram_mode = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+
+ debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode);
+}
+
+#else /* !CONFIG_FSL_DDR3 */
+
/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */
static void set_ddr_sdram_mode(fsl_ddr_cfg_regs_t *ddr,
const memctl_options_t *popts,
@@ -567,8 +830,6 @@ static void set_ddr_sdram_mode(fsl_ddr_cfg_regs_t *ddr,
wr = 0; /* Historical */
#elif defined(CONFIG_FSL_DDR2)
wr = (common_dimm->tWR_ps + mclk_ps - 1) / mclk_ps - 1;
-#else
-#error "Write tWR_auto for DDR3"
#endif
dll_res = 0;
mode = 0;
@@ -587,16 +848,14 @@ static void set_ddr_sdram_mode(fsl_ddr_cfg_regs_t *ddr,
}
#elif defined(CONFIG_FSL_DDR2)
caslat = cas_latency;
-#else
-#error "Fix the mode CAS Latency for DDR3"
#endif
bt = 0;
switch (popts->burst_length) {
- case 4:
+ case DDR_BL4:
bl = 2;
break;
- case 8:
+ case DDR_BL8:
bl = 3;
break;
default:
@@ -624,7 +883,7 @@ static void set_ddr_sdram_mode(fsl_ddr_cfg_regs_t *ddr,
);
debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode);
}
-
+#endif
/* DDR SDRAM Data Initialization (DDR_DATA_INIT) */
static void set_ddr_data_init(fsl_ddr_cfg_regs_t *ddr)
@@ -678,6 +937,12 @@ static void set_timing_cfg_4(fsl_ddr_cfg_regs_t *ddr)
unsigned int wwt = 0; /* Write-to-write turnaround for same CS */
unsigned int dll_lock = 0; /* DDR SDRAM DLL Lock Time */
+#if defined(CONFIG_FSL_DDR3)
+ /* We need set BL/2 + 4 for BC4 or OTF */
+ rrt = 4; /* BL/2 + 4 clocks */
+ wwt = 4; /* BL/2 + 4 clocks */
+ dll_lock = 1; /* tDLLK = 512 clocks from spec */
+#endif
ddr->timing_cfg_4 = (0
| ((rwt & 0xf) << 28)
| ((wrt & 0xf) << 24)
@@ -696,6 +961,13 @@ static void set_timing_cfg_5(fsl_ddr_cfg_regs_t *ddr)
unsigned int wodt_on = 0; /* Write to ODT on */
unsigned int wodt_off = 0; /* Write to ODT off */
+#if defined(CONFIG_FSL_DDR3)
+ rodt_on = 3; /* 2 clocks */
+ rodt_off = 4; /* 4 clocks */
+ wodt_on = 2; /* 1 clock */
+ wodt_off = 4; /* 4 clocks */
+#endif
+
ddr->timing_cfg_5 = (0
| ((rodt_on & 0x1f) << 24)
| ((rodt_off & 0x7) << 20)
@@ -706,15 +978,20 @@ static void set_timing_cfg_5(fsl_ddr_cfg_regs_t *ddr)
}
/* DDR ZQ Calibration Control (DDR_ZQ_CNTL) */
-static void set_ddr_zq_cntl(fsl_ddr_cfg_regs_t *ddr)
+static void set_ddr_zq_cntl(fsl_ddr_cfg_regs_t *ddr, unsigned int zq_en)
{
- unsigned int zq_en = 0; /* ZQ Calibration Enable */
unsigned int zqinit = 0;/* POR ZQ Calibration Time (tZQinit) */
/* Normal Operation Full Calibration Time (tZQoper) */
unsigned int zqoper = 0;
/* Normal Operation Short Calibration Time (tZQCS) */
unsigned int zqcs = 0;
+ if (zq_en) {
+ zqinit = 9; /* 512 clocks */
+ zqoper = 8; /* 256 clocks */
+ zqcs = 6; /* 64 clocks */
+ }
+
ddr->ddr_zq_cntl = (0
| ((zq_en & 0x1) << 31)
| ((zqinit & 0xF) << 24)
@@ -724,9 +1001,9 @@ static void set_ddr_zq_cntl(fsl_ddr_cfg_regs_t *ddr)
}
/* DDR Write Leveling Control (DDR_WRLVL_CNTL) */
-static void set_ddr_wrlvl_cntl(fsl_ddr_cfg_regs_t *ddr)
+static void set_ddr_wrlvl_cntl(fsl_ddr_cfg_regs_t *ddr,
+ unsigned int wrlvl_en)
{
- unsigned int wrlvl_en = 0; /* Write Leveling Enable */
/*
* First DQS pulse rising edge after margining mode
* is programmed (tWL_MRD)
@@ -743,6 +1020,34 @@ static void set_ddr_wrlvl_cntl(fsl_ddr_cfg_regs_t *ddr)
/* WRLVL_START: Write leveling start time */
unsigned int wrlvl_start = 0;
+ /* suggest enable write leveling for DDR3 due to fly-by topology */
+ if (wrlvl_en) {
+ /* tWL_MRD min = 40 nCK, we set it 64 */
+ wrlvl_mrd = 0x6;
+ /* tWL_ODTEN 128 */
+ wrlvl_odten = 0x7;
+ /* tWL_DQSEN min = 25 nCK, we set it 32 */
+ wrlvl_dqsen = 0x5;
+ /*
+ * The write leveling sample time needs at least 14 clocks
+ * because tWLO = 9; we set it to 15 clocks
+ */
+ wrlvl_smpl = 0xf;
+ /*
+ * Write leveling repetition time:
+ * at least tWLO + 6 clocks;
+ * we set it to 32
+ */
+ wrlvl_wlr = 0x5;
+ /*
+ * Write leveling start time:
+ * the value used for DQS_ADJUST for the first sample
+ * when write leveling is enabled.
+ * We set a 1 clock delay.
+ */
+ wrlvl_start = 0x8;
+ }
+
ddr->ddr_wrlvl_cntl = (0
| ((wrlvl_en & 0x1) << 31)
| ((wrlvl_mrd & 0x7) << 24)
@@ -861,6 +1166,8 @@ compute_fsl_memctl_config_regs(const memctl_options_t *popts,
unsigned int cas_latency;
unsigned int additive_latency;
unsigned int sr_it;
+ unsigned int zq_en;
+ unsigned int wrlvl_en;
memset(ddr, 0, sizeof(fsl_ddr_cfg_regs_t));
@@ -885,6 +1192,10 @@ compute_fsl_memctl_config_regs(const memctl_options_t *popts,
sr_it = (popts->auto_self_refresh_en)
? popts->sr_it
: 0;
+ /* ZQ calibration */
+ zq_en = (popts->zq_en) ? 1 : 0;
+ /* write leveling */
+ wrlvl_en = (popts->wrlvl_en) ? 1 : 0;
/* Chip Select Memory Bounds (CSn_BNDS) */
for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
@@ -1019,12 +1330,12 @@ compute_fsl_memctl_config_regs(const memctl_options_t *popts,
set_csn_config_2(i, ddr);
}
-#if defined(CONFIG_FSL_DDR2)
+#if !defined(CONFIG_FSL_DDR1)
set_timing_cfg_0(ddr);
#endif
- set_timing_cfg_3(ddr, common_dimm);
- set_timing_cfg_1(ddr, common_dimm, cas_latency);
+ set_timing_cfg_3(ddr, common_dimm, cas_latency);
+ set_timing_cfg_1(ddr, popts, common_dimm, cas_latency);
set_timing_cfg_2(ddr, popts, common_dimm,
cas_latency, additive_latency);
@@ -1042,8 +1353,8 @@ compute_fsl_memctl_config_regs(const memctl_options_t *popts,
set_timing_cfg_4(ddr);
set_timing_cfg_5(ddr);
- set_ddr_zq_cntl(ddr);
- set_ddr_wrlvl_cntl(ddr);
+ set_ddr_zq_cntl(ddr, zq_en);
+ set_ddr_wrlvl_cntl(ddr, wrlvl_en);
set_ddr_pd_cntl(ddr);
set_ddr_sr_cntr(ddr, sr_it);
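A minimal host-side sketch (not part of this commit) of the write-recovery rounding used by the new DDR3 set_ddr_sdram_mode() above: wr_mclk is tWR rounded up to whole clocks, then mapped to the MR0 WR field exactly as the patch does. The tWR and tCK values below are hypothetical inputs.

#include <stdio.h>

int main(void)
{
        unsigned int tWR_ps = 15000;    /* hypothetical tWR = 15 ns */
        unsigned int mclk_ps = 1500;    /* hypothetical tCK = 1.5 ns (DDR3-1333) */
        unsigned int wr_mclk = (tWR_ps + mclk_ps - 1) / mclk_ps;   /* 10 clocks */
        unsigned int wr;

        if (wr_mclk >= 12)
                wr = 6;
        else if (wr_mclk >= 9)
                wr = 5;
        else
                wr = wr_mclk - 4;

        printf("wr_mclk = %u -> MR0 WR field = %u\n", wr_mclk, wr);  /* 10 -> 5 */
        return 0;
}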
diff --git a/cpu/mpc8xxx/ddr/ddr3_dimm_params.c b/cpu/mpc8xxx/ddr/ddr3_dimm_params.c
new file mode 100644
index 0000000..ca4be78
--- /dev/null
+++ b/cpu/mpc8xxx/ddr/ddr3_dimm_params.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright (C) 2008 Freescale Semiconductor, Inc.
+ * Dave Liu <daveliu@freescale.com>
+ *
+ * Calculate the organization and timing parameters
+ * from DDR3 SPD data. Please refer to the spec:
+ * JEDEC standard No.21-C 4_01_02_11R18.pdf
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <asm/fsl_ddr_sdram.h>
+
+#include "ddr.h"
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * each rank size =
+ * sdram capacity(bit) / 8 * primary bus width / sdram width
+ *
+ * where: sdram capacity = spd byte4[3:0]
+ * primary bus width = spd byte8[2:0]
+ * sdram width = spd byte7[2:0]
+ *
+ * SPD byte4 - sdram density and banks
+ * bit[3:0] size(bit) size(byte)
+ * 0000 256Mb 32MB
+ * 0001 512Mb 64MB
+ * 0010 1Gb 128MB
+ * 0011 2Gb 256MB
+ * 0100 4Gb 512MB
+ * 0101 8Gb 1GB
+ * 0110 16Gb 2GB
+ *
+ * SPD byte8 - module memory bus width
+ * bit[2:0] primary bus width
+ * 000 8bits
+ * 001 16bits
+ * 010 32bits
+ * 011 64bits
+ *
+ * SPD byte7 - module organization
+ * bit[2:0] sdram device width
+ * 000 4bits
+ * 001 8bits
+ * 010 16bits
+ * 011 32bits
+ *
+ */
+static phys_size_t
+compute_ranksize(const ddr3_spd_eeprom_t *spd)
+{
+ phys_size_t bsize;
+
+ int nbit_sdram_cap_bsize = 0;
+ int nbit_primary_bus_width = 0;
+ int nbit_sdram_width = 0;
+
+ if ((spd->density_banks & 0xf) < 7)
+ nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
+ if ((spd->bus_width & 0x7) < 4)
+ nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
+ if ((spd->organization & 0x7) < 4)
+ nbit_sdram_width = (spd->organization & 0x7) + 2;
+
+ bsize = 1 << (nbit_sdram_cap_bsize - 3
+ + nbit_primary_bus_width - nbit_sdram_width);
+
+ debug("DDR: DDR III rank density = 0x%08x\n", bsize);
+
+ return bsize;
+}
+
+/*
+ * ddr_compute_dimm_parameters for DDR3 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the dimm_params_t structure pointed by pdimm.
+ *
+ */
+unsigned int
+ddr_compute_dimm_parameters(const ddr3_spd_eeprom_t *spd,
+ dimm_params_t *pdimm,
+ unsigned int dimm_number)
+{
+ unsigned int retval;
+ unsigned int mtb_ps;
+
+ if (spd->mem_type) {
+ if (spd->mem_type != SPD_MEMTYPE_DDR3) {
+ printf("DIMM %u: is not a DDR3 SPD.\n", dimm_number);
+ return 1;
+ }
+ } else {
+ memset(pdimm, 0, sizeof(dimm_params_t));
+ return 1;
+ }
+
+ retval = ddr3_spd_check(spd);
+ if (retval) {
+ printf("DIMM %u: failed checksum\n", dimm_number);
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
+ if ((spd->bus_width >> 3) & 0x3)
+ pdimm->ec_sdram_width = 8;
+ else
+ pdimm->ec_sdram_width = 0;
+ pdimm->data_width = pdimm->primary_sdram_width
+ + pdimm->ec_sdram_width;
+
+ switch (spd->module_type & 0xf) {
+ case 0x01: /* RDIMM */
+ case 0x05: /* Mini-RDIMM */
+ pdimm->registered_dimm = 1; /* register buffered */
+ break;
+
+ case 0x02: /* UDIMM */
+ case 0x03: /* SO-DIMM */
+ case 0x04: /* Micro-DIMM */
+ case 0x06: /* Mini-UDIMM */
+ pdimm->registered_dimm = 0; /* unbuffered */
+ break;
+
+ default:
+ printf("unknown dimm_type 0x%02X\n", spd->module_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+ pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+ pdimm->n_banks_per_sdram_device = 8 << ((spd->density_banks >> 4) & 0x7);
+
+ /*
+ * The SPD spec does not have an ECC bit,
+ * so we consider the DIMM ECC-capable
+ * when the extension bus exists.
+ */
+ if (pdimm->ec_sdram_width)
+ pdimm->edc_config = 0x02;
+ else
+ pdimm->edc_config = 0x00;
+
+ /*
+ * The SPD spec does not have a burst length byte,
+ * but the DDR3 spec natively supports BL8 and BC4:
+ * BL8 - bit3, BC4 - bit2
+ */
+ pdimm->burst_lengths_bitmask = 0x0c;
+ pdimm->row_density = __ilog2(pdimm->rank_density);
+
+ /* MTB - medium timebase
+ * The unit in the SPD spec is ns,
+ * We convert it to ps.
+ * eg: MTB = 0.125ns (125ps)
+ */
+ mtb_ps = (spd->mtb_dividend * 1000) / spd->mtb_divisor;
+ pdimm->mtb_ps = mtb_ps;
+
+ /*
+ * sdram minimum cycle time
+ * we assume the MTB is 0.125ns
+ * eg:
+ * tCK_min=15 MTB (1.875ns) ->DDR3-1066
+ * =12 MTB (1.5ns) ->DDR3-1333
+ * =10 MTB (1.25ns) ->DDR3-1600
+ */
+ pdimm->tCKmin_X_ps = spd->tCK_min * mtb_ps;
+
+ /*
+ * CAS latency supported
+ * bit4 - CL4
+ * bit5 - CL5
+ * bit18 - CL18
+ */
+ pdimm->caslat_X = ((spd->caslat_msb << 8) | spd->caslat_lsb) << 4;
+
+ /*
+ * min CAS latency time
+ * eg: tAA_min =
+ * DDR3-800D 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->tAA_ps = spd->tAA_min * mtb_ps;
+
+ /*
+ * min write recovery time
+ * eg:
+ * tWR_min = 120 MTB (15ns) -> all speed grades.
+ */
+ pdimm->tWR_ps = spd->tWR_min * mtb_ps;
+
+ /*
+ * min RAS to CAS delay time
+ * eg: tRCD_min =
+ * DDR3-800 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25)
+ */
+ pdimm->tRCD_ps = spd->tRCD_min * mtb_ps;
+
+ /*
+ * min row active to row active delay time
+ * eg: tRRD_min =
+ * DDR3-800(1KB page) 80 MTB (10ns)
+ * DDR3-1333(1KB page) 48 MTB (6ns)
+ */
+ pdimm->tRRD_ps = spd->tRRD_min * mtb_ps;
+
+ /*
+ * min row precharge delay time
+ * eg: tRP_min =
+ * DDR3-800D 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->tRP_ps = spd->tRP_min * mtb_ps;
+
+ /* min active to precharge delay time
+ * eg: tRAS_min =
+ * DDR3-800D 300 MTB (37.5ns)
+ * DDR3-1066F 300 MTB (37.5ns)
+ * DDR3-1333H 288 MTB (36ns)
+ * DDR3-1600H 280 MTB (35ns)
+ */
+ pdimm->tRAS_ps = (((spd->tRAS_tRC_ext & 0xf) << 8) | spd->tRAS_min_lsb)
+ * mtb_ps;
+ /*
+ * min active to active/refresh delay time
+ * eg: tRC_min =
+ * DDR3-800D 400 MTB (50ns)
+ * DDR3-1066F 405 MTB (50.625ns)
+ * DDR3-1333H 396 MTB (49.5ns)
+ * DDR3-1600H 370 MTB (46.25ns)
+ */
+ pdimm->tRC_ps = (((spd->tRAS_tRC_ext & 0xf0) << 4) | spd->tRC_min_lsb)
+ * mtb_ps;
+ /*
+ * min refresh recovery delay time
+ * eg: tRFC_min =
+ * 512Mb 720 MTB (90ns)
+ * 1Gb 880 MTB (110ns)
+ * 2Gb 1280 MTB (160ns)
+ */
+ pdimm->tRFC_ps = ((spd->tRFC_min_msb << 8) | spd->tRFC_min_lsb)
+ * mtb_ps;
+ /*
+ * min internal write to read command delay time
+ * eg: tWTR_min = 40 MTB (7.5ns) - all speed bins.
+ * tWTR is at least 4 mclk independent of operating freq.
+ */
+ pdimm->tWTR_ps = spd->tWTR_min * mtb_ps;
+
+ /*
+ * min internal read to precharge command delay time
+ * eg: tRTP_min = 40 MTB (7.5ns) - all speed bins.
+ * tRTP is at least 4 mclk independent of operating freq.
+ */
+ pdimm->tRTP_ps = spd->tRTP_min * mtb_ps;
+
+ /*
+ * Average periodic refresh interval
+ * tREFI = 7.8 us at normal temperature range
+ * = 3.9 us at ext temperature range
+ */
+ pdimm->refresh_rate_ps = 7800000;
+
+ /*
+ * min four active window delay time
+ * eg: tFAW_min =
+ * DDR3-800(1KB page) 320 MTB (40ns)
+ * DDR3-1066(1KB page) 300 MTB (37.5ns)
+ * DDR3-1333(1KB page) 240 MTB (30ns)
+ * DDR3-1600(1KB page) 240 MTB (30ns)
+ */
+ pdimm->tFAW_ps = (((spd->tFAW_msb & 0xf) << 8) | spd->tFAW_min)
+ * mtb_ps;
+
+ /*
+ * We need to check address mirroring for unbuffered DIMMs.
+ * If the SPD indicates address map mirroring, the DDR controller
+ * needs to take care of it.
+ */
+ if ((spd->module_type == SPD_MODULETYPE_UDIMM) ||
+ (spd->module_type == SPD_MODULETYPE_SODIMM) ||
+ (spd->module_type == SPD_MODULETYPE_MICRODIMM) ||
+ (spd->module_type == SPD_MODULETYPE_MINIUDIMM))
+ pdimm->mirrored_dimm = spd->mod_section.unbuffered.addr_mapping & 0x1;
+
+ return 0;
+}
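A minimal host-side sketch (not part of this commit) of the compute_ranksize() arithmetic above, worked for a hypothetical DIMM built from 1 Gb devices (SPD byte4[3:0] = 2) with a 64-bit primary bus (byte8[2:0] = 3) and x8 devices (byte7[2:0] = 1): 1 << (30 - 3 + 6 - 3) = 1 GB per rank.

#include <stdio.h>

int main(void)
{
        unsigned int density_banks = 0x2;   /* 1 Gb devices -> 30 capacity bits */
        unsigned int bus_width     = 0x3;   /* 64-bit primary bus -> 6 bits */
        unsigned int organization  = 0x1;   /* x8 devices -> 3 bits */

        unsigned long long bsize = 1ULL <<
                (((density_banks & 0xf) + 28) - 3 +
                 ((bus_width & 0x7) + 3) -
                 ((organization & 0x7) + 2));

        printf("rank size = %llu MB\n", bsize >> 20);   /* 1024 MB */
        return 0;
}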
diff --git a/cpu/mpc8xxx/ddr/lc_common_dimm_params.c b/cpu/mpc8xxx/ddr/lc_common_dimm_params.c
index fbeb236..e888e3e 100644
--- a/cpu/mpc8xxx/ddr/lc_common_dimm_params.c
+++ b/cpu/mpc8xxx/ddr/lc_common_dimm_params.c
@@ -11,6 +11,59 @@
#include "ddr.h"
+unsigned int
+compute_cas_latency_ddr3(const dimm_params_t *dimm_params,
+ common_timing_params_t *outpdimm,
+ unsigned int number_of_dimms)
+{
+ unsigned int i;
+ unsigned int tAAmin_ps = 0;
+ unsigned int tCKmin_X_ps = 0;
+ unsigned int common_caslat;
+ unsigned int caslat_actual;
+ unsigned int retry = 16;
+ unsigned int tmp;
+ const unsigned int mclk_ps = get_memory_clk_period_ps();
+
+ /* compute the common CAS latency supported between slots */
+ tmp = dimm_params[0].caslat_X;
+ for (i = 1; i < number_of_dimms; i++)
+ tmp &= dimm_params[i].caslat_X;
+ common_caslat = tmp;
+
+ /* compute the max tAAmin tCKmin between slots */
+ for (i = 0; i < number_of_dimms; i++) {
+ tAAmin_ps = max(tAAmin_ps, dimm_params[i].tAA_ps);
+ tCKmin_X_ps = max(tCKmin_X_ps, dimm_params[i].tCKmin_X_ps);
+ }
+ /* validate if the memory clk is in the range of dimms */
+ if (mclk_ps < tCKmin_X_ps) {
+ printf("The DIMM max tCKmin is %d ps, "
+ "which does not support the MCLK cycle of %d ps\n",
+ tCKmin_X_ps, mclk_ps);
+ return 1;
+ }
+ /* determine the actual CAS latency */
+ caslat_actual = (tAAmin_ps + mclk_ps - 1) / mclk_ps;
+ /* check if the dimms support the CAS latency */
+ while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
+ caslat_actual++;
+ retry--;
+ }
+ /* once the calculation of caslat_actual is completed,
+ * we must verify that this CAS latency value does not
+ * exceed tAAmax, which is 20 ns for all DDR3 speed grades
+ */
+ if (caslat_actual * mclk_ps > 20000) {
+ printf("The chosen CAS latency %d is too large\n",
+ caslat_actual);
+ return 1;
+ }
+ outpdimm->lowest_common_SPD_caslat = caslat_actual;
+
+ return 0;
+}
+
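A minimal host-side sketch (not part of this commit) of the selection loop in compute_cas_latency_ddr3() above, worked for a hypothetical DDR3-1066 configuration: tAAmin = 13125 ps, mclk = 1875 ps and a DIMM CL mask covering CL6..CL10 give ceil(13125/1875) = 7, CL7 is supported, and 7 * 1875 ps is below the 20 ns tAAmax, so CL7 is chosen.

#include <stdio.h>

int main(void)
{
        unsigned int tAAmin_ps = 13125;       /* hypothetical worst-case tAAmin */
        unsigned int mclk_ps = 1875;          /* hypothetical DDR3-1066 clock period */
        unsigned int common_caslat = 0x07c0;  /* bits 6..10 set -> CL6..CL10 supported */
        unsigned int caslat = (tAAmin_ps + mclk_ps - 1) / mclk_ps;
        unsigned int retry = 16;

        while (!(common_caslat & (1 << caslat)) && retry--)
                caslat++;                     /* bump until a supported CL is found */

        if (caslat * mclk_ps > 20000)         /* tAAmax = 20 ns for all speed grades */
                printf("CAS latency %u is too large\n", caslat);
        else
                printf("chosen CAS latency: CL%u\n", caslat);   /* CL7 */
        return 0;
}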
/*
* compute_lowest_common_dimm_parameters()
*
@@ -46,12 +99,14 @@ compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
unsigned int tQHS_ps = 0;
unsigned int temp1, temp2;
- unsigned int lowest_good_caslat;
unsigned int additive_latency = 0;
+#if !defined(CONFIG_FSL_DDR3)
const unsigned int mclk_ps = get_memory_clk_period_ps();
+ unsigned int lowest_good_caslat;
unsigned int not_ok;
debug("using mclk_ps = %u\n", mclk_ps);
+#endif
temp1 = 0;
for (i = 0; i < number_of_dimms; i++) {
@@ -164,6 +219,10 @@ compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
"DIMMs detected!\n");
}
+#if defined(CONFIG_FSL_DDR3)
+ if (compute_cas_latency_ddr3(dimm_params, outpdimm, number_of_dimms))
+ return 1;
+#else
/*
* Compute a CAS latency suitable for all DIMMs
*
@@ -281,6 +340,7 @@ compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
}
outpdimm->highest_common_derated_caslat = temp1;
debug("highest common dereated CAS latency = %u\n", temp1);
+#endif /* #if defined(CONFIG_FSL_DDR3) */
/* Determine if all DIMMs ECC capable. */
temp1 = 1;
@@ -297,14 +357,14 @@ compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
}
outpdimm->all_DIMMs_ECC_capable = temp1;
-
+#ifndef CONFIG_FSL_DDR3
/* FIXME: move to somewhere else to validate. */
if (mclk_ps > tCKmax_max_ps) {
printf("Warning: some of the installed DIMMs "
"can not operate this slowly.\n");
return 1;
}
-
+#endif
/*
* Compute additive latency.
*
@@ -314,7 +374,7 @@ compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
* which comes from Trcd, and also note that:
* add_lat + caslat must be >= 4
*
- * For DDR3, FIXME additive latency determination
+ * For DDR3, we use the AL=0
*
* When to use additive latency for DDR2:
*
@@ -371,7 +431,11 @@ compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
}
#elif defined(CONFIG_FSL_DDR3)
-error "FIXME determine additive latency for DDR3"
+ /*
+ * The system will not use the global auto-precharge mode.
+ * However, it uses the page mode, so we set AL=0
+ */
+ additive_latency = 0;
#endif
/*
diff --git a/cpu/mpc8xxx/ddr/options.c b/cpu/mpc8xxx/ddr/options.c
index 29d4143..db44291 100644
--- a/cpu/mpc8xxx/ddr/options.c
+++ b/cpu/mpc8xxx/ddr/options.c
@@ -96,10 +96,8 @@ unsigned int populate_memctl_options(int all_DIMMs_registered,
*/
#if defined(CONFIG_FSL_DDR1)
popts->DQS_config = 0;
-#elif defined(CONFIG_FSL_DDR2)
+#elif defined(CONFIG_FSL_DDR2) || defined(CONFIG_FSL_DDR3)
popts->DQS_config = 1;
-#else
-#error "Fix DQS for DDR3"
#endif
/* Choose self-refresh during sleep. */
@@ -112,7 +110,17 @@ unsigned int populate_memctl_options(int all_DIMMs_registered,
popts->data_bus_width = 0;
/* Choose burst length. */
- popts->burst_length = 4; /* has to be 4 for DDR2 */
+#if defined(CONFIG_FSL_DDR3)
+ popts->OTF_burst_chop_en = 1; /* on-the-fly burst chop */
+ popts->burst_length = DDR_OTF; /* on-the-fly BC4 and BL8 */
+#else
+ popts->burst_length = DDR_BL4; /* has to be 4 for DDR2 */
+#endif
+
+ /* Choose ddr controller address mirror mode */
+#if defined(CONFIG_FSL_DDR3)
+ popts->mirrored_dimm = pdimm[0].mirrored_dimm;
+#endif
/* Global Timing Parameters. */
debug("mclk_ps = %u ps\n", get_memory_clk_period_ps());
@@ -181,7 +189,17 @@ unsigned int populate_memctl_options(int all_DIMMs_registered,
popts->tFAW_window_four_activates_ps = 37500;
#elif defined(CONFIG_FSL_DDR3)
-#error "FIXME determine four activates for DDR3"
+ popts->tFAW_window_four_activates_ps = pdimm[0].tFAW_ps;
+#endif
+ popts->zq_en = 0;
+ popts->wrlvl_en = 0;
+#if defined(CONFIG_FSL_DDR3)
+ /*
+ * Because DDR3 DIMMs use a fly-by topology,
+ * we suggest enabling write leveling to
+ * meet tQDSS under different loadings.
+ */
+ popts->wrlvl_en = 1;
#endif
/*