author    Minkyu Kang <mk7.kang@samsung.com>  2010-04-19 10:26:18 +0900
committer Minkyu Kang <mk7.kang@samsung.com>  2010-04-19 10:26:18 +0900
commit    83653121d7382fccfe329cb732f77f116341ef1d (patch)
tree      0b1ce6764252af15dfb2614372de98a44a7ec61f /arch/ppc/cpu/mpc8xxx
parent    0f1f21a345e02a68ec16f7ab9e7dc687f9276089 (diff)
parent    07739bcef5da07cc4a4edef8b91014ccc332eda3 (diff)
Merge branch 'master' of git://git.denx.de/u-boot-arm
Conflicts:
    cpu/arm1176/cpu.c
    cpu/arm1176/start.S
    cpu/arm_cortexa8/s5pc1xx/Makefile
    cpu/arm_cortexa8/s5pc1xx/clock.c
    drivers/serial/serial_s5p.c
    include/asm-arm/arch-s5pc1xx/clk.h
    include/asm-arm/arch-s5pc1xx/gpio.h
    include/asm-arm/arch-s5pc1xx/uart.h

Signed-off-by: Minkyu Kang <mk7.kang@samsung.com>
Diffstat (limited to 'arch/ppc/cpu/mpc8xxx')
-rw-r--r--  arch/ppc/cpu/mpc8xxx/Makefile                    |   27
-rw-r--r--  arch/ppc/cpu/mpc8xxx/cpu.c                       |  144
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/Makefile                |   35
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/common_timing_params.h  |   53
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/ctrl_regs.c             | 1366
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/ddr.h                   |   81
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/ddr1_dimm_params.c      |  343
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/ddr2_dimm_params.c      |  339
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/ddr3_dimm_params.c      |  314
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/lc_common_dimm_params.c |  468
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/main.c                  |  479
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/options.c               |  297
-rw-r--r--  arch/ppc/cpu/mpc8xxx/ddr/util.c                  |  206
-rw-r--r--  arch/ppc/cpu/mpc8xxx/fdt.c                       |   55
-rw-r--r--  arch/ppc/cpu/mpc8xxx/pci_cfg.c                   |  214
15 files changed, 4421 insertions, 0 deletions
diff --git a/arch/ppc/cpu/mpc8xxx/Makefile b/arch/ppc/cpu/mpc8xxx/Makefile
new file mode 100644
index 0000000..481f9e5
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/Makefile
@@ -0,0 +1,27 @@
+#
+# Copyright 2009 Freescale Semiconductor, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# Version 2 as published by the Free Software Foundation.
+#
+
+include $(TOPDIR)/config.mk
+
+LIB = $(obj)lib8xxx.a
+
+COBJS-y += cpu.o
+COBJS-$(CONFIG_OF_LIBFDT) += fdt.o
+COBJS-$(CONFIG_PCI) += pci_cfg.o
+
+SRCS := $(START:.o=.S) $(SOBJS-y:.o=.S) $(COBJS-y:.o=.c)
+OBJS := $(addprefix $(obj),$(SOBJS-y) $(COBJS-y))
+
+all: $(obj).depend $(LIB)
+
+$(LIB): $(OBJS)
+ $(AR) $(ARFLAGS) $@ $(OBJS)
+
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
diff --git a/arch/ppc/cpu/mpc8xxx/cpu.c b/arch/ppc/cpu/mpc8xxx/cpu.c
new file mode 100644
index 0000000..fef062b
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/cpu.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2009-2010 Freescale Semiconductor, Inc.
+ *
+ * This file is derived from arch/ppc/cpu/mpc85xx/cpu.c and
+ * arch/ppc/cpu/mpc86xx/cpu.c. Basically this file contains
+ * cpu specific common code for 85xx/86xx processors.
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <config.h>
+#include <common.h>
+#include <command.h>
+#include <tsec.h>
+#include <netdev.h>
+#include <asm/cache.h>
+#include <asm/io.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+struct cpu_type cpu_type_list [] = {
+#if defined(CONFIG_MPC85xx)
+ CPU_TYPE_ENTRY(8533, 8533, 1),
+ CPU_TYPE_ENTRY(8533, 8533_E, 1),
+ CPU_TYPE_ENTRY(8535, 8535, 1),
+ CPU_TYPE_ENTRY(8535, 8535_E, 1),
+ CPU_TYPE_ENTRY(8536, 8536, 1),
+ CPU_TYPE_ENTRY(8536, 8536_E, 1),
+ CPU_TYPE_ENTRY(8540, 8540, 1),
+ CPU_TYPE_ENTRY(8541, 8541, 1),
+ CPU_TYPE_ENTRY(8541, 8541_E, 1),
+ CPU_TYPE_ENTRY(8543, 8543, 1),
+ CPU_TYPE_ENTRY(8543, 8543_E, 1),
+ CPU_TYPE_ENTRY(8544, 8544, 1),
+ CPU_TYPE_ENTRY(8544, 8544_E, 1),
+ CPU_TYPE_ENTRY(8545, 8545, 1),
+ CPU_TYPE_ENTRY(8545, 8545_E, 1),
+ CPU_TYPE_ENTRY(8547, 8547_E, 1),
+ CPU_TYPE_ENTRY(8548, 8548, 1),
+ CPU_TYPE_ENTRY(8548, 8548_E, 1),
+ CPU_TYPE_ENTRY(8555, 8555, 1),
+ CPU_TYPE_ENTRY(8555, 8555_E, 1),
+ CPU_TYPE_ENTRY(8560, 8560, 1),
+ CPU_TYPE_ENTRY(8567, 8567, 1),
+ CPU_TYPE_ENTRY(8567, 8567_E, 1),
+ CPU_TYPE_ENTRY(8568, 8568, 1),
+ CPU_TYPE_ENTRY(8568, 8568_E, 1),
+ CPU_TYPE_ENTRY(8569, 8569, 1),
+ CPU_TYPE_ENTRY(8569, 8569_E, 1),
+ CPU_TYPE_ENTRY(8572, 8572, 2),
+ CPU_TYPE_ENTRY(8572, 8572_E, 2),
+ CPU_TYPE_ENTRY(P1011, P1011, 1),
+ CPU_TYPE_ENTRY(P1011, P1011_E, 1),
+ CPU_TYPE_ENTRY(P1012, P1012, 1),
+ CPU_TYPE_ENTRY(P1012, P1012_E, 1),
+ CPU_TYPE_ENTRY(P1013, P1013, 1),
+ CPU_TYPE_ENTRY(P1013, P1013_E, 1),
+ CPU_TYPE_ENTRY(P1020, P1020, 2),
+ CPU_TYPE_ENTRY(P1020, P1020_E, 2),
+ CPU_TYPE_ENTRY(P1021, P1021, 2),
+ CPU_TYPE_ENTRY(P1021, P1021_E, 2),
+ CPU_TYPE_ENTRY(P1022, P1022, 2),
+ CPU_TYPE_ENTRY(P1022, P1022_E, 2),
+ CPU_TYPE_ENTRY(P2010, P2010, 1),
+ CPU_TYPE_ENTRY(P2010, P2010_E, 1),
+ CPU_TYPE_ENTRY(P2020, P2020, 2),
+ CPU_TYPE_ENTRY(P2020, P2020_E, 2),
+ CPU_TYPE_ENTRY(P4040, P4040, 4),
+ CPU_TYPE_ENTRY(P4040, P4040_E, 4),
+ CPU_TYPE_ENTRY(P4080, P4080, 8),
+ CPU_TYPE_ENTRY(P4080, P4080_E, 8),
+#elif defined(CONFIG_MPC86xx)
+ CPU_TYPE_ENTRY(8610, 8610, 1),
+ CPU_TYPE_ENTRY(8641, 8641, 2),
+ CPU_TYPE_ENTRY(8641D, 8641D, 2),
+#endif
+};
+
+struct cpu_type cpu_type_unknown = CPU_TYPE_ENTRY(Unknown, Unknown, 1);
+
+struct cpu_type *identify_cpu(u32 ver)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++) {
+ if (cpu_type_list[i].soc_ver == ver)
+ return &cpu_type_list[i];
+ }
+ return &cpu_type_unknown;
+}
+
+int cpu_numcores(void)
+{
+ struct cpu_type *cpu = gd->cpu;
+ return cpu->num_cores;
+}
+
+int probecpu (void)
+{
+ uint svr;
+ uint ver;
+
+ svr = get_svr();
+ ver = SVR_SOC_VER(svr);
+
+ gd->cpu = identify_cpu(ver);
+
+ return 0;
+}
+
+/*
+ * Initializes on-chip ethernet controllers.
+ * To override, implement board_eth_init().
+ */
+int cpu_eth_init(bd_t *bis)
+{
+#if defined(CONFIG_ETHER_ON_FCC)
+ fec_initialize(bis);
+#endif
+
+#if defined(CONFIG_UEC_ETH)
+ uec_standard_init(bis);
+#endif
+
+#if defined(CONFIG_TSEC_ENET) || defined(CONFIG_MPC85XX_FEC)
+ tsec_standard_init(bis);
+#endif
+
+ return 0;
+}
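
For reference, struct cpu_type and the CPU_TYPE_ENTRY() macro used by the table above are not part of this diff; they come from the processor header (include/asm-ppc/processor.h at the time). A rough sketch of the pieces cpu.c relies on -- field names and the macro body are an approximation, not taken from this patch:

struct cpu_type {
	char name[15];
	u32 soc_ver;
	u32 num_cores;
};

#define CPU_TYPE_ENTRY(n, v, nc) \
	{ .name = #n, .soc_ver = SVR_##v, .num_cores = (nc), }

/*
 * probecpu() reads the SVR, strips the revision bits with SVR_SOC_VER()
 * and caches the matching table entry (or cpu_type_unknown) in gd->cpu;
 * cpu_numcores() later just returns num_cores from that cached entry.
 */
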
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/Makefile b/arch/ppc/cpu/mpc8xxx/ddr/Makefile
new file mode 100644
index 0000000..cb7f856
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/Makefile
@@ -0,0 +1,35 @@
+#
+# Copyright 2008 Freescale Semiconductor, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# Version 2 as published by the Free Software Foundation.
+#
+
+include $(TOPDIR)/config.mk
+
+LIB = $(obj)libddr.a
+
+COBJS-$(CONFIG_FSL_DDR1) += main.o util.o ctrl_regs.o options.o \
+ lc_common_dimm_params.o
+COBJS-$(CONFIG_FSL_DDR1) += ddr1_dimm_params.o
+
+COBJS-$(CONFIG_FSL_DDR2) += main.o util.o ctrl_regs.o options.o \
+ lc_common_dimm_params.o
+COBJS-$(CONFIG_FSL_DDR2) += ddr2_dimm_params.o
+
+COBJS-$(CONFIG_FSL_DDR3) += main.o util.o ctrl_regs.o options.o \
+ lc_common_dimm_params.o
+COBJS-$(CONFIG_FSL_DDR3) += ddr3_dimm_params.o
+
+SRCS := $(START:.o=.S) $(SOBJS-y:.o=.S) $(COBJS-y:.o=.c)
+OBJS := $(addprefix $(obj),$(SOBJS-y) $(COBJS-y))
+
+all: $(obj).depend $(LIB)
+
+$(LIB): $(OBJS)
+ $(AR) $(ARFLAGS) $@ $(OBJS)
+
+include $(SRCTREE)/rules.mk
+
+sinclude $(obj).depend
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/common_timing_params.h b/arch/ppc/cpu/mpc8xxx/ddr/common_timing_params.h
new file mode 100644
index 0000000..5aea517
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/common_timing_params.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef COMMON_TIMING_PARAMS_H
+#define COMMON_TIMING_PARAMS_H
+
+typedef struct {
+ /* parameters to constrain */
+
+ unsigned int tCKmin_X_ps;
+ unsigned int tCKmax_ps;
+ unsigned int tCKmax_max_ps;
+ unsigned int tRCD_ps;
+ unsigned int tRP_ps;
+ unsigned int tRAS_ps;
+
+ unsigned int tWR_ps; /* maximum = 63750 ps */
+ unsigned int tWTR_ps; /* maximum = 63750 ps */
+ unsigned int tRFC_ps; /* maximum = 255 ns + 256 ns + .75 ns
+ = 511750 ps */
+
+ unsigned int tRRD_ps; /* maximum = 63750 ps */
+ unsigned int tRC_ps; /* maximum = 254 ns + .75 ns = 254750 ps */
+
+ unsigned int refresh_rate_ps;
+
+ unsigned int tIS_ps; /* byte 32, spd->ca_setup */
+ unsigned int tIH_ps; /* byte 33, spd->ca_hold */
+ unsigned int tDS_ps; /* byte 34, spd->data_setup */
+ unsigned int tDH_ps; /* byte 35, spd->data_hold */
+ unsigned int tRTP_ps; /* byte 38, spd->trtp */
+ unsigned int tDQSQ_max_ps; /* byte 44, spd->tdqsq */
+ unsigned int tQHS_ps; /* byte 45, spd->tqhs */
+
+ unsigned int ndimms_present;
+ unsigned int lowest_common_SPD_caslat;
+ unsigned int highest_common_derated_caslat;
+ unsigned int additive_latency;
+ unsigned int all_DIMMs_burst_lengths_bitmask;
+ unsigned int all_DIMMs_registered;
+ unsigned int all_DIMMs_unbuffered;
+ unsigned int all_DIMMs_ECC_capable;
+
+ unsigned long long total_mem;
+ unsigned long long base_address;
+} common_timing_params_t;
+
+#endif
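
All of the *_ps fields above are kept in picoseconds; they are only converted to memory-clock cycles when the controller registers are programmed (via picos_to_mclk(), used throughout ctrl_regs.c below). A worked example of that conversion, assuming a DDR2-667 memory clock (tCK = 3000 ps) and the usual round-up division -- the figures are illustrative, not taken from this patch:

	unsigned int tRFC_ps = 511750;	/* 255 ns + 256 ns + .75 ns, the stated maximum */
	unsigned int mclk_ps = 3000;	/* 333 MHz memory clock (DDR2-667) */
	unsigned int refrec  = (tRFC_ps + mclk_ps - 1) / mclk_ps;	/* = 171 clocks */

set_timing_cfg_1()/set_timing_cfg_3() in ctrl_regs.c then subtract a fixed offset of 8 before writing REFREC/EXT_REFREC.
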
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/ctrl_regs.c b/arch/ppc/cpu/mpc8xxx/ddr/ctrl_regs.c
new file mode 100644
index 0000000..03f9c43
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/ctrl_regs.c
@@ -0,0 +1,1366 @@
+/*
+ * Copyright 2008-2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Generic driver for Freescale DDR/DDR2/DDR3 memory controller.
+ * Based on code from spd_sdram.c
+ * Author: James Yang [at freescale.com]
+ */
+
+#include <common.h>
+#include <asm/fsl_ddr_sdram.h>
+
+#include "ddr.h"
+
+extern unsigned int picos_to_mclk(unsigned int picos);
+/*
+ * Determine Rtt value.
+ *
+ * This should likely be either board or controller specific.
+ *
+ * Rtt(nominal) - DDR2:
+ * 0 = Rtt disabled
+ * 1 = 75 ohm
+ * 2 = 150 ohm
+ * 3 = 50 ohm
+ * Rtt(nominal) - DDR3:
+ * 0 = Rtt disabled
+ * 1 = 60 ohm
+ * 2 = 120 ohm
+ * 3 = 40 ohm
+ * 4 = 20 ohm
+ * 5 = 30 ohm
+ *
+ * FIXME: Apparently 8641 needs a value of 2
+ * FIXME: Old code says if 667 MHz or higher, use 3 on 8572
+ *
+ * FIXME: There was some effort down this line earlier:
+ *
+ * unsigned int i;
+ * for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL/2; i++) {
+ * if (popts->dimmslot[i].num_valid_cs
+ * && (popts->cs_local_opts[2*i].odt_rd_cfg
+ * || popts->cs_local_opts[2*i].odt_wr_cfg)) {
+ * rtt = 2;
+ * break;
+ * }
+ * }
+ */
+static inline int fsl_ddr_get_rtt(void)
+{
+ int rtt;
+
+#if defined(CONFIG_FSL_DDR1)
+ rtt = 0;
+#elif defined(CONFIG_FSL_DDR2)
+ rtt = 3;
+#else
+ rtt = 0;
+#endif
+
+ return rtt;
+}
+
+/*
+ * compute the CAS write latency according to DDR3 spec
+ * CWL = 5 if tCK >= 2.5ns
+ * 6 if 2.5ns > tCK >= 1.875ns
+ * 7 if 1.875ns > tCK >= 1.5ns
+ * 8 if 1.5ns > tCK >= 1.25ns
+ */
+static inline unsigned int compute_cas_write_latency(void)
+{
+ unsigned int cwl;
+ const unsigned int mclk_ps = get_memory_clk_period_ps();
+
+ if (mclk_ps >= 2500)
+ cwl = 5;
+ else if (mclk_ps >= 1875)
+ cwl = 6;
+ else if (mclk_ps >= 1500)
+ cwl = 7;
+ else if (mclk_ps >= 1250)
+ cwl = 8;
+ else
+ cwl = 8;
+ return cwl;
+}
+
+/* Chip Select Configuration (CSn_CONFIG) */
+static void set_csn_config(int i, fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const dimm_params_t *dimm_params)
+{
+ unsigned int cs_n_en = 0; /* Chip Select enable */
+ unsigned int intlv_en = 0; /* Memory controller interleave enable */
+ unsigned int intlv_ctl = 0; /* Interleaving control */
+ unsigned int ap_n_en = 0; /* Chip select n auto-precharge enable */
+ unsigned int odt_rd_cfg = 0; /* ODT for reads configuration */
+ unsigned int odt_wr_cfg = 0; /* ODT for writes configuration */
+ unsigned int ba_bits_cs_n = 0; /* Num of bank bits for SDRAM on CSn */
+ unsigned int row_bits_cs_n = 0; /* Num of row bits for SDRAM on CSn */
+ unsigned int col_bits_cs_n = 0; /* Num of col bits for SDRAM on CSn */
+
+ /* Compute CS_CONFIG only for existing ranks of each DIMM. */
+ if ((((i&1) == 0)
+ && (dimm_params[i/2].n_ranks == 1))
+ || (dimm_params[i/2].n_ranks == 2)) {
+ unsigned int n_banks_per_sdram_device;
+ cs_n_en = 1;
+ if (i == 0) {
+ /* These fields only available in CS0_CONFIG */
+ intlv_en = popts->memctl_interleaving;
+ intlv_ctl = popts->memctl_interleaving_mode;
+ }
+ ap_n_en = popts->cs_local_opts[i].auto_precharge;
+ odt_rd_cfg = popts->cs_local_opts[i].odt_rd_cfg;
+ odt_wr_cfg = popts->cs_local_opts[i].odt_wr_cfg;
+ n_banks_per_sdram_device
+ = dimm_params[i/2].n_banks_per_sdram_device;
+ ba_bits_cs_n = __ilog2(n_banks_per_sdram_device) - 2;
+ row_bits_cs_n = dimm_params[i/2].n_row_addr - 12;
+ col_bits_cs_n = dimm_params[i/2].n_col_addr - 8;
+ }
+
+ ddr->cs[i].config = (0
+ | ((cs_n_en & 0x1) << 31)
+ | ((intlv_en & 0x3) << 29)
+ | ((intlv_ctl & 0xf) << 24)
+ | ((ap_n_en & 0x1) << 23)
+
+ /* XXX: some implementations only have 1 bit, starting at the left */
+ | ((odt_rd_cfg & 0x7) << 20)
+
+ /* XXX: Some implementations only have 1 bit, starting at the left */
+ | ((odt_wr_cfg & 0x7) << 16)
+
+ | ((ba_bits_cs_n & 0x3) << 14)
+ | ((row_bits_cs_n & 0x7) << 8)
+ | ((col_bits_cs_n & 0x7) << 0)
+ );
+ debug("FSLDDR: cs[%d]_config = 0x%08x\n", i,ddr->cs[i].config);
+}
+
+/* Chip Select Configuration 2 (CSn_CONFIG_2) */
+/* FIXME: 8572 */
+static void set_csn_config_2(int i, fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int pasr_cfg = 0; /* Partial array self refresh config */
+
+ ddr->cs[i].config_2 = ((pasr_cfg & 7) << 24);
+ debug("FSLDDR: cs[%d]_config_2 = 0x%08x\n", i, ddr->cs[i].config_2);
+}
+
+/* -3E = 667 CL5, -25 = CL6 800, -25E = CL5 800 */
+
+#if !defined(CONFIG_FSL_DDR1)
+/*
+ * DDR SDRAM Timing Configuration 0 (TIMING_CFG_0)
+ *
+ * Avoid writing for DDR I. The new PQ38 DDR controller
+ * dreams up non-zero default values to be backwards compatible.
+ */
+static void set_timing_cfg_0(fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned char trwt_mclk = 0; /* Read-to-write turnaround */
+ unsigned char twrt_mclk = 0; /* Write-to-read turnaround */
+ /* 7.5 ns on -3E; 0 means WL - CL + BL/2 + 1 */
+ unsigned char trrt_mclk = 0; /* Read-to-read turnaround */
+ unsigned char twwt_mclk = 0; /* Write-to-write turnaround */
+
+ /* Active powerdown exit timing (tXARD and tXARDS). */
+ unsigned char act_pd_exit_mclk;
+ /* Precharge powerdown exit timing (tXP). */
+ unsigned char pre_pd_exit_mclk;
+ /* Precharge powerdown exit timing (tAXPD). */
+ unsigned char taxpd_mclk;
+ /* Mode register set cycle time (tMRD). */
+ unsigned char tmrd_mclk;
+
+#if defined(CONFIG_FSL_DDR3)
+ /*
+ * (tXARD and tXARDS). Empirical?
+ * The DDR3 spec does not define tXARD, so we use
+ * tXP instead: tXP = max(3 nCK, 7.5 ns) for DDR3.
+ * The spec also has no tAXPD; we use tAXPD = 8,
+ * which needs design confirmation.
+ */
+ int tXP = max((get_memory_clk_period_ps() * 3), 7500); /* unit=ps */
+ act_pd_exit_mclk = picos_to_mclk(tXP);
+ /* Mode register MR0[A12] is '1' - fast exit */
+ pre_pd_exit_mclk = act_pd_exit_mclk;
+ taxpd_mclk = 8;
+ tmrd_mclk = 4;
+#else /* CONFIG_FSL_DDR2 */
+ /*
+ * (tXARD and tXARDS). Empirical?
+ * tXARD = 2 for DDR2
+ * tXP=2
+ * tAXPD=8
+ */
+ act_pd_exit_mclk = 2;
+ pre_pd_exit_mclk = 2;
+ taxpd_mclk = 8;
+ tmrd_mclk = 2;
+#endif
+
+ ddr->timing_cfg_0 = (0
+ | ((trwt_mclk & 0x3) << 30) /* RWT */
+ | ((twrt_mclk & 0x3) << 28) /* WRT */
+ | ((trrt_mclk & 0x3) << 26) /* RRT */
+ | ((twwt_mclk & 0x3) << 24) /* WWT */
+ | ((act_pd_exit_mclk & 0x7) << 20) /* ACT_PD_EXIT */
+ | ((pre_pd_exit_mclk & 0xF) << 16) /* PRE_PD_EXIT */
+ | ((taxpd_mclk & 0xf) << 8) /* ODT_PD_EXIT */
+ | ((tmrd_mclk & 0xf) << 0) /* MRS_CYC */
+ );
+ debug("FSLDDR: timing_cfg_0 = 0x%08x\n", ddr->timing_cfg_0);
+}
+#endif /* !defined(CONFIG_FSL_DDR1) */
+
+/* DDR SDRAM Timing Configuration 3 (TIMING_CFG_3) */
+static void set_timing_cfg_3(fsl_ddr_cfg_regs_t *ddr,
+ const common_timing_params_t *common_dimm,
+ unsigned int cas_latency)
+{
+ /* Extended Activate to precharge interval (tRAS) */
+ unsigned int ext_acttopre = 0;
+ unsigned int ext_refrec; /* Extended refresh recovery time (tRFC) */
+ unsigned int ext_caslat = 0; /* Extended MCAS latency from READ cmd */
+ unsigned int cntl_adj = 0; /* Control Adjust */
+
+ /* If the tRAS > 19 MCLK, we use the ext mode */
+ if (picos_to_mclk(common_dimm->tRAS_ps) > 0x13)
+ ext_acttopre = 1;
+
+ ext_refrec = (picos_to_mclk(common_dimm->tRFC_ps) - 8) >> 4;
+
+ /* If the CAS latency is more than 8, use the extended mode */
+ if (cas_latency > 8)
+ ext_caslat = 1;
+
+ ddr->timing_cfg_3 = (0
+ | ((ext_acttopre & 0x1) << 24)
+ | ((ext_refrec & 0xF) << 16)
+ | ((ext_caslat & 0x1) << 12)
+ | ((cntl_adj & 0x7) << 0)
+ );
+ debug("FSLDDR: timing_cfg_3 = 0x%08x\n", ddr->timing_cfg_3);
+}
+
+/* DDR SDRAM Timing Configuration 1 (TIMING_CFG_1) */
+static void set_timing_cfg_1(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const common_timing_params_t *common_dimm,
+ unsigned int cas_latency)
+{
+ /* Precharge-to-activate interval (tRP) */
+ unsigned char pretoact_mclk;
+ /* Activate to precharge interval (tRAS) */
+ unsigned char acttopre_mclk;
+ /* Activate to read/write interval (tRCD) */
+ unsigned char acttorw_mclk;
+ /* CASLAT */
+ unsigned char caslat_ctrl;
+ /* Refresh recovery time (tRFC) ; trfc_low */
+ unsigned char refrec_ctrl;
+ /* Last data to precharge minimum interval (tWR) */
+ unsigned char wrrec_mclk;
+ /* Activate-to-activate interval (tRRD) */
+ unsigned char acttoact_mclk;
+ /* Last write data pair to read command issue interval (tWTR) */
+ unsigned char wrtord_mclk;
+
+ pretoact_mclk = picos_to_mclk(common_dimm->tRP_ps);
+ acttopre_mclk = picos_to_mclk(common_dimm->tRAS_ps);
+ acttorw_mclk = picos_to_mclk(common_dimm->tRCD_ps);
+
+ /*
+ * Translate CAS Latency to a DDR controller field value:
+ *
+ * CAS Lat   DDR I     DDR II    Ctrl
+ * Clocks    SPD Bit   SPD Bit   Value
+ * -------   -------   -------   -----
+ * 1.0       0                   0001
+ * 1.5       1                   0010
+ * 2.0       2         2         0011
+ * 2.5       3                   0100
+ * 3.0       4         3         0101
+ * 3.5       5                   0110
+ * 4.0                 4         0111
+ * 4.5                           1000
+ * 5.0                 5         1001
+ */
+#if defined(CONFIG_FSL_DDR1)
+ caslat_ctrl = (cas_latency + 1) & 0x07;
+#elif defined(CONFIG_FSL_DDR2)
+ caslat_ctrl = 2 * cas_latency - 1;
+#else
+ /*
+ * If the CAS latency is more than 8 cycles,
+ * we need to set the extended bit for it in
+ * TIMING_CFG_3[EXT_CASLAT].
+ */
+ if (cas_latency > 8)
+ cas_latency -= 8;
+ caslat_ctrl = 2 * cas_latency - 1;
+#endif
+
+ refrec_ctrl = picos_to_mclk(common_dimm->tRFC_ps) - 8;
+ wrrec_mclk = picos_to_mclk(common_dimm->tWR_ps);
+ if (popts->OTF_burst_chop_en)
+ wrrec_mclk += 2;
+
+ acttoact_mclk = picos_to_mclk(common_dimm->tRRD_ps);
+ /*
+ * JEDEC has min requirement for tRRD
+ */
+#if defined(CONFIG_FSL_DDR3)
+ if (acttoact_mclk < 4)
+ acttoact_mclk = 4;
+#endif
+ wrtord_mclk = picos_to_mclk(common_dimm->tWTR_ps);
+ /*
+ * JEDEC has some min requirements for tWTR
+ */
+#if defined(CONFIG_FSL_DDR2)
+ if (wrtord_mclk < 2)
+ wrtord_mclk = 2;
+#elif defined(CONFIG_FSL_DDR3)
+ if (wrtord_mclk < 4)
+ wrtord_mclk = 4;
+#endif
+ if (popts->OTF_burst_chop_en)
+ wrtord_mclk += 2;
+
+ ddr->timing_cfg_1 = (0
+ | ((pretoact_mclk & 0x0F) << 28)
+ | ((acttopre_mclk & 0x0F) << 24)
+ | ((acttorw_mclk & 0xF) << 20)
+ | ((caslat_ctrl & 0xF) << 16)
+ | ((refrec_ctrl & 0xF) << 12)
+ | ((wrrec_mclk & 0x0F) << 8)
+ | ((acttoact_mclk & 0x07) << 4)
+ | ((wrtord_mclk & 0x07) << 0)
+ );
+ debug("FSLDDR: timing_cfg_1 = 0x%08x\n", ddr->timing_cfg_1);
+}
+
+/* DDR SDRAM Timing Configuration 2 (TIMING_CFG_2) */
+static void set_timing_cfg_2(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const common_timing_params_t *common_dimm,
+ unsigned int cas_latency,
+ unsigned int additive_latency)
+{
+ /* Additive latency */
+ unsigned char add_lat_mclk;
+ /* CAS-to-preamble override */
+ unsigned short cpo;
+ /* Write latency */
+ unsigned char wr_lat;
+ /* Read to precharge (tRTP) */
+ unsigned char rd_to_pre;
+ /* Write command to write data strobe timing adjustment */
+ unsigned char wr_data_delay;
+ /* Minimum CKE pulse width (tCKE) */
+ unsigned char cke_pls;
+ /* Window for four activates (tFAW) */
+ unsigned short four_act;
+
+ /* FIXME add check that this must be less than acttorw_mclk */
+ add_lat_mclk = additive_latency;
+ cpo = popts->cpo_override;
+
+#if defined(CONFIG_FSL_DDR1)
+ /*
+ * This is a lie. It should really be 1, but if it is
+ * set to 1, bits overlap into the old controller's
+ * otherwise unused ACSM field. If we leave it 0, then
+ * the HW will magically treat it as 1 for DDR 1. Oh Yea.
+ */
+ wr_lat = 0;
+#elif defined(CONFIG_FSL_DDR2)
+ wr_lat = cas_latency - 1;
+#else
+ wr_lat = compute_cas_write_latency();
+#endif
+
+ rd_to_pre = picos_to_mclk(common_dimm->tRTP_ps);
+ /*
+ * JEDEC has some min requirements for tRTP
+ */
+#if defined(CONFIG_FSL_DDR2)
+ if (rd_to_pre < 2)
+ rd_to_pre = 2;
+#elif defined(CONFIG_FSL_DDR3)
+ if (rd_to_pre < 4)
+ rd_to_pre = 4;
+#endif
+ if (additive_latency)
+ rd_to_pre += additive_latency;
+ if (popts->OTF_burst_chop_en)
+ rd_to_pre += 2; /* according to UM */
+
+ wr_data_delay = popts->write_data_delay;
+ cke_pls = picos_to_mclk(popts->tCKE_clock_pulse_width_ps);
+ four_act = picos_to_mclk(popts->tFAW_window_four_activates_ps);
+
+ ddr->timing_cfg_2 = (0
+ | ((add_lat_mclk & 0xf) << 28)
+ | ((cpo & 0x1f) << 23)
+ | ((wr_lat & 0xf) << 19)
+ | ((rd_to_pre & RD_TO_PRE_MASK) << RD_TO_PRE_SHIFT)
+ | ((wr_data_delay & WR_DATA_DELAY_MASK) << WR_DATA_DELAY_SHIFT)
+ | ((cke_pls & 0x7) << 6)
+ | ((four_act & 0x3f) << 0)
+ );
+ debug("FSLDDR: timing_cfg_2 = 0x%08x\n", ddr->timing_cfg_2);
+}
+
+/* DDR SDRAM control configuration (DDR_SDRAM_CFG) */
+static void set_ddr_sdram_cfg(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const common_timing_params_t *common_dimm)
+{
+ unsigned int mem_en; /* DDR SDRAM interface logic enable */
+ unsigned int sren; /* Self refresh enable (during sleep) */
+ unsigned int ecc_en; /* ECC enable. */
+ unsigned int rd_en; /* Registered DIMM enable */
+ unsigned int sdram_type; /* Type of SDRAM */
+ unsigned int dyn_pwr; /* Dynamic power management mode */
+ unsigned int dbw; /* DRAM data bus width */
+ unsigned int eight_be = 0; /* 8-beat burst enable, DDR2 is zero */
+ unsigned int ncap = 0; /* Non-concurrent auto-precharge */
+ unsigned int threeT_en; /* Enable 3T timing */
+ unsigned int twoT_en; /* Enable 2T timing */
+ unsigned int ba_intlv_ctl; /* Bank (CS) interleaving control */
+ unsigned int x32_en = 0; /* x32 enable */
+ unsigned int pchb8 = 0; /* precharge bit 8 enable */
+ unsigned int hse; /* Global half strength override */
+ unsigned int mem_halt = 0; /* memory controller halt */
+ unsigned int bi = 0; /* Bypass initialization */
+
+ mem_en = 1;
+ sren = popts->self_refresh_in_sleep;
+ if (common_dimm->all_DIMMs_ECC_capable) {
+ /* Allow setting of ECC only if all DIMMs are ECC. */
+ ecc_en = popts->ECC_mode;
+ } else {
+ ecc_en = 0;
+ }
+
+ rd_en = (common_dimm->all_DIMMs_registered
+ && !common_dimm->all_DIMMs_unbuffered);
+
+ sdram_type = CONFIG_FSL_SDRAM_TYPE;
+
+ dyn_pwr = popts->dynamic_power;
+ dbw = popts->data_bus_width;
+ /* 8-beat burst enable, DDR3 case:
+ * it must be cleared when using the on-the-fly mode
+ * and must be set when using the 32-bit bus mode.
+ */
+ if (sdram_type == SDRAM_TYPE_DDR3) {
+ if (popts->burst_length == DDR_BL8)
+ eight_be = 1;
+ if (popts->burst_length == DDR_OTF)
+ eight_be = 0;
+ if (dbw == 0x1)
+ eight_be = 1;
+ }
+
+ threeT_en = popts->threeT_en;
+ twoT_en = popts->twoT_en;
+ ba_intlv_ctl = popts->ba_intlv_ctl;
+ hse = popts->half_strength_driver_enable;
+
+ ddr->ddr_sdram_cfg = (0
+ | ((mem_en & 0x1) << 31)
+ | ((sren & 0x1) << 30)
+ | ((ecc_en & 0x1) << 29)
+ | ((rd_en & 0x1) << 28)
+ | ((sdram_type & 0x7) << 24)
+ | ((dyn_pwr & 0x1) << 21)
+ | ((dbw & 0x3) << 19)
+ | ((eight_be & 0x1) << 18)
+ | ((ncap & 0x1) << 17)
+ | ((threeT_en & 0x1) << 16)
+ | ((twoT_en & 0x1) << 15)
+ | ((ba_intlv_ctl & 0x7F) << 8)
+ | ((x32_en & 0x1) << 5)
+ | ((pchb8 & 0x1) << 4)
+ | ((hse & 0x1) << 3)
+ | ((mem_halt & 0x1) << 1)
+ | ((bi & 0x1) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_cfg = 0x%08x\n", ddr->ddr_sdram_cfg);
+}
+
+/* DDR SDRAM control configuration 2 (DDR_SDRAM_CFG_2) */
+static void set_ddr_sdram_cfg_2(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts)
+{
+ unsigned int frc_sr = 0; /* Force self refresh */
+ unsigned int sr_ie = 0; /* Self-refresh interrupt enable */
+ unsigned int dll_rst_dis; /* DLL reset disable */
+ unsigned int dqs_cfg; /* DQS configuration */
+ unsigned int odt_cfg; /* ODT configuration */
+ unsigned int num_pr; /* Number of posted refreshes */
+ unsigned int obc_cfg; /* On-The-Fly Burst Chop Cfg */
+ unsigned int ap_en; /* Address Parity Enable */
+ unsigned int d_init; /* DRAM data initialization */
+ unsigned int rcw_en = 0; /* Register Control Word Enable */
+ unsigned int md_en = 0; /* Mirrored DIMM Enable */
+
+ dll_rst_dis = 1; /* Make this configurable */
+ dqs_cfg = popts->DQS_config;
+ if (popts->cs_local_opts[0].odt_rd_cfg
+ || popts->cs_local_opts[0].odt_wr_cfg) {
+ /* FIXME */
+ odt_cfg = 2;
+ } else {
+ odt_cfg = 0;
+ }
+
+ num_pr = 1; /* Make this configurable */
+
+ /*
+ * 8572 manual says
+ * {TIMING_CFG_1[PRETOACT]
+ * + [DDR_SDRAM_CFG_2[NUM_PR]
+ * * ({EXT_REFREC || REFREC} + 8 + 2)]}
+ * << DDR_SDRAM_INTERVAL[REFINT]
+ */
+#if defined(CONFIG_FSL_DDR3)
+ obc_cfg = popts->OTF_burst_chop_en;
+#else
+ obc_cfg = 0;
+#endif
+
+ ap_en = 0; /* Make this configurable? */
+
+#if defined(CONFIG_ECC_INIT_VIA_DDRCONTROLLER)
+ /* Use the DDR controller to auto initialize memory. */
+ d_init = 1;
+ ddr->ddr_data_init = CONFIG_MEM_INIT_VALUE;
+ debug("DDR: ddr_data_init = 0x%08x\n", ddr->ddr_data_init);
+#else
+ /* Memory will be initialized via DMA, or not at all. */
+ d_init = 0;
+#endif
+
+#if defined(CONFIG_FSL_DDR3)
+ md_en = popts->mirrored_dimm;
+#endif
+ ddr->ddr_sdram_cfg_2 = (0
+ | ((frc_sr & 0x1) << 31)
+ | ((sr_ie & 0x1) << 30)
+ | ((dll_rst_dis & 0x1) << 29)
+ | ((dqs_cfg & 0x3) << 26)
+ | ((odt_cfg & 0x3) << 21)
+ | ((num_pr & 0xf) << 12)
+ | ((obc_cfg & 0x1) << 6)
+ | ((ap_en & 0x1) << 5)
+ | ((d_init & 0x1) << 4)
+ | ((rcw_en & 0x1) << 2)
+ | ((md_en & 0x1) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_cfg_2 = 0x%08x\n", ddr->ddr_sdram_cfg_2);
+}
+
+/* DDR SDRAM Mode configuration 2 (DDR_SDRAM_MODE_2) */
+static void set_ddr_sdram_mode_2(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts)
+{
+ unsigned short esdmode2 = 0; /* Extended SDRAM mode 2 */
+ unsigned short esdmode3 = 0; /* Extended SDRAM mode 3 */
+
+#if defined(CONFIG_FSL_DDR3)
+ unsigned int rtt_wr = 0; /* Rtt_WR - dynamic ODT off */
+ unsigned int srt = 0; /* self-refresh temperature, normal range */
+ unsigned int asr = 0; /* auto self-refresh disable */
+ unsigned int cwl = compute_cas_write_latency() - 5;
+ unsigned int pasr = 0; /* partial array self refresh disable */
+
+ if (popts->rtt_override)
+ rtt_wr = popts->rtt_wr_override_value;
+
+ esdmode2 = (0
+ | ((rtt_wr & 0x3) << 9)
+ | ((srt & 0x1) << 7)
+ | ((asr & 0x1) << 6)
+ | ((cwl & 0x7) << 3)
+ | ((pasr & 0x7) << 0));
+#endif
+ ddr->ddr_sdram_mode_2 = (0
+ | ((esdmode2 & 0xFFFF) << 16)
+ | ((esdmode3 & 0xFFFF) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_mode_2 = 0x%08x\n", ddr->ddr_sdram_mode_2);
+}
+
+/* DDR SDRAM Interval Configuration (DDR_SDRAM_INTERVAL) */
+static void set_ddr_sdram_interval(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const common_timing_params_t *common_dimm)
+{
+ unsigned int refint; /* Refresh interval */
+ unsigned int bstopre; /* Precharge interval */
+
+ refint = picos_to_mclk(common_dimm->refresh_rate_ps);
+
+ bstopre = popts->bstopre;
+
+ /* refint field used 0x3FFF in earlier controllers */
+ ddr->ddr_sdram_interval = (0
+ | ((refint & 0xFFFF) << 16)
+ | ((bstopre & 0x3FFF) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_interval = 0x%08x\n", ddr->ddr_sdram_interval);
+}
+
+#if defined(CONFIG_FSL_DDR3)
+/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */
+static void set_ddr_sdram_mode(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const common_timing_params_t *common_dimm,
+ unsigned int cas_latency,
+ unsigned int additive_latency)
+{
+ unsigned short esdmode; /* Extended SDRAM mode */
+ unsigned short sdmode; /* SDRAM mode */
+
+ /* Mode Register - MR1 */
+ unsigned int qoff = 0; /* Output buffer enable 0=yes, 1=no */
+ unsigned int tdqs_en = 0; /* TDQS Enable: 0=no, 1=yes */
+ unsigned int rtt;
+ unsigned int wrlvl_en = 0; /* Write level enable: 0=no, 1=yes */
+ unsigned int al = 0; /* Posted CAS# additive latency (AL) */
+ unsigned int dic = 1; /* Output driver impedance, 34ohm */
+ unsigned int dll_en = 0; /* DLL Enable 0=Enable (Normal),
+ 1=Disable (Test/Debug) */
+
+ /* Mode Register - MR0 */
+ unsigned int dll_on; /* DLL control for precharge PD, 0=off, 1=on */
+ unsigned int wr; /* Write Recovery */
+ unsigned int dll_rst; /* DLL Reset */
+ unsigned int mode; /* Normal=0 or Test=1 */
+ unsigned int caslat = 4;/* CAS# latency, default set as 6 cycles */
+ /* BT: Burst Type (0=Nibble Sequential, 1=Interleaved) */
+ unsigned int bt;
+ unsigned int bl; /* BL: Burst Length */
+
+ unsigned int wr_mclk;
+
+ const unsigned int mclk_ps = get_memory_clk_period_ps();
+
+ rtt = fsl_ddr_get_rtt();
+ if (popts->rtt_override)
+ rtt = popts->rtt_override_value;
+
+ if (additive_latency == (cas_latency - 1))
+ al = 1;
+ if (additive_latency == (cas_latency - 2))
+ al = 2;
+
+ /*
+ * The esdmode value will also be used for writing
+ * MR1 during write leveling for DDR3, although the
+ * bits specifically related to the write leveling
+ * scheme will be handled automatically by the DDR
+ * controller, so we set wrlvl_en = 0 here.
+ */
+ esdmode = (0
+ | ((qoff & 0x1) << 12)
+ | ((tdqs_en & 0x1) << 11)
+ | ((rtt & 0x4) << 7) /* rtt field is split */
+ | ((wrlvl_en & 0x1) << 7)
+ | ((rtt & 0x2) << 5) /* rtt field is split */
+ | ((dic & 0x2) << 4) /* DIC field is split */
+ | ((al & 0x3) << 3)
+ | ((rtt & 0x1) << 2) /* rtt field is split */
+ | ((dic & 0x1) << 1) /* DIC field is split */
+ | ((dll_en & 0x1) << 0)
+ );
+
+ /*
+ * DLL control for precharge PD
+ * 0=slow exit DLL off (tXPDLL)
+ * 1=fast exit DLL on (tXP)
+ */
+ dll_on = 1;
+ wr_mclk = (common_dimm->tWR_ps + mclk_ps - 1) / mclk_ps;
+ if (wr_mclk >= 12)
+ wr = 6;
+ else if (wr_mclk >= 9)
+ wr = 5;
+ else
+ wr = wr_mclk - 4;
+ dll_rst = 0; /* dll no reset */
+ mode = 0; /* normal mode */
+
+ /* look up table to get the cas latency bits */
+ if (cas_latency >= 5 && cas_latency <= 11) {
+ unsigned char cas_latency_table[7] = {
+ 0x2, /* 5 clocks */
+ 0x4, /* 6 clocks */
+ 0x6, /* 7 clocks */
+ 0x8, /* 8 clocks */
+ 0xa, /* 9 clocks */
+ 0xc, /* 10 clocks */
+ 0xe /* 11 clocks */
+ };
+ caslat = cas_latency_table[cas_latency - 5];
+ }
+ bt = 0; /* Nibble sequential */
+
+ switch (popts->burst_length) {
+ case DDR_BL8:
+ bl = 0;
+ break;
+ case DDR_OTF:
+ bl = 1;
+ break;
+ case DDR_BC4:
+ bl = 2;
+ break;
+ default:
+ printf("Error: invalid burst length of %u specified. "
+ " Defaulting to on-the-fly BC4 or BL8 beats.\n",
+ popts->burst_length);
+ bl = 1;
+ break;
+ }
+
+ sdmode = (0
+ | ((dll_on & 0x1) << 12)
+ | ((wr & 0x7) << 9)
+ | ((dll_rst & 0x1) << 8)
+ | ((mode & 0x1) << 7)
+ | (((caslat >> 1) & 0x7) << 4)
+ | ((bt & 0x1) << 3)
+ | ((bl & 0x3) << 0)
+ );
+
+ ddr->ddr_sdram_mode = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+
+ debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode);
+}
+
+#else /* !CONFIG_FSL_DDR3 */
+
+/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */
+static void set_ddr_sdram_mode(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts,
+ const common_timing_params_t *common_dimm,
+ unsigned int cas_latency,
+ unsigned int additive_latency)
+{
+ unsigned short esdmode; /* Extended SDRAM mode */
+ unsigned short sdmode; /* SDRAM mode */
+
+ /*
+ * FIXME: This ought to be pre-calculated in a
+ * technology-specific routine,
+ * e.g. compute_DDR2_mode_register(), and then the
+ * sdmode and esdmode passed in as part of common_dimm.
+ */
+
+ /* Extended Mode Register */
+ unsigned int mrs = 0; /* Mode Register Set */
+ unsigned int outputs = 0; /* 0=Enabled, 1=Disabled */
+ unsigned int rdqs_en = 0; /* RDQS Enable: 0=no, 1=yes */
+ unsigned int dqs_en = 0; /* DQS# Enable: 0=enable, 1=disable */
+ unsigned int ocd = 0; /* 0x0=OCD not supported,
+ 0x7=OCD default state */
+ unsigned int rtt;
+ unsigned int al; /* Posted CAS# additive latency (AL) */
+ unsigned int ods = 0; /* Output Drive Strength:
+ 0 = Full strength (18ohm)
+ 1 = Reduced strength (4ohm) */
+ unsigned int dll_en = 0; /* DLL Enable 0=Enable (Normal),
+ 1=Disable (Test/Debug) */
+
+ /* Mode Register (MR) */
+ unsigned int mr; /* Mode Register Definition */
+ unsigned int pd; /* Power-Down Mode */
+ unsigned int wr; /* Write Recovery */
+ unsigned int dll_res; /* DLL Reset */
+ unsigned int mode; /* Normal=0 or Test=1 */
+ unsigned int caslat = 0;/* CAS# latency */
+ /* BT: Burst Type (0=Sequential, 1=Interleaved) */
+ unsigned int bt;
+ unsigned int bl; /* BL: Burst Length */
+
+#if defined(CONFIG_FSL_DDR2)
+ const unsigned int mclk_ps = get_memory_clk_period_ps();
+#endif
+
+ rtt = fsl_ddr_get_rtt();
+
+ al = additive_latency;
+
+ esdmode = (0
+ | ((mrs & 0x3) << 14)
+ | ((outputs & 0x1) << 12)
+ | ((rdqs_en & 0x1) << 11)
+ | ((dqs_en & 0x1) << 10)
+ | ((ocd & 0x7) << 7)
+ | ((rtt & 0x2) << 5) /* rtt field is split */
+ | ((al & 0x7) << 3)
+ | ((rtt & 0x1) << 2) /* rtt field is split */
+ | ((ods & 0x1) << 1)
+ | ((dll_en & 0x1) << 0)
+ );
+
+ mr = 0; /* FIXME: CHECKME */
+
+ /*
+ * 0 = Fast Exit (Normal)
+ * 1 = Slow Exit (Low Power)
+ */
+ pd = 0;
+
+#if defined(CONFIG_FSL_DDR1)
+ wr = 0; /* Historical */
+#elif defined(CONFIG_FSL_DDR2)
+ wr = (common_dimm->tWR_ps + mclk_ps - 1) / mclk_ps - 1;
+#endif
+ dll_res = 0;
+ mode = 0;
+
+#if defined(CONFIG_FSL_DDR1)
+ if (1 <= cas_latency && cas_latency <= 4) {
+ unsigned char mode_caslat_table[4] = {
+ 0x5, /* 1.5 clocks */
+ 0x2, /* 2.0 clocks */
+ 0x6, /* 2.5 clocks */
+ 0x3 /* 3.0 clocks */
+ };
+ caslat = mode_caslat_table[cas_latency - 1];
+ } else {
+ printf("Warning: unknown cas_latency %d\n", cas_latency);
+ }
+#elif defined(CONFIG_FSL_DDR2)
+ caslat = cas_latency;
+#endif
+ bt = 0;
+
+ switch (popts->burst_length) {
+ case DDR_BL4:
+ bl = 2;
+ break;
+ case DDR_BL8:
+ bl = 3;
+ break;
+ default:
+ printf("Error: invalid burst length of %u specified. "
+ " Defaulting to 4 beats.\n",
+ popts->burst_length);
+ bl = 2;
+ break;
+ }
+
+ sdmode = (0
+ | ((mr & 0x3) << 14)
+ | ((pd & 0x1) << 12)
+ | ((wr & 0x7) << 9)
+ | ((dll_res & 0x1) << 8)
+ | ((mode & 0x1) << 7)
+ | ((caslat & 0x7) << 4)
+ | ((bt & 0x1) << 3)
+ | ((bl & 0x7) << 0)
+ );
+
+ ddr->ddr_sdram_mode = (0
+ | ((esdmode & 0xFFFF) << 16)
+ | ((sdmode & 0xFFFF) << 0)
+ );
+ debug("FSLDDR: ddr_sdram_mode = 0x%08x\n", ddr->ddr_sdram_mode);
+}
+#endif
+
+/* DDR SDRAM Data Initialization (DDR_DATA_INIT) */
+static void set_ddr_data_init(fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int init_value; /* Initialization value */
+
+ init_value = 0xDEADBEEF;
+ ddr->ddr_data_init = init_value;
+}
+
+/*
+ * DDR SDRAM Clock Control (DDR_SDRAM_CLK_CNTL)
+ * The old controller on the 8540/60 doesn't have this register.
+ * Hope it's OK to set it (to 0) anyway.
+ */
+static void set_ddr_sdram_clk_cntl(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts)
+{
+ unsigned int clk_adjust; /* Clock adjust */
+
+ clk_adjust = popts->clk_adjust;
+ ddr->ddr_sdram_clk_cntl = (clk_adjust & 0xF) << 23;
+}
+
+/* DDR Initialization Address (DDR_INIT_ADDR) */
+static void set_ddr_init_addr(fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int init_addr = 0; /* Initialization address */
+
+ ddr->ddr_init_addr = init_addr;
+}
+
+/* DDR Initialization Address (DDR_INIT_EXT_ADDR) */
+static void set_ddr_init_ext_addr(fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int uia = 0; /* Use initialization address */
+ unsigned int init_ext_addr = 0; /* Initialization address */
+
+ ddr->ddr_init_ext_addr = (0
+ | ((uia & 0x1) << 31)
+ | (init_ext_addr & 0xF)
+ );
+}
+
+/* DDR SDRAM Timing Configuration 4 (TIMING_CFG_4) */
+static void set_timing_cfg_4(fsl_ddr_cfg_regs_t *ddr,
+ const memctl_options_t *popts)
+{
+ unsigned int rwt = 0; /* Read-to-write turnaround for same CS */
+ unsigned int wrt = 0; /* Write-to-read turnaround for same CS */
+ unsigned int rrt = 0; /* Read-to-read turnaround for same CS */
+ unsigned int wwt = 0; /* Write-to-write turnaround for same CS */
+ unsigned int dll_lock = 0; /* DDR SDRAM DLL Lock Time */
+
+#if defined(CONFIG_FSL_DDR3)
+ if (popts->burst_length == DDR_BL8) {
+ /* We set BL/2 for fixed BL8 */
+ rrt = 0; /* BL/2 clocks */
+ wwt = 0; /* BL/2 clocks */
+ } else {
+ /* We need to set BL/2 + 2 to BC4 and OTF */
+ rrt = 2; /* BL/2 + 2 clocks */
+ wwt = 2; /* BL/2 + 2 clocks */
+ }
+ dll_lock = 1; /* tDLLK = 512 clocks from spec */
+#endif
+ ddr->timing_cfg_4 = (0
+ | ((rwt & 0xf) << 28)
+ | ((wrt & 0xf) << 24)
+ | ((rrt & 0xf) << 20)
+ | ((wwt & 0xf) << 16)
+ | (dll_lock & 0x3)
+ );
+ debug("FSLDDR: timing_cfg_4 = 0x%08x\n", ddr->timing_cfg_4);
+}
+
+/* DDR SDRAM Timing Configuration 5 (TIMING_CFG_5) */
+static void set_timing_cfg_5(fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int rodt_on = 0; /* Read to ODT on */
+ unsigned int rodt_off = 0; /* Read to ODT off */
+ unsigned int wodt_on = 0; /* Write to ODT on */
+ unsigned int wodt_off = 0; /* Write to ODT off */
+
+#if defined(CONFIG_FSL_DDR3)
+ rodt_on = 3; /* 2 clocks */
+ rodt_off = 4; /* 4 clocks */
+ wodt_on = 2; /* 1 clocks */
+ wodt_off = 4; /* 4 clocks */
+#endif
+
+ ddr->timing_cfg_5 = (0
+ | ((rodt_on & 0x1f) << 24)
+ | ((rodt_off & 0x7) << 20)
+ | ((wodt_on & 0x1f) << 12)
+ | ((wodt_off & 0x7) << 8)
+ );
+ debug("FSLDDR: timing_cfg_5 = 0x%08x\n", ddr->timing_cfg_5);
+}
+
+/* DDR ZQ Calibration Control (DDR_ZQ_CNTL) */
+static void set_ddr_zq_cntl(fsl_ddr_cfg_regs_t *ddr, unsigned int zq_en)
+{
+ unsigned int zqinit = 0;/* POR ZQ Calibration Time (tZQinit) */
+ /* Normal Operation Full Calibration Time (tZQoper) */
+ unsigned int zqoper = 0;
+ /* Normal Operation Short Calibration Time (tZQCS) */
+ unsigned int zqcs = 0;
+
+ if (zq_en) {
+ zqinit = 9; /* 512 clocks */
+ zqoper = 8; /* 256 clocks */
+ zqcs = 6; /* 64 clocks */
+ }
+
+ ddr->ddr_zq_cntl = (0
+ | ((zq_en & 0x1) << 31)
+ | ((zqinit & 0xF) << 24)
+ | ((zqoper & 0xF) << 16)
+ | ((zqcs & 0xF) << 8)
+ );
+}
+
+/* DDR Write Leveling Control (DDR_WRLVL_CNTL) */
+static void set_ddr_wrlvl_cntl(fsl_ddr_cfg_regs_t *ddr, unsigned int wrlvl_en,
+ const memctl_options_t *popts)
+{
+ /*
+ * First DQS pulse rising edge after margining mode
+ * is programmed (tWL_MRD)
+ */
+ unsigned int wrlvl_mrd = 0;
+ /* ODT delay after margining mode is programmed (tWL_ODTEN) */
+ unsigned int wrlvl_odten = 0;
+ /* DQS/DQS_ delay after margining mode is programmed (tWL_DQSEN) */
+ unsigned int wrlvl_dqsen = 0;
+ /* WRLVL_SMPL: Write leveling sample time */
+ unsigned int wrlvl_smpl = 0;
+ /* WRLVL_WLR: Write leveling repetition time */
+ unsigned int wrlvl_wlr = 0;
+ /* WRLVL_START: Write leveling start time */
+ unsigned int wrlvl_start = 0;
+
+ /* Write leveling is suggested for DDR3 due to its fly-by topology */
+ if (wrlvl_en) {
+ /* tWL_MRD min = 40 nCK, we set it 64 */
+ wrlvl_mrd = 0x6;
+ /* tWL_ODTEN 128 */
+ wrlvl_odten = 0x7;
+ /* tWL_DQSEN min = 25 nCK, we set it 32 */
+ wrlvl_dqsen = 0x5;
+ /*
+ * The write leveling sample time needs to be at least
+ * 6 clocks higher than tWLO to allow enough time for
+ * propagation delay and sampling of the prime data bits.
+ */
+ wrlvl_smpl = 0xf;
+ /*
+ * Write leveling repetition time:
+ * at least tWLO + 6 clocks;
+ * we set it to 32.
+ */
+ wrlvl_wlr = 0x5;
+ /*
+ * Write leveling start time
+ * The value used for DQS_ADJUST for the first sample
+ * when write leveling is enabled.
+ */
+ wrlvl_start = 0x8;
+ /*
+ * Override the write leveling sample and start time
+ * according to specific board
+ */
+ if (popts->wrlvl_override) {
+ wrlvl_smpl = popts->wrlvl_sample;
+ wrlvl_start = popts->wrlvl_start;
+ }
+ }
+
+ ddr->ddr_wrlvl_cntl = (0
+ | ((wrlvl_en & 0x1) << 31)
+ | ((wrlvl_mrd & 0x7) << 24)
+ | ((wrlvl_odten & 0x7) << 20)
+ | ((wrlvl_dqsen & 0x7) << 16)
+ | ((wrlvl_smpl & 0xf) << 12)
+ | ((wrlvl_wlr & 0x7) << 8)
+ | ((wrlvl_start & 0x1F) << 0)
+ );
+}
+
+/* DDR Self Refresh Counter (DDR_SR_CNTR) */
+static void set_ddr_sr_cntr(fsl_ddr_cfg_regs_t *ddr, unsigned int sr_it)
+{
+ /* Self Refresh Idle Threshold */
+ ddr->ddr_sr_cntr = (sr_it & 0xF) << 16;
+}
+
+/* DDR SDRAM Register Control Word 1 (DDR_SDRAM_RCW_1) */
+static void set_ddr_sdram_rcw_1(fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int rcw0 = 0; /* RCW0: Register Control Word 0 */
+ unsigned int rcw1 = 0; /* RCW1: Register Control Word 1 */
+ unsigned int rcw2 = 0; /* RCW2: Register Control Word 2 */
+ unsigned int rcw3 = 0; /* RCW3: Register Control Word 3 */
+ unsigned int rcw4 = 0; /* RCW4: Register Control Word 4 */
+ unsigned int rcw5 = 0; /* RCW5: Register Control Word 5 */
+ unsigned int rcw6 = 0; /* RCW6: Register Control Word 6 */
+ unsigned int rcw7 = 0; /* RCW7: Register Control Word 7 */
+
+ ddr->ddr_sdram_rcw_1 = (0
+ | ((rcw0 & 0xF) << 28)
+ | ((rcw1 & 0xF) << 24)
+ | ((rcw2 & 0xF) << 20)
+ | ((rcw3 & 0xF) << 16)
+ | ((rcw4 & 0xF) << 12)
+ | ((rcw5 & 0xF) << 8)
+ | ((rcw6 & 0xF) << 4)
+ | ((rcw7 & 0xF) << 0)
+ );
+}
+
+/* DDR SDRAM Register Control Word 2 (DDR_SDRAM_RCW_2) */
+static void set_ddr_sdram_rcw_2(fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int rcw8 = 0; /* RCW8: Register Control Word 8 */
+ unsigned int rcw9 = 0; /* RCW9: Register Control Word 9 */
+ unsigned int rcw10 = 0; /* RCW10: Register Control Word 10 */
+ unsigned int rcw11 = 0; /* RCW11: Register Control Word 11 */
+ unsigned int rcw12 = 0; /* RCW12: Register Control Word 12 */
+ unsigned int rcw13 = 0; /* RCW13: Register Control Word 13 */
+ unsigned int rcw14 = 0; /* RCW14: Register Control Word 14 */
+ unsigned int rcw15 = 0; /* RCW15: Register Control Word 15 */
+
+ ddr->ddr_sdram_rcw_2 = (0
+ | ((rcw8 & 0xF) << 28)
+ | ((rcw9 & 0xF) << 24)
+ | ((rcw10 & 0xF) << 20)
+ | ((rcw11 & 0xF) << 16)
+ | ((rcw12 & 0xF) << 12)
+ | ((rcw13 & 0xF) << 8)
+ | ((rcw14 & 0xF) << 4)
+ | ((rcw15 & 0xF) << 0)
+ );
+}
+
+unsigned int
+check_fsl_memctl_config_regs(const fsl_ddr_cfg_regs_t *ddr)
+{
+ unsigned int res = 0;
+
+ /*
+ * Check that DDR_SDRAM_CFG[RD_EN] and DDR_SDRAM_CFG[2T_EN] are
+ * not set at the same time.
+ */
+ if (ddr->ddr_sdram_cfg & 0x10000000
+ && ddr->ddr_sdram_cfg & 0x00008000) {
+ printf("Error: DDR_SDRAM_CFG[RD_EN] and DDR_SDRAM_CFG[2T_EN] "
+ " should not be set at the same time.\n");
+ res++;
+ }
+
+ return res;
+}
+
+unsigned int
+compute_fsl_memctl_config_regs(const memctl_options_t *popts,
+ fsl_ddr_cfg_regs_t *ddr,
+ const common_timing_params_t *common_dimm,
+ const dimm_params_t *dimm_params,
+ unsigned int dbw_cap_adj)
+{
+ unsigned int i;
+ unsigned int cas_latency;
+ unsigned int additive_latency;
+ unsigned int sr_it;
+ unsigned int zq_en;
+ unsigned int wrlvl_en;
+
+ memset(ddr, 0, sizeof(fsl_ddr_cfg_regs_t));
+
+ if (common_dimm == NULL) {
+ printf("Error: subset DIMM params struct null pointer\n");
+ return 1;
+ }
+
+ /*
+ * Process overrides first.
+ *
+ * FIXME: somehow add derated caslat to this
+ */
+ cas_latency = (popts->cas_latency_override)
+ ? popts->cas_latency_override_value
+ : common_dimm->lowest_common_SPD_caslat;
+
+ additive_latency = (popts->additive_latency_override)
+ ? popts->additive_latency_override_value
+ : common_dimm->additive_latency;
+
+ sr_it = (popts->auto_self_refresh_en)
+ ? popts->sr_it
+ : 0;
+ /* ZQ calibration */
+ zq_en = (popts->zq_en) ? 1 : 0;
+ /* write leveling */
+ wrlvl_en = (popts->wrlvl_en) ? 1 : 0;
+
+ /* Chip Select Memory Bounds (CSn_BNDS) */
+ for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
+ unsigned long long ea = 0, sa = 0;
+
+ if (popts->ba_intlv_ctl && (i > 0) &&
+ ((popts->ba_intlv_ctl & 0x60) != FSL_DDR_CS2_CS3 )) {
+ /* Don't set up boundaries for chip selects
+ * other than CS0 if bank interleaving is
+ * enabled and it is not CS2+CS3 interleaving.
+ * But we still need to set ODT_RD_CFG and
+ * ODT_WR_CFG for CS1_CONFIG here.
+ */
+ set_csn_config(i, ddr, popts, dimm_params);
+ break;
+ }
+
+ if (dimm_params[i/2].n_ranks == 0) {
+ debug("Skipping setup of CS%u "
+ "because n_ranks on DIMM %u is 0\n", i, i/2);
+ continue;
+ }
+ if (popts->memctl_interleaving && popts->ba_intlv_ctl) {
+ /*
+ * This handles the superbank, 2CS case:
+ * there are two memory controllers configured
+ * identically, memory is interleaved between them,
+ * and each controller uses rank interleaving within
+ * itself. Therefore the address range programmed
+ * into each controller covers twice the memory
+ * actually present on that controller.
+ */
+ unsigned long long rank_density
+ = dimm_params[0].capacity;
+ ea = (2 * (rank_density >> dbw_cap_adj)) - 1;
+ }
+ else if (!popts->memctl_interleaving && popts->ba_intlv_ctl) {
+ /*
+ * If memory interleaving between controllers is NOT
+ * enabled, the starting address for each memory
+ * controller is distinct. However, because rank
+ * interleaving is enabled, the starting and ending
+ * addresses of the total memory on that memory
+ * controller needs to be programmed into its
+ * respective CS0_BNDS.
+ */
+ unsigned long long rank_density
+ = dimm_params[i/2].rank_density;
+ switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
+ case FSL_DDR_CS0_CS1_CS2_CS3:
+ /* CS0+CS1+CS2+CS3 interleaving, only CS0_BNDS
+ * needs to be set.
+ */
+ sa = common_dimm->base_address;
+ ea = sa + (4 * (rank_density >> dbw_cap_adj))-1;
+ break;
+ case FSL_DDR_CS0_CS1_AND_CS2_CS3:
+ /* CS0+CS1 and CS2+CS3 interleaving, CS0_BNDS
+ * and CS2_BNDS need to be set.
+ */
+ if (!(i&1)) {
+ sa = dimm_params[i/2].base_address;
+ ea = sa + (i * (rank_density >>
+ dbw_cap_adj)) - 1;
+ }
+ break;
+ case FSL_DDR_CS0_CS1:
+ /* CS0+CS1 interleaving, CS0_BNDS needs
+ * to be set
+ */
+ sa = common_dimm->base_address;
+ ea = sa + (2 * (rank_density >> dbw_cap_adj))-1;
+ break;
+ case FSL_DDR_CS2_CS3:
+ /* CS2+CS3 interleaving*/
+ if (i == 2) {
+ sa = dimm_params[i/2].base_address;
+ ea = sa + (2 * (rank_density >>
+ dbw_cap_adj)) - 1;
+ }
+ break;
+ default: /* No bank(chip-select) interleaving */
+ break;
+ }
+ }
+ else if (popts->memctl_interleaving && !popts->ba_intlv_ctl) {
+ /*
+ * Only the rank on CS0 of each memory controller may
+ * be used if memory controller interleaving is used
+ * without rank interleaving within each memory
+ * controller. However, the ending address programmed
+ * into each CS0 must be the sum of the amount of
+ * memory in the two CS0 ranks.
+ */
+ if (i == 0) {
+ unsigned long long rank_density
+ = dimm_params[0].rank_density;
+ ea = (2 * (rank_density >> dbw_cap_adj)) - 1;
+ }
+
+ }
+ else if (!popts->memctl_interleaving && !popts->ba_intlv_ctl) {
+ /*
+ * No rank interleaving and no memory controller
+ * interleaving.
+ */
+ unsigned long long rank_density
+ = dimm_params[i/2].rank_density;
+ sa = dimm_params[i/2].base_address;
+ ea = sa + (rank_density >> dbw_cap_adj) - 1;
+ if (i&1) {
+ if ((dimm_params[i/2].n_ranks == 1)) {
+ /* Odd chip select, single-rank dimm */
+ sa = 0;
+ ea = 0;
+ } else {
+ /* Odd chip select, dual-rank DIMM */
+ sa += rank_density >> dbw_cap_adj;
+ ea += rank_density >> dbw_cap_adj;
+ }
+ }
+ }
+
+ sa >>= 24;
+ ea >>= 24;
+
+ ddr->cs[i].bnds = (0
+ | ((sa & 0xFFF) << 16) /* starting address MSB */
+ | ((ea & 0xFFF) << 0) /* ending address MSB */
+ );
+
+ debug("FSLDDR: cs[%d]_bnds = 0x%08x\n", i, ddr->cs[i].bnds);
+ set_csn_config(i, ddr, popts, dimm_params);
+ set_csn_config_2(i, ddr);
+ }
+
+#if !defined(CONFIG_FSL_DDR1)
+ set_timing_cfg_0(ddr);
+#endif
+
+ set_timing_cfg_3(ddr, common_dimm, cas_latency);
+ set_timing_cfg_1(ddr, popts, common_dimm, cas_latency);
+ set_timing_cfg_2(ddr, popts, common_dimm,
+ cas_latency, additive_latency);
+
+ set_ddr_sdram_cfg(ddr, popts, common_dimm);
+
+ set_ddr_sdram_cfg_2(ddr, popts);
+ set_ddr_sdram_mode(ddr, popts, common_dimm,
+ cas_latency, additive_latency);
+ set_ddr_sdram_mode_2(ddr, popts);
+ set_ddr_sdram_interval(ddr, popts, common_dimm);
+ set_ddr_data_init(ddr);
+ set_ddr_sdram_clk_cntl(ddr, popts);
+ set_ddr_init_addr(ddr);
+ set_ddr_init_ext_addr(ddr);
+ set_timing_cfg_4(ddr, popts);
+ set_timing_cfg_5(ddr);
+
+ set_ddr_zq_cntl(ddr, zq_en);
+ set_ddr_wrlvl_cntl(ddr, wrlvl_en, popts);
+
+ set_ddr_sr_cntr(ddr, sr_it);
+
+ set_ddr_sdram_rcw_1(ddr);
+ set_ddr_sdram_rcw_2(ddr);
+
+ return check_fsl_memctl_config_regs(ddr);
+}
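
As a worked example of the CSn_BNDS encoding computed near the end of compute_fsl_memctl_config_regs() above (illustrative only, not part of the patch): a single 1 GiB rank at physical address 0, with no interleaving and dbw_cap_adj = 0, gives

	sa = 0x00000000;
	ea = 0x3FFFFFFF;	/* base + rank size - 1 */
	sa >>= 24;		/* 0x000 */
	ea >>= 24;		/* 0x03F */
	bnds = ((sa & 0xFFF) << 16) | (ea & 0xFFF);	/* 0x0000003F */

so the two 12-bit fields hold physical address bits 35:24, i.e. the bounds registers work at a 16 MiB granularity.
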
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/ddr.h b/arch/ppc/cpu/mpc8xxx/ddr/ddr.h
new file mode 100644
index 0000000..f122075
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/ddr.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef FSL_DDR_MAIN_H
+#define FSL_DDR_MAIN_H
+
+#include <asm/fsl_ddr_sdram.h>
+#include <asm/fsl_ddr_dimm_params.h>
+
+#include "common_timing_params.h"
+
+/*
+ * Bind the main DDR setup driver's generic names
+ * to this specific DDR technology.
+ */
+static __inline__ int
+compute_dimm_parameters(const generic_spd_eeprom_t *spd,
+ dimm_params_t *pdimm,
+ unsigned int dimm_number)
+{
+ return ddr_compute_dimm_parameters(spd, pdimm, dimm_number);
+}
+
+/*
+ * Data Structures
+ *
+ * All data structures have to be on the stack
+ */
+#define CONFIG_SYS_NUM_DDR_CTLRS CONFIG_NUM_DDR_CONTROLLERS
+#define CONFIG_SYS_DIMM_SLOTS_PER_CTLR CONFIG_DIMM_SLOTS_PER_CTLR
+
+typedef struct {
+ generic_spd_eeprom_t
+ spd_installed_dimms[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_SYS_DIMM_SLOTS_PER_CTLR];
+ struct dimm_params_s
+ dimm_params[CONFIG_SYS_NUM_DDR_CTLRS][CONFIG_SYS_DIMM_SLOTS_PER_CTLR];
+ memctl_options_t memctl_opts[CONFIG_SYS_NUM_DDR_CTLRS];
+ common_timing_params_t common_timing_params[CONFIG_SYS_NUM_DDR_CTLRS];
+ fsl_ddr_cfg_regs_t fsl_ddr_config_reg[CONFIG_SYS_NUM_DDR_CTLRS];
+} fsl_ddr_info_t;
+
+/* Compute steps */
+#define STEP_GET_SPD (1 << 0)
+#define STEP_COMPUTE_DIMM_PARMS (1 << 1)
+#define STEP_COMPUTE_COMMON_PARMS (1 << 2)
+#define STEP_GATHER_OPTS (1 << 3)
+#define STEP_ASSIGN_ADDRESSES (1 << 4)
+#define STEP_COMPUTE_REGS (1 << 5)
+#define STEP_PROGRAM_REGS (1 << 6)
+#define STEP_ALL 0xFFF
+
+extern unsigned long long
+fsl_ddr_compute(fsl_ddr_info_t *pinfo, unsigned int start_step);
+
+extern const char * step_to_string(unsigned int step);
+
+extern unsigned int
+compute_fsl_memctl_config_regs(const memctl_options_t *popts,
+ fsl_ddr_cfg_regs_t *ddr,
+ const common_timing_params_t *common_dimm,
+ const dimm_params_t *dimm_parameters,
+ unsigned int dbw_capacity_adjust);
+extern unsigned int
+compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
+ common_timing_params_t *outpdimm,
+ unsigned int number_of_dimms);
+extern unsigned int populate_memctl_options(int all_DIMMs_registered,
+ memctl_options_t *popts,
+ dimm_params_t *pdimm,
+ unsigned int ctrl_num);
+
+extern unsigned int mclk_to_picos(unsigned int mclk);
+extern unsigned int get_memory_clk_period_ps(void);
+extern unsigned int picos_to_mclk(unsigned int picos);
+
+#endif
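
The clock-conversion helpers declared at the end of ddr.h are implemented in ddr/util.c, which appears in the diffstat but not in this excerpt. A minimal sketch of what they do, assuming get_ddr_freq() returns the DDR data rate in Hz (the real code uses 64-bit arithmetic on the raw data rate; this is only an approximation):

unsigned int get_memory_clk_period_ps(void)
{
	/* the memory clock runs at half the DDR data rate */
	return 2000000000000ULL / get_ddr_freq(0);
}

unsigned int picos_to_mclk(unsigned int picos)
{
	const unsigned int mclk_ps = get_memory_clk_period_ps();

	/* round up so a timing parameter is never under-programmed */
	return (picos + mclk_ps - 1) / mclk_ps;
}
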
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/ddr1_dimm_params.c b/arch/ppc/cpu/mpc8xxx/ddr/ddr1_dimm_params.c
new file mode 100644
index 0000000..9184764
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/ddr1_dimm_params.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <asm/fsl_ddr_sdram.h>
+
+#include "ddr.h"
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * Study this table from Byte 31 of the JEDEC SPD Spec.
+ *
+ * DDR I DDR II
+ * Bit Size Size
+ * --- ----- ------
+ * 7 high 512MB 512MB
+ * 6 256MB 256MB
+ * 5 128MB 128MB
+ * 4 64MB 16GB
+ * 3 32MB 8GB
+ * 2 16MB 4GB
+ * 1 2GB 2GB
+ * 0 low 1GB 1GB
+ *
+ * Reorder Table to be linear by stripping the bottom
+ * 2 or 5 bits off and shifting them up to the top.
+ */
+
+static unsigned long long
+compute_ranksize(unsigned int mem_type, unsigned char row_dens)
+{
+ unsigned long long bsize;
+
+ /* Bottom 2 bits up to the top. */
+ bsize = ((row_dens >> 2) | ((row_dens & 3) << 6));
+ bsize <<= 24ULL;
+ debug("DDR: DDR I rank density = 0x%08x\n", bsize);
+
+ return bsize;
+}
+
+/*
+ * Convert a two-nibble BCD value into a cycle time.
+ * While the spec calls for nano-seconds, picos are returned.
+ *
+ * This implements the tables for bytes 9, 23 and 25 for both
+ * DDR I and II. No allowance for distinguishing the invalid
+ * fields absent for DDR I yet present in DDR II is made.
+ * (That is, cycle times of .25, .33, .66 and .75 ns are
+ * allowed for both DDR II and I.)
+ */
+static unsigned int
+convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
+{
+ /* Table look up the lower nibble, allow DDR I & II. */
+ unsigned int tenths_ps[16] = {
+ 0,
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 250, /* This and the next 3 entries valid ... */
+ 330, /* ... only for tCK calculations. */
+ 660,
+ 750,
+ 0, /* undefined */
+ 0 /* undefined */
+ };
+
+ unsigned int whole_ns = (spd_val & 0xF0) >> 4;
+ unsigned int tenth_ns = spd_val & 0x0F;
+ unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
+
+ return ps;
+}
+
+static unsigned int
+convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
+{
+ unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
+ unsigned int hundredth_ns = spd_val & 0x0F;
+ unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
+
+ return ps;
+}
+
+static unsigned int byte40_table_ps[8] = {
+ 0,
+ 250,
+ 330,
+ 500,
+ 660,
+ 750,
+ 0, /* supposed to be RFC, but not sure what that means */
+ 0 /* Undefined */
+};
+
+static unsigned int
+compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
+{
+ unsigned int trfc_ps;
+
+ trfc_ps = (((trctrfc_ext & 0x1) * 256) + trfc) * 1000
+ + byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
+
+ return trfc_ps;
+}
+
+static unsigned int
+compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
+{
+ unsigned int trc_ps;
+
+ trc_ps = trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
+
+ return trc_ps;
+}
+
+/*
+ * tCKmax from DDR I SPD Byte 43
+ *
+ * Bits 7:2 == whole ns
+ * Bits 1:0 == quarter ns
+ * 00 == 0.00 ns
+ * 01 == 0.25 ns
+ * 10 == 0.50 ns
+ * 11 == 0.75 ns
+ *
+ * Returns picoseconds.
+ */
+static unsigned int
+compute_tckmax_from_spd_ps(unsigned int byte43)
+{
+ return (byte43 >> 2) * 1000 + (byte43 & 0x3) * 250;
+}
+
+/*
+ * Determine Refresh Rate. Ignore self refresh bit on DDR I.
+ * Table from SPD Spec, Byte 12, converted to picoseconds and
+ * filled in with "default" normal values.
+ */
+static unsigned int
+determine_refresh_rate_ps(const unsigned int spd_refresh)
+{
+ unsigned int refresh_time_ps[8] = {
+ 15625000, /* 0 Normal 1.00x */
+ 3900000, /* 1 Reduced .25x */
+ 7800000, /* 2 Extended .50x */
+ 31300000, /* 3 Extended 2.00x */
+ 62500000, /* 4 Extended 4.00x */
+ 125000000, /* 5 Extended 8.00x */
+ 15625000, /* 6 Normal 1.00x filler */
+ 15625000, /* 7 Normal 1.00x filler */
+ };
+
+ return refresh_time_ps[spd_refresh & 0x7];
+}
+
+/*
+ * The purpose of this function is to compute a suitable
+ * CAS latency given the DRAM clock period. The SPD only
+ * defines at most 3 CAS latencies. Typically the slower in
+ * frequency the DIMM runs at, the shorter its CAS latency can be.
+ * If the DIMM is operating at a sufficiently low frequency,
+ * it may be able to run at a CAS latency shorter than the
+ * shortest SPD-defined CAS latency.
+ *
+ * If a CAS latency is not found, 0 is returned.
+ *
+ * Do this by finding in the standard speed bin table the longest
+ * tCKmin that doesn't exceed the value of mclk_ps (tCK).
+ *
+ * An assumption made is that the SDRAM device allows the
+ * CL to be programmed for a value that is lower than those
+ * advertised by the SPD. This is not always the case,
+ * as those modes not defined in the SPD are optional.
+ *
+ * CAS latency de-rating based upon values from JEDEC Standard No. 79-E,
+ * Table 11.
+ *
+ * ordinal 2, ddr1_speed_bins[1] contains tCK for CL=2
+ */
+ /* CL2.0 CL2.5 CL3.0 */
+unsigned short ddr1_speed_bins[] = {0, 7500, 6000, 5000 };
+
+unsigned int
+compute_derated_DDR1_CAS_latency(unsigned int mclk_ps)
+{
+ const unsigned int num_speed_bins = ARRAY_SIZE(ddr1_speed_bins);
+ unsigned int lowest_tCKmin_found = 0;
+ unsigned int lowest_tCKmin_CL = 0;
+ unsigned int i;
+
+ debug("mclk_ps = %u\n", mclk_ps);
+
+ for (i = 0; i < num_speed_bins; i++) {
+ unsigned int x = ddr1_speed_bins[i];
+ debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
+ i, x, lowest_tCKmin_found);
+ if (x && lowest_tCKmin_found <= x && x <= mclk_ps) {
+ lowest_tCKmin_found = x;
+ lowest_tCKmin_CL = i + 1;
+ }
+ }
+
+ debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
+
+ return lowest_tCKmin_CL;
+}
+
+/*
+ * ddr_compute_dimm_parameters for DDR1 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the dimm_params_t structure pointed by pdimm.
+ *
+ * FIXME: use #define for the retvals
+ */
+unsigned int
+ddr_compute_dimm_parameters(const ddr1_spd_eeprom_t *spd,
+ dimm_params_t *pdimm,
+ unsigned int dimm_number)
+{
+ unsigned int retval;
+
+ if (spd->mem_type) {
+ if (spd->mem_type != SPD_MEMTYPE_DDR) {
+ printf("DIMM %u: is not a DDR1 SPD.\n", dimm_number);
+ return 1;
+ }
+ } else {
+ memset(pdimm, 0, sizeof(dimm_params_t));
+ return 1;
+ }
+
+ retval = ddr1_spd_check(spd);
+ if (retval) {
+ printf("DIMM %u: failed checksum\n", dimm_number);
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = spd->nrows;
+ pdimm->rank_density = compute_ranksize(spd->mem_type, spd->bank_dens);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->data_width = spd->dataw_lsb;
+ pdimm->primary_sdram_width = spd->primw;
+ pdimm->ec_sdram_width = spd->ecw;
+
+ /*
+ * FIXME: Need to determine registered_dimm status.
+ * 1 == register buffered
+ * 0 == unbuffered
+ */
+ pdimm->registered_dimm = 0; /* unbuffered */
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = spd->nrow_addr;
+ pdimm->n_col_addr = spd->ncol_addr;
+ pdimm->n_banks_per_sdram_device = spd->nbanks;
+ pdimm->edc_config = spd->config;
+ pdimm->burst_lengths_bitmask = spd->burstl;
+ pdimm->row_density = spd->bank_dens;
+
+ /*
+ * Calculate the Maximum Data Rate based on the Minimum Cycle time.
+ * The SPD clk_cycle field (tCKmin) is measured in tenths of
+ * nanoseconds and represented as BCD.
+ */
+ pdimm->tCKmin_X_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
+ pdimm->tCKmin_X_minus_1_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
+ pdimm->tCKmin_X_minus_2_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
+
+ pdimm->tCKmax_ps = compute_tckmax_from_spd_ps(spd->tckmax);
+
+ /*
+ * Compute CAS latencies defined by SPD
+ * The SPD caslat_X should have at least 1 and at most 3 bits set.
+ *
+ * If cas_lat after masking is 0, the __ilog2 function returns
+ * 255 into the variable. This behavior is abused once.
+ */
+ pdimm->caslat_X = __ilog2(spd->cas_lat);
+ pdimm->caslat_X_minus_1 = __ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_X));
+ pdimm->caslat_X_minus_2 = __ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_X)
+ & ~(1 << pdimm->caslat_X_minus_1));
+
+ /* Compute CAS latencies below that defined by SPD */
+ pdimm->caslat_lowest_derated
+ = compute_derated_DDR1_CAS_latency(get_memory_clk_period_ps());
+
+ /* Compute timing parameters */
+ pdimm->tRCD_ps = spd->trcd * 250;
+ pdimm->tRP_ps = spd->trp * 250;
+ pdimm->tRAS_ps = spd->tras * 1000;
+
+ pdimm->tWR_ps = mclk_to_picos(3);
+ pdimm->tWTR_ps = mclk_to_picos(1);
+ pdimm->tRFC_ps = compute_trfc_ps_from_spd(0, spd->trfc);
+
+ pdimm->tRRD_ps = spd->trrd * 250;
+ pdimm->tRC_ps = compute_trc_ps_from_spd(0, spd->trc);
+
+ pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);
+
+ pdimm->tIS_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
+ pdimm->tIH_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
+ pdimm->tDS_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
+ pdimm->tDH_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);
+
+ pdimm->tRTP_ps = mclk_to_picos(2); /* By the book. */
+ pdimm->tDQSQ_max_ps = spd->tdqsq * 10;
+ pdimm->tQHS_ps = spd->tqhs * 10;
+
+ return 0;
+}
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/ddr2_dimm_params.c b/arch/ppc/cpu/mpc8xxx/ddr/ddr2_dimm_params.c
new file mode 100644
index 0000000..d9d0fa7
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/ddr2_dimm_params.c
@@ -0,0 +1,339 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <asm/fsl_ddr_sdram.h>
+
+#include "ddr.h"
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * See the table for Byte 31 of the JEDEC SPD Spec.
+ *
+ * DDR I DDR II
+ * Bit Size Size
+ * --- ----- ------
+ * 7 high 512MB 512MB
+ * 6 256MB 256MB
+ * 5 128MB 128MB
+ * 4 64MB 16GB
+ * 3 32MB 8GB
+ * 2 16MB 4GB
+ * 1 2GB 2GB
+ * 0 low 1GB 1GB
+ *
+ * Reorder Table to be linear by stripping the bottom
+ * 2 or 5 bits off and shifting them up to the top.
+ *
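+ * For example, row_dens = 0x01 (1GB for DDR II) becomes
+ * ((0x01 >> 5) | ((0x01 & 31) << 3)) = 8, and 8 << 27 = 0x40000000 (1 GB).
+ *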
+ */
+static unsigned long long
+compute_ranksize(unsigned int mem_type, unsigned char row_dens)
+{
+ unsigned long long bsize;
+
+ /* Bottom 5 bits up to the top. */
+ bsize = ((row_dens >> 5) | ((row_dens & 31) << 3));
+ bsize <<= 27ULL;
+ debug("DDR: DDR II rank density = 0x%08x\n", bsize);
+
+ return bsize;
+}
+
+/*
+ * Convert a two-nibble BCD value into a cycle time.
+ * While the spec calls for nano-seconds, picos are returned.
+ *
+ * This implements the tables for bytes 9, 23 and 25 for both
+ * DDR I and II. No allowance for distinguishing the invalid
+ * fields absent for DDR I yet present in DDR II is made.
+ * (That is, cycle times of .25, .33, .66 and .75 ns are
+ * allowed for both DDR II and I.)
+ */
+static unsigned int
+convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
+{
+ /* Table look up the lower nibble, allow DDR I & II. */
+ unsigned int tenths_ps[16] = {
+ 0,
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 250, /* This and the next 3 entries valid ... */
+ 330, /* ... only for tCK calculations. */
+ 660,
+ 750,
+ 0, /* undefined */
+ 0 /* undefined */
+ };
+
+ unsigned int whole_ns = (spd_val & 0xF0) >> 4;
+ unsigned int tenth_ns = spd_val & 0x0F;
+ unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
+
+ return ps;
+}
+
+static unsigned int
+convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
+{
+ unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
+ unsigned int hundredth_ns = spd_val & 0x0F;
+ unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
+
+ return ps;
+}
+
+static unsigned int byte40_table_ps[8] = {
+ 0,
+ 250,
+ 330,
+ 500,
+ 660,
+ 750,
+ 0, /* supposed to be RFC, but not sure what that means */
+ 0 /* Undefined */
+};
+
+static unsigned int
+compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
+{
+ unsigned int trfc_ps;
+
+ trfc_ps = (((trctrfc_ext & 0x1) * 256) + trfc) * 1000
+ + byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
+
+ return trfc_ps;
+}
+
+static unsigned int
+compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
+{
+ unsigned int trc_ps;
+
+ trc_ps = trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
+
+ return trc_ps;
+}
+
+/*
+ * Determine Refresh Rate. Ignore self refresh bit on DDR I.
+ * Table from SPD Spec, Byte 12, converted to picoseconds and
+ * filled in with "default" normal values.
+ */
+static unsigned int
+determine_refresh_rate_ps(const unsigned int spd_refresh)
+{
+ unsigned int refresh_time_ps[8] = {
+ 15625000, /* 0 Normal 1.00x */
+ 3900000, /* 1 Reduced .25x */
+ 7800000, /* 2 Extended .50x */
+ 31300000, /* 3 Extended 2.00x */
+ 62500000, /* 4 Extended 4.00x */
+ 125000000, /* 5 Extended 8.00x */
+ 15625000, /* 6 Normal 1.00x filler */
+ 15625000, /* 7 Normal 1.00x filler */
+ };
+
+ return refresh_time_ps[spd_refresh & 0x7];
+}
+
+/*
+ * The purpose of this function is to compute a suitable
+ * CAS latency given the DRAM clock period. The SPD only
+ * defines at most 3 CAS latencies. Typically the slower in
+ * frequency the DIMM runs at, the shorter its CAS latency can be.
+ * If the DIMM is operating at a sufficiently low frequency,
+ * it may be able to run at a CAS latency shorter than the
+ * shortest SPD-defined CAS latency.
+ *
+ * If a CAS latency is not found, 0 is returned.
+ *
+ * Do this by finding in the standard speed bin table the longest
+ * tCKmin that doesn't exceed the value of mclk_ps (tCK).
+ *
+ * An assumption made is that the SDRAM device allows the
+ * CL to be programmed for a value that is lower than those
+ * advertised by the SPD. This is not always the case,
+ * as those modes not defined in the SPD are optional.
+ *
+ * CAS latency de-rating based upon values from JEDEC Standard No. 79-2C
+ * Table 40, "DDR2 SDRAM standard speed bins and tCK, tRCD, tRP, tRAS,
+ * and tRC for corresponding bin"
+ *
+ * ordinal 2, ddr2_speed_bins[1] contains tCK for CL=3
+ * Not certain if any good value exists for CL=2
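+ *
+ * For example, mclk_ps = 5000 (DDR2-400) returns CL = 3, and
+ * mclk_ps = 3750 (DDR2-533) returns CL = 4.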
+ */
+ /* CL2 CL3 CL4 CL5 CL6 */
+unsigned short ddr2_speed_bins[] = { 0, 5000, 3750, 3000, 2500 };
+
+unsigned int
+compute_derated_DDR2_CAS_latency(unsigned int mclk_ps)
+{
+ const unsigned int num_speed_bins = ARRAY_SIZE(ddr2_speed_bins);
+ unsigned int lowest_tCKmin_found = 0;
+ unsigned int lowest_tCKmin_CL = 0;
+ unsigned int i;
+
+ debug("mclk_ps = %u\n", mclk_ps);
+
+ for (i = 0; i < num_speed_bins; i++) {
+ unsigned int x = ddr2_speed_bins[i];
+ debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
+ i, x, lowest_tCKmin_found);
+ if (x && x <= mclk_ps && x >= lowest_tCKmin_found ) {
+ lowest_tCKmin_found = x;
+ lowest_tCKmin_CL = i + 2;
+ }
+ }
+
+ debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
+
+ return lowest_tCKmin_CL;
+}
+
+/*
+ * ddr_compute_dimm_parameters for DDR2 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the dimm_params_t structure pointed by pdimm.
+ *
+ * FIXME: use #define for the retvals
+ */
+unsigned int
+ddr_compute_dimm_parameters(const ddr2_spd_eeprom_t *spd,
+ dimm_params_t *pdimm,
+ unsigned int dimm_number)
+{
+ unsigned int retval;
+
+ if (spd->mem_type) {
+ if (spd->mem_type != SPD_MEMTYPE_DDR2) {
+ printf("DIMM %u: is not a DDR2 SPD.\n", dimm_number);
+ return 1;
+ }
+ } else {
+ memset(pdimm, 0, sizeof(dimm_params_t));
+ return 1;
+ }
+
+ retval = ddr2_spd_check(spd);
+ if (retval) {
+ printf("DIMM %u: failed checksum\n", dimm_number);
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = (spd->mod_ranks & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd->mem_type, spd->rank_dens);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->data_width = spd->dataw;
+ pdimm->primary_sdram_width = spd->primw;
+ pdimm->ec_sdram_width = spd->ecw;
+
+ /* FIXME: what about registered SO-DIMM? */
+ switch (spd->dimm_type) {
+ case 0x01: /* RDIMM */
+ case 0x10: /* Mini-RDIMM */
+ pdimm->registered_dimm = 1; /* register buffered */
+ break;
+
+ case 0x02: /* UDIMM */
+ case 0x04: /* SO-DIMM */
+ case 0x08: /* Micro-DIMM */
+ case 0x20: /* Mini-UDIMM */
+ pdimm->registered_dimm = 0; /* unbuffered */
+ break;
+
+ default:
+ printf("unknown dimm_type 0x%02X\n", spd->dimm_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = spd->nrow_addr;
+ pdimm->n_col_addr = spd->ncol_addr;
+ pdimm->n_banks_per_sdram_device = spd->nbanks;
+ pdimm->edc_config = spd->config;
+ pdimm->burst_lengths_bitmask = spd->burstl;
+ pdimm->row_density = spd->rank_dens;
+
+ /*
+ * Calculate the Maximum Data Rate based on the Minimum Cycle time.
+ * The SPD clk_cycle field (tCKmin) is measured in tenths of
+ * nanoseconds and represented as BCD.
+ */
+ pdimm->tCKmin_X_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
+ pdimm->tCKmin_X_minus_1_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
+ pdimm->tCKmin_X_minus_2_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
+
+ pdimm->tCKmax_ps = convert_bcd_tenths_to_cycle_time_ps(spd->tckmax);
+
+ /*
+ * Compute CAS latencies defined by SPD
+ * The SPD caslat_X should have at least 1 and at most 3 bits set.
+ *
+ * If cas_lat after masking is 0, the __ilog2 function returns
+ * 255 into the variable. This behavior is abused once.
+ */
+ pdimm->caslat_X = __ilog2(spd->cas_lat);
+ pdimm->caslat_X_minus_1 = __ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_X));
+ pdimm->caslat_X_minus_2 = __ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_X)
+ & ~(1 << pdimm->caslat_X_minus_1));
+
+ /* Compute CAS latencies below that defined by SPD */
+ pdimm->caslat_lowest_derated
+ = compute_derated_DDR2_CAS_latency(get_memory_clk_period_ps());
+
+ /* Compute timing parameters */
+ pdimm->tRCD_ps = spd->trcd * 250;
+ pdimm->tRP_ps = spd->trp * 250;
+ pdimm->tRAS_ps = spd->tras * 1000;
+
+ pdimm->tWR_ps = spd->twr * 250;
+ pdimm->tWTR_ps = spd->twtr * 250;
+ pdimm->tRFC_ps = compute_trfc_ps_from_spd(spd->trctrfc_ext, spd->trfc);
+
+ pdimm->tRRD_ps = spd->trrd * 250;
+ pdimm->tRC_ps = compute_trc_ps_from_spd(spd->trctrfc_ext, spd->trc);
+
+ pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);
+
+ pdimm->tIS_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
+ pdimm->tIH_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
+ pdimm->tDS_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
+ pdimm->tDH_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);
+
+ pdimm->tRTP_ps = spd->trtp * 250;
+ pdimm->tDQSQ_max_ps = spd->tdqsq * 10;
+ pdimm->tQHS_ps = spd->tqhs * 10;
+
+ return 0;
+}
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/ddr3_dimm_params.c b/arch/ppc/cpu/mpc8xxx/ddr/ddr3_dimm_params.c
new file mode 100644
index 0000000..d4199ba
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/ddr3_dimm_params.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2008-2009 Freescale Semiconductor, Inc.
+ * Dave Liu <daveliu@freescale.com>
+ *
+ * Calculate the organization and timing parameters
+ * from the DDR3 SPD; please refer to the spec,
+ * JEDEC Standard No. 21-C, 4_01_02_11R18.pdf.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <asm/fsl_ddr_sdram.h>
+
+#include "ddr.h"
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * each rank size =
+ * sdram capacity(bit) / 8 * primary bus width / sdram width
+ *
+ * where: sdram capacity = spd byte4[3:0]
+ * primary bus width = spd byte8[2:0]
+ * sdram width = spd byte7[2:0]
+ *
+ * SPD byte4 - sdram density and banks
+ * bit[3:0] size(bit) size(byte)
+ * 0000 256Mb 32MB
+ * 0001 512Mb 64MB
+ * 0010 1Gb 128MB
+ * 0011 2Gb 256MB
+ * 0100 4Gb 512MB
+ * 0101 8Gb 1GB
+ * 0110 16Gb 2GB
+ *
+ * SPD byte8 - module memory bus width
+ * bit[2:0] primary bus width
+ * 000 8bits
+ * 001 16bits
+ * 010 32bits
+ * 011 64bits
+ *
+ * SPD byte7 - module organization
+ * bit[2:0] sdram device width
+ * 000 4bits
+ * 001 8bits
+ * 010 16bits
+ * 011 32bits
+ *
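+ * For example, density_banks[3:0] = 0x3 (2Gb), bus_width[2:0] = 0x3
+ * (64 bits) and organization[2:0] = 0x1 (x8) give
+ * 1 << (31 - 3 + 6 - 3) = 2 GB per rank.
+ *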
+ */
+static unsigned long long
+compute_ranksize(const ddr3_spd_eeprom_t *spd)
+{
+ unsigned long long bsize;
+
+ int nbit_sdram_cap_bsize = 0;
+ int nbit_primary_bus_width = 0;
+ int nbit_sdram_width = 0;
+
+ if ((spd->density_banks & 0xf) < 7)
+ nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
+ if ((spd->bus_width & 0x7) < 4)
+ nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
+ if ((spd->organization & 0x7) < 4)
+ nbit_sdram_width = (spd->organization & 0x7) + 2;
+
+ bsize = 1ULL << (nbit_sdram_cap_bsize - 3
+ + nbit_primary_bus_width - nbit_sdram_width);
+
+ debug("DDR: DDR III rank density = 0x%16lx\n", bsize);
+
+ return bsize;
+}
+
+/*
+ * ddr_compute_dimm_parameters for DDR3 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the dimm_params_t structure pointed by pdimm.
+ *
+ */
+unsigned int
+ddr_compute_dimm_parameters(const ddr3_spd_eeprom_t *spd,
+ dimm_params_t *pdimm,
+ unsigned int dimm_number)
+{
+ unsigned int retval;
+ unsigned int mtb_ps;
+
+ if (spd->mem_type) {
+ if (spd->mem_type != SPD_MEMTYPE_DDR3) {
+ printf("DIMM %u: is not a DDR3 SPD.\n", dimm_number);
+ return 1;
+ }
+ } else {
+ memset(pdimm, 0, sizeof(dimm_params_t));
+ return 1;
+ }
+
+ retval = ddr3_spd_check(spd);
+ if (retval) {
+ printf("DIMM %u: failed checksum\n", dimm_number);
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
+ if ((spd->bus_width >> 3) & 0x3)
+ pdimm->ec_sdram_width = 8;
+ else
+ pdimm->ec_sdram_width = 0;
+ pdimm->data_width = pdimm->primary_sdram_width
+ + pdimm->ec_sdram_width;
+
+ switch (spd->module_type & 0xf) {
+ case 0x01: /* RDIMM */
+ case 0x05: /* Mini-RDIMM */
+ pdimm->registered_dimm = 1; /* register buffered */
+ break;
+
+ case 0x02: /* UDIMM */
+ case 0x03: /* SO-DIMM */
+ case 0x04: /* Micro-DIMM */
+ case 0x06: /* Mini-UDIMM */
+ pdimm->registered_dimm = 0; /* unbuffered */
+ break;
+
+ default:
+ printf("unknown dimm_type 0x%02X\n", spd->module_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+ pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+ pdimm->n_banks_per_sdram_device = 8 << ((spd->density_banks >> 4) & 0x7);
+
+ /*
+	 * The SPD spec does not have an ECC bit;
+	 * consider the DIMM ECC-capable when the
+	 * extension bus exists.
+ */
+ if (pdimm->ec_sdram_width)
+ pdimm->edc_config = 0x02;
+ else
+ pdimm->edc_config = 0x00;
+
+ /*
+	 * The SPD spec does not have a burst length byte,
+	 * but the DDR3 spec natively supports BL8 and BC4:
+	 * BL8 is bit 3, BC4 is bit 2.
+ */
+ pdimm->burst_lengths_bitmask = 0x0c;
+ pdimm->row_density = __ilog2(pdimm->rank_density);
+
+ /* MTB - medium timebase
+ * The unit in the SPD spec is ns,
+ * We convert it to ps.
+ * eg: MTB = 0.125ns (125ps)
+ */
+	mtb_ps = (spd->mtb_dividend * 1000) / spd->mtb_divisor;
+ pdimm->mtb_ps = mtb_ps;
+
+ /*
+ * sdram minimum cycle time
+ * we assume the MTB is 0.125ns
+ * eg:
+ * tCK_min=15 MTB (1.875ns) ->DDR3-1066
+ * =12 MTB (1.5ns) ->DDR3-1333
+ * =10 MTB (1.25ns) ->DDR3-1600
+ */
+ pdimm->tCKmin_X_ps = spd->tCK_min * mtb_ps;
+
+ /*
+ * CAS latency supported
+ * bit4 - CL4
+ * bit5 - CL5
+ * bit18 - CL18
+ */
+ pdimm->caslat_X = ((spd->caslat_msb << 8) | spd->caslat_lsb) << 4;
+
+ /*
+ * min CAS latency time
+ * eg: tAA_min =
+ * DDR3-800D 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->tAA_ps = spd->tAA_min * mtb_ps;
+
+ /*
+ * min write recovery time
+ * eg:
+ * tWR_min = 120 MTB (15ns) -> all speed grades.
+ */
+ pdimm->tWR_ps = spd->tWR_min * mtb_ps;
+
+ /*
+ * min RAS to CAS delay time
+ * eg: tRCD_min =
+ * DDR3-800 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+	 * DDR3-1600H	90 MTB (11.25ns)
+ */
+ pdimm->tRCD_ps = spd->tRCD_min * mtb_ps;
+
+ /*
+ * min row active to row active delay time
+ * eg: tRRD_min =
+ * DDR3-800(1KB page) 80 MTB (10ns)
+ * DDR3-1333(1KB page) 48 MTB (6ns)
+ */
+ pdimm->tRRD_ps = spd->tRRD_min * mtb_ps;
+
+ /*
+ * min row precharge delay time
+ * eg: tRP_min =
+ * DDR3-800D 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->tRP_ps = spd->tRP_min * mtb_ps;
+
+ /* min active to precharge delay time
+ * eg: tRAS_min =
+ * DDR3-800D 300 MTB (37.5ns)
+ * DDR3-1066F 300 MTB (37.5ns)
+ * DDR3-1333H 288 MTB (36ns)
+ * DDR3-1600H 280 MTB (35ns)
+ */
+ pdimm->tRAS_ps = (((spd->tRAS_tRC_ext & 0xf) << 8) | spd->tRAS_min_lsb)
+ * mtb_ps;
+ /*
+	 * min active to active/refresh delay time
+ * eg: tRC_min =
+ * DDR3-800D 400 MTB (50ns)
+ * DDR3-1066F 405 MTB (50.625ns)
+ * DDR3-1333H 396 MTB (49.5ns)
+ * DDR3-1600H 370 MTB (46.25ns)
+ */
+ pdimm->tRC_ps = (((spd->tRAS_tRC_ext & 0xf0) << 4) | spd->tRC_min_lsb)
+ * mtb_ps;
+ /*
+ * min refresh recovery delay time
+ * eg: tRFC_min =
+ * 512Mb 720 MTB (90ns)
+ * 1Gb 880 MTB (110ns)
+ * 2Gb 1280 MTB (160ns)
+ */
+ pdimm->tRFC_ps = ((spd->tRFC_min_msb << 8) | spd->tRFC_min_lsb)
+ * mtb_ps;
+ /*
+ * min internal write to read command delay time
+ * eg: tWTR_min = 40 MTB (7.5ns) - all speed bins.
+	 * tWTR is at least 4 mclk independent of operating freq.
+ */
+ pdimm->tWTR_ps = spd->tWTR_min * mtb_ps;
+
+ /*
+ * min internal read to precharge command delay time
+ * eg: tRTP_min = 40 MTB (7.5ns) - all speed bins.
+ * tRTP is at least 4 mclk independent of operating freq.
+ */
+ pdimm->tRTP_ps = spd->tRTP_min * mtb_ps;
+
+ /*
+ * Average periodic refresh interval
+ * tREFI = 7.8 us at normal temperature range
+ * = 3.9 us at ext temperature range
+ */
+ pdimm->refresh_rate_ps = 7800000;
+
+ /*
+ * min four active window delay time
+ * eg: tFAW_min =
+ * DDR3-800(1KB page) 320 MTB (40ns)
+ * DDR3-1066(1KB page) 300 MTB (37.5ns)
+ * DDR3-1333(1KB page) 240 MTB (30ns)
+ * DDR3-1600(1KB page) 240 MTB (30ns)
+ */
+ pdimm->tFAW_ps = (((spd->tFAW_msb & 0xf) << 8) | spd->tFAW_min)
+ * mtb_ps;
+
+ /*
+	 * We need to check address mirroring for unbuffered DIMMs.
+	 * If the SPD indicates address map mirroring, the DDR
+	 * controller needs to take care of it.
+ */
+ if ((spd->module_type == SPD_MODULETYPE_UDIMM) ||
+ (spd->module_type == SPD_MODULETYPE_SODIMM) ||
+ (spd->module_type == SPD_MODULETYPE_MICRODIMM) ||
+ (spd->module_type == SPD_MODULETYPE_MINIUDIMM))
+ pdimm->mirrored_dimm = spd->mod_section.unbuffered.addr_mapping & 0x1;
+
+ return 0;
+}
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/lc_common_dimm_params.c b/arch/ppc/cpu/mpc8xxx/ddr/lc_common_dimm_params.c
new file mode 100644
index 0000000..e888e3e
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/lc_common_dimm_params.c
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <asm/fsl_ddr_sdram.h>
+
+#include "ddr.h"
+
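+/*
+ * Pick a CAS latency for DDR3: intersect the per-DIMM CL bitmasks, take
+ * the worst-case tAAmin and tCKmin across the slots, round tAAmin up to
+ * whole clocks, then step up until a commonly supported CL is found,
+ * rejecting any choice whose CL * tCK exceeds tAAmax (20 ns).
+ */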
+unsigned int
+compute_cas_latency_ddr3(const dimm_params_t *dimm_params,
+ common_timing_params_t *outpdimm,
+ unsigned int number_of_dimms)
+{
+ unsigned int i;
+ unsigned int tAAmin_ps = 0;
+ unsigned int tCKmin_X_ps = 0;
+ unsigned int common_caslat;
+ unsigned int caslat_actual;
+ unsigned int retry = 16;
+ unsigned int tmp;
+ const unsigned int mclk_ps = get_memory_clk_period_ps();
+
+ /* compute the common CAS latency supported between slots */
+ tmp = dimm_params[0].caslat_X;
+ for (i = 1; i < number_of_dimms; i++)
+ tmp &= dimm_params[i].caslat_X;
+ common_caslat = tmp;
+
+ /* compute the max tAAmin tCKmin between slots */
+ for (i = 0; i < number_of_dimms; i++) {
+ tAAmin_ps = max(tAAmin_ps, dimm_params[i].tAA_ps);
+ tCKmin_X_ps = max(tCKmin_X_ps, dimm_params[i].tCKmin_X_ps);
+ }
+ /* validate if the memory clk is in the range of dimms */
+ if (mclk_ps < tCKmin_X_ps) {
+ printf("The DIMM max tCKmin is %d ps,"
+ "doesn't support the MCLK cycle %d ps\n",
+ tCKmin_X_ps, mclk_ps);
+ return 1;
+ }
+	/* determine the actual CAS latency */
+ caslat_actual = (tAAmin_ps + mclk_ps - 1) / mclk_ps;
+ /* check if the dimms support the CAS latency */
+ while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
+ caslat_actual++;
+ retry--;
+ }
+	/* once the calculation of caslat_actual is completed,
+ * we must verify that this CAS latency value does not
+ * exceed tAAmax, which is 20 ns for all DDR3 speed grades
+ */
+ if (caslat_actual * mclk_ps > 20000) {
+ printf("The choosen cas latency %d is too large\n",
+ caslat_actual);
+ return 1;
+ }
+ outpdimm->lowest_common_SPD_caslat = caslat_actual;
+
+ return 0;
+}
+
+/*
+ * compute_lowest_common_dimm_parameters()
+ *
+ * Determine the worst-case DIMM timing parameters from the set of DIMMs
+ * whose parameters have been computed into the array pointed to
+ * by dimm_params.
+ */
+unsigned int
+compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
+ common_timing_params_t *outpdimm,
+ unsigned int number_of_dimms)
+{
+ unsigned int i;
+
+ unsigned int tCKmin_X_ps = 0;
+ unsigned int tCKmax_ps = 0xFFFFFFFF;
+ unsigned int tCKmax_max_ps = 0;
+ unsigned int tRCD_ps = 0;
+ unsigned int tRP_ps = 0;
+ unsigned int tRAS_ps = 0;
+ unsigned int tWR_ps = 0;
+ unsigned int tWTR_ps = 0;
+ unsigned int tRFC_ps = 0;
+ unsigned int tRRD_ps = 0;
+ unsigned int tRC_ps = 0;
+ unsigned int refresh_rate_ps = 0;
+ unsigned int tIS_ps = 0;
+ unsigned int tIH_ps = 0;
+ unsigned int tDS_ps = 0;
+ unsigned int tDH_ps = 0;
+ unsigned int tRTP_ps = 0;
+ unsigned int tDQSQ_max_ps = 0;
+ unsigned int tQHS_ps = 0;
+
+ unsigned int temp1, temp2;
+ unsigned int additive_latency = 0;
+#if !defined(CONFIG_FSL_DDR3)
+ const unsigned int mclk_ps = get_memory_clk_period_ps();
+ unsigned int lowest_good_caslat;
+ unsigned int not_ok;
+
+ debug("using mclk_ps = %u\n", mclk_ps);
+#endif
+
+ temp1 = 0;
+ for (i = 0; i < number_of_dimms; i++) {
+ /*
+ * If there are no ranks on this DIMM,
+ * it probably doesn't exist, so skip it.
+ */
+ if (dimm_params[i].n_ranks == 0) {
+ temp1++;
+ continue;
+ }
+
+ /*
+ * Find minimum tCKmax_ps to find fastest slow speed,
+ * i.e., this is the slowest the whole system can go.
+ */
+ tCKmax_ps = min(tCKmax_ps, dimm_params[i].tCKmax_ps);
+
+		/* Find the maximum value to determine the slowest
+		 * speed, delay, time, period, etc. */
+ tCKmin_X_ps = max(tCKmin_X_ps, dimm_params[i].tCKmin_X_ps);
+ tCKmax_max_ps = max(tCKmax_max_ps, dimm_params[i].tCKmax_ps);
+ tRCD_ps = max(tRCD_ps, dimm_params[i].tRCD_ps);
+ tRP_ps = max(tRP_ps, dimm_params[i].tRP_ps);
+ tRAS_ps = max(tRAS_ps, dimm_params[i].tRAS_ps);
+ tWR_ps = max(tWR_ps, dimm_params[i].tWR_ps);
+ tWTR_ps = max(tWTR_ps, dimm_params[i].tWTR_ps);
+ tRFC_ps = max(tRFC_ps, dimm_params[i].tRFC_ps);
+ tRRD_ps = max(tRRD_ps, dimm_params[i].tRRD_ps);
+ tRC_ps = max(tRC_ps, dimm_params[i].tRC_ps);
+ tIS_ps = max(tIS_ps, dimm_params[i].tIS_ps);
+ tIH_ps = max(tIH_ps, dimm_params[i].tIH_ps);
+ tDS_ps = max(tDS_ps, dimm_params[i].tDS_ps);
+ tDH_ps = max(tDH_ps, dimm_params[i].tDH_ps);
+ tRTP_ps = max(tRTP_ps, dimm_params[i].tRTP_ps);
+ tQHS_ps = max(tQHS_ps, dimm_params[i].tQHS_ps);
+ refresh_rate_ps = max(refresh_rate_ps,
+ dimm_params[i].refresh_rate_ps);
+
+ /*
+ * Find maximum tDQSQ_max_ps to find slowest.
+ *
+ * FIXME: is finding the slowest value the correct
+ * strategy for this parameter?
+ */
+ tDQSQ_max_ps = max(tDQSQ_max_ps, dimm_params[i].tDQSQ_max_ps);
+ }
+
+ outpdimm->ndimms_present = number_of_dimms - temp1;
+
+ if (temp1 == number_of_dimms) {
+ debug("no dimms this memory controller\n");
+ return 0;
+ }
+
+ outpdimm->tCKmin_X_ps = tCKmin_X_ps;
+ outpdimm->tCKmax_ps = tCKmax_ps;
+ outpdimm->tCKmax_max_ps = tCKmax_max_ps;
+ outpdimm->tRCD_ps = tRCD_ps;
+ outpdimm->tRP_ps = tRP_ps;
+ outpdimm->tRAS_ps = tRAS_ps;
+ outpdimm->tWR_ps = tWR_ps;
+ outpdimm->tWTR_ps = tWTR_ps;
+ outpdimm->tRFC_ps = tRFC_ps;
+ outpdimm->tRRD_ps = tRRD_ps;
+ outpdimm->tRC_ps = tRC_ps;
+ outpdimm->refresh_rate_ps = refresh_rate_ps;
+ outpdimm->tIS_ps = tIS_ps;
+ outpdimm->tIH_ps = tIH_ps;
+ outpdimm->tDS_ps = tDS_ps;
+ outpdimm->tDH_ps = tDH_ps;
+ outpdimm->tRTP_ps = tRTP_ps;
+ outpdimm->tDQSQ_max_ps = tDQSQ_max_ps;
+ outpdimm->tQHS_ps = tQHS_ps;
+
+ /* Determine common burst length for all DIMMs. */
+ temp1 = 0xff;
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks) {
+ temp1 &= dimm_params[i].burst_lengths_bitmask;
+ }
+ }
+ outpdimm->all_DIMMs_burst_lengths_bitmask = temp1;
+
+	/* Determine if all DIMMs are registered (buffered). */
+ temp1 = temp2 = 0;
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks) {
+ if (dimm_params[i].registered_dimm)
+ temp1 = 1;
+ if (!dimm_params[i].registered_dimm)
+ temp2 = 1;
+ }
+ }
+
+ outpdimm->all_DIMMs_registered = 0;
+ if (temp1 && !temp2) {
+ outpdimm->all_DIMMs_registered = 1;
+ }
+
+ outpdimm->all_DIMMs_unbuffered = 0;
+ if (!temp1 && temp2) {
+ outpdimm->all_DIMMs_unbuffered = 1;
+ }
+
+ /* CHECKME: */
+ if (!outpdimm->all_DIMMs_registered
+ && !outpdimm->all_DIMMs_unbuffered) {
+ printf("ERROR: Mix of registered buffered and unbuffered "
+ "DIMMs detected!\n");
+ }
+
+#if defined(CONFIG_FSL_DDR3)
+ if (compute_cas_latency_ddr3(dimm_params, outpdimm, number_of_dimms))
+ return 1;
+#else
+ /*
+ * Compute a CAS latency suitable for all DIMMs
+ *
+ * Strategy for SPD-defined latencies: compute only
+ * CAS latency defined by all DIMMs.
+ */
+
+ /*
+ * Step 1: find CAS latency common to all DIMMs using bitwise
+ * operation.
+ */
+ temp1 = 0xFF;
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks) {
+ temp2 = 0;
+ temp2 |= 1 << dimm_params[i].caslat_X;
+ temp2 |= 1 << dimm_params[i].caslat_X_minus_1;
+ temp2 |= 1 << dimm_params[i].caslat_X_minus_2;
+ /*
+ * FIXME: If there was no entry for X-2 (X-1) in
+ * the SPD, then caslat_X_minus_2
+ * (caslat_X_minus_1) contains either 255 or
+ * 0xFFFFFFFF because that's what the glorious
+ * __ilog2 function returns for an input of 0.
+			 * On 32-bit PowerPC, left-shift counts with bit
+			 * 26 set (which the values 255 and 0xFFFFFFFF
+			 * both have) cause the destination register to
+ * be 0. That is why this works.
+ */
+ temp1 &= temp2;
+ }
+ }
+
+ /*
+ * Step 2: check each common CAS latency against tCK of each
+ * DIMM's SPD.
+ */
+ lowest_good_caslat = 0;
+ temp2 = 0;
+ while (temp1) {
+ not_ok = 0;
+ temp2 = __ilog2(temp1);
+ debug("checking common caslat = %u\n", temp2);
+
+ /* Check if this CAS latency will work on all DIMMs at tCK. */
+ for (i = 0; i < number_of_dimms; i++) {
+ if (!dimm_params[i].n_ranks) {
+ continue;
+ }
+ if (dimm_params[i].caslat_X == temp2) {
+ if (mclk_ps >= dimm_params[i].tCKmin_X_ps) {
+ debug("CL = %u ok on DIMM %u at tCK=%u"
+ " ps with its tCKmin_X_ps of %u\n",
+ temp2, i, mclk_ps,
+ dimm_params[i].tCKmin_X_ps);
+ continue;
+ } else {
+ not_ok++;
+ }
+ }
+
+ if (dimm_params[i].caslat_X_minus_1 == temp2) {
+ unsigned int tCKmin_X_minus_1_ps
+ = dimm_params[i].tCKmin_X_minus_1_ps;
+ if (mclk_ps >= tCKmin_X_minus_1_ps) {
+ debug("CL = %u ok on DIMM %u at "
+ "tCK=%u ps with its "
+ "tCKmin_X_minus_1_ps of %u\n",
+ temp2, i, mclk_ps,
+ tCKmin_X_minus_1_ps);
+ continue;
+ } else {
+ not_ok++;
+ }
+ }
+
+ if (dimm_params[i].caslat_X_minus_2 == temp2) {
+ unsigned int tCKmin_X_minus_2_ps
+ = dimm_params[i].tCKmin_X_minus_2_ps;
+ if (mclk_ps >= tCKmin_X_minus_2_ps) {
+ debug("CL = %u ok on DIMM %u at "
+ "tCK=%u ps with its "
+ "tCKmin_X_minus_2_ps of %u\n",
+ temp2, i, mclk_ps,
+ tCKmin_X_minus_2_ps);
+ continue;
+ } else {
+ not_ok++;
+ }
+ }
+ }
+
+ if (!not_ok) {
+ lowest_good_caslat = temp2;
+ }
+
+ temp1 &= ~(1 << temp2);
+ }
+
+ debug("lowest common SPD-defined CAS latency = %u\n",
+ lowest_good_caslat);
+ outpdimm->lowest_common_SPD_caslat = lowest_good_caslat;
+
+
+ /*
+ * Compute a common 'de-rated' CAS latency.
+ *
+	 * The strategy here is to find the *highest* derated CAS latency
+	 * with the assumption that all of the DIMMs will support a derated
+	 * CAS latency higher than or equal to their lowest derated value.
+ */
+ temp1 = 0;
+ for (i = 0; i < number_of_dimms; i++) {
+ temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);
+ }
+ outpdimm->highest_common_derated_caslat = temp1;
+ debug("highest common dereated CAS latency = %u\n", temp1);
+#endif /* #if defined(CONFIG_FSL_DDR3) */
+
+ /* Determine if all DIMMs ECC capable. */
+ temp1 = 1;
+ for (i = 0; i < number_of_dimms; i++) {
+ if (dimm_params[i].n_ranks && dimm_params[i].edc_config != 2) {
+ temp1 = 0;
+ break;
+ }
+ }
+ if (temp1) {
+ debug("all DIMMs ECC capable\n");
+ } else {
+ debug("Warning: not all DIMMs ECC capable, cant enable ECC\n");
+ }
+ outpdimm->all_DIMMs_ECC_capable = temp1;
+
+#ifndef CONFIG_FSL_DDR3
+ /* FIXME: move to somewhere else to validate. */
+ if (mclk_ps > tCKmax_max_ps) {
+ printf("Warning: some of the installed DIMMs "
+ "can not operate this slowly.\n");
+ return 1;
+ }
+#endif
+ /*
+ * Compute additive latency.
+ *
+ * For DDR1, additive latency should be 0.
+ *
+ * For DDR2, with ODT enabled, use "a value" less than ACTTORW,
+ * which comes from Trcd, and also note that:
+ * add_lat + caslat must be >= 4
+ *
+ * For DDR3, we use the AL=0
+ *
+ * When to use additive latency for DDR2:
+ *
+ * I. Because you are using CL=3 and need to do ODT on writes and
+ * want functionality.
+ * 1. Are you going to use ODT? (Does your board not have
+ * additional termination circuitry for DQ, DQS, DQS_,
+ * DM, RDQS, RDQS_ for x4/x8 configs?)
+ * 2. If so, is your lowest supported CL going to be 3?
+ * 3. If so, then you must set AL=1 because
+ *
+ * WL >= 3 for ODT on writes
+ * RL = AL + CL
+ * WL = RL - 1
+ * ->
+ * WL = AL + CL - 1
+ * AL + CL - 1 >= 3
+ * AL + CL >= 4
+ * QED
+ *
+ * RL >= 3 for ODT on reads
+ * RL = AL + CL
+ *
+ * Since CL aren't usually less than 2, AL=0 is a minimum,
+ * so the WL-derived AL should be the -- FIXME?
+ *
+ * II. Because you are using auto-precharge globally and want to
+ * use additive latency (posted CAS) to get more bandwidth.
+ * 1. Are you going to use auto-precharge mode globally?
+ *
+	 *	Use additive latency and compute AL to be 1 cycle less than
+	 *	tRCD, i.e. the READ or WRITE command is in the cycle
+	 *	immediately following the ACTIVATE command.
+ *
+ * III. Because you feel like it or want to do some sort of
+ * degraded-performance experiment.
+ * 1. Do you just want to use additive latency because you feel
+ * like it?
+ *
+ * Validation: AL is less than tRCD, and within the other
+ * read-to-precharge constraints.
+ */
+
+ additive_latency = 0;
+
+#if defined(CONFIG_FSL_DDR2)
+ if (lowest_good_caslat < 4) {
+ additive_latency = picos_to_mclk(tRCD_ps) - lowest_good_caslat;
+ if (mclk_to_picos(additive_latency) > tRCD_ps) {
+ additive_latency = picos_to_mclk(tRCD_ps);
+ debug("setting additive_latency to %u because it was "
+ " greater than tRCD_ps\n", additive_latency);
+ }
+ }
+
+#elif defined(CONFIG_FSL_DDR3)
+ /*
+ * The system will not use the global auto-precharge mode.
+ * However, it uses the page mode, so we set AL=0
+ */
+ additive_latency = 0;
+#endif
+
+ /*
+ * Validate additive latency
+ * FIXME: move to somewhere else to validate
+ *
+ * AL <= tRCD(min)
+ */
+ if (mclk_to_picos(additive_latency) > tRCD_ps) {
+ printf("Error: invalid additive latency exceeds tRCD(min).\n");
+ return 1;
+ }
+
+ /*
+ * RL = CL + AL; RL >= 3 for ODT_RD_CFG to be enabled
+ * WL = RL - 1; WL >= 3 for ODT_WL_CFG to be enabled
+ * ADD_LAT (the register) must be set to a value less
+ * than ACTTORW if WL = 1, then AL must be set to 1
+ * RD_TO_PRE (the register) must be set to a minimum
+ * tRTP + AL if AL is nonzero
+ */
+
+ /*
+	 * Additive latency will be applied only if the memctl option to
+	 * use it is set.
+ */
+ outpdimm->additive_latency = additive_latency;
+
+ return 0;
+}
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/main.c b/arch/ppc/cpu/mpc8xxx/ddr/main.c
new file mode 100644
index 0000000..faa1af9
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/main.c
@@ -0,0 +1,479 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+/*
+ * Generic driver for Freescale DDR/DDR2/DDR3 memory controller.
+ * Based on code from spd_sdram.c
+ * Author: James Yang [at freescale.com]
+ */
+
+#include <common.h>
+#include <asm/fsl_ddr_sdram.h>
+
+#include "ddr.h"
+
+extern void fsl_ddr_set_lawbar(
+ const common_timing_params_t *memctl_common_params,
+ unsigned int memctl_interleaved,
+ unsigned int ctrl_num);
+
+/* processor specific function */
+extern void fsl_ddr_set_memctl_regs(const fsl_ddr_cfg_regs_t *regs,
+ unsigned int ctrl_num);
+
+/* Board-specific functions defined in each board's ddr.c */
+extern void fsl_ddr_get_spd(generic_spd_eeprom_t *ctrl_dimms_spd,
+ unsigned int ctrl_num);
+
+/*
+ * ASSUMPTIONS:
+ * - Same number of CONFIG_DIMM_SLOTS_PER_CTLR on each controller
+ * - Same memory data bus width on all controllers
+ *
+ * NOTES:
+ *
+ * The memory controller and associated documentation use confusing
+ * terminology when referring to the organization of DRAM.
+ *
+ * Here is a terminology translation table:
+ *
+ * memory controller/documentation |industry |this code |signals
+ * -------------------------------|-----------|-----------|-----------------
+ * physical bank/bank |rank |rank |chip select (CS)
+ * logical bank/sub-bank |bank |bank |bank address (BA)
+ * page/row |row |page |row address
+ * ??? |column |column |column address
+ *
+ * The naming confusion is further exacerbated by the descriptions of the
+ * memory controller interleaving feature, where accesses are interleaved
+ * _BETWEEN_ two separate memory controllers. This is configured only in
+ * CS0_CONFIG[INTLV_CTL] of each memory controller.
+ *
+ * memory controller documentation | number of chip selects
+ * | per memory controller supported
+ * --------------------------------|-----------------------------------------
+ * cache line interleaving | 1 (CS0 only)
+ * page interleaving | 1 (CS0 only)
+ * bank interleaving | 1 (CS0 only)
+ * superbank interleaving | depends on bank (chip select)
+ * | interleaving [rank interleaving]
+ * | mode used on every memory controller
+ *
+ * Even further confusing is the existence of the interleaving feature
+ * _WITHIN_ each memory controller. The feature is referred to in
+ * documentation as chip select interleaving or bank interleaving,
+ * although it is configured in the DDR_SDRAM_CFG field.
+ *
+ * Name of field | documentation name | this code
+ * -----------------------------|-----------------------|------------------
+ * DDR_SDRAM_CFG[BA_INTLV_CTL] | Bank (chip select) | rank interleaving
+ * | interleaving
+ */
+
+#ifdef DEBUG
+const char *step_string_tbl[] = {
+ "STEP_GET_SPD",
+ "STEP_COMPUTE_DIMM_PARMS",
+ "STEP_COMPUTE_COMMON_PARMS",
+ "STEP_GATHER_OPTS",
+ "STEP_ASSIGN_ADDRESSES",
+ "STEP_COMPUTE_REGS",
+ "STEP_PROGRAM_REGS",
+ "STEP_ALL"
+};
+
+const char *step_to_string(unsigned int step) {
+
+ unsigned int s = __ilog2(step);
+
+ if ((1 << s) != step)
+ return step_string_tbl[7];
+
+ return step_string_tbl[s];
+}
+#endif
+
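+/*
+ * Assign base addresses to each controller and its DIMMs: all
+ * controllers start at base 0 when memory controller interleaving is
+ * enabled, otherwise they are laid out linearly. Also reports whether
+ * every controller is configured for memory controller interleaving
+ * and for rank (chip select) interleaving.
+ */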
+int step_assign_addresses(fsl_ddr_info_t *pinfo,
+ unsigned int dbw_cap_adj[],
+ unsigned int *memctl_interleaving,
+ unsigned int *rank_interleaving)
+{
+ int i, j;
+
+ /*
+ * If a reduced data width is requested, but the SPD
+ * specifies a physically wider device, adjust the
+ * computed dimm capacities accordingly before
+ * assigning addresses.
+ */
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ unsigned int found = 0;
+
+ switch (pinfo->memctl_opts[i].data_bus_width) {
+ case 2:
+ /* 16-bit */
+ printf("can't handle 16-bit mode yet\n");
+ break;
+
+ case 1:
+ /* 32-bit */
+ for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
+ unsigned int dw;
+ dw = pinfo->dimm_params[i][j].data_width;
+ if (pinfo->dimm_params[i][j].n_ranks
+ && (dw == 72 || dw == 64)) {
+ /*
+ * FIXME: can't really do it
+ * like this because this just
+ * further reduces the memory
+ */
+ found = 1;
+ break;
+ }
+ }
+ if (found) {
+ dbw_cap_adj[i] = 1;
+ }
+ break;
+
+ case 0:
+ /* 64-bit */
+ break;
+
+ default:
+ printf("unexpected data bus width "
+ "specified controller %u\n", i);
+ return 1;
+ }
+ }
+
+ /*
+ * Check if all controllers are configured for memory
+ * controller interleaving.
+ */
+ j = 0;
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ if (pinfo->memctl_opts[i].memctl_interleaving) {
+ j++;
+ }
+ }
+ if (j == 2)
+ *memctl_interleaving = 1;
+
+ /* Check that all controllers are rank interleaving. */
+ j = 0;
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ if (pinfo->memctl_opts[i].ba_intlv_ctl) {
+ j++;
+ }
+ }
+ if (j == 2)
+ *rank_interleaving = 1;
+
+ if (*memctl_interleaving) {
+ unsigned long long addr, total_mem_per_ctlr = 0;
+ /*
+ * If interleaving between memory controllers,
+ * make each controller start at a base address
+ * of 0.
+ *
+ * Also, if bank interleaving (chip select
+ * interleaving) is enabled on each memory
+ * controller, CS0 needs to be programmed to
+ * cover the entire memory range on that memory
+ * controller
+ *
+ * Bank interleaving also implies that each
+ * addressed chip select is identical in size.
+ */
+
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ addr = 0;
+ pinfo->common_timing_params[i].base_address = 0ull;
+ for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
+ unsigned long long cap
+ = pinfo->dimm_params[i][j].capacity;
+
+ pinfo->dimm_params[i][j].base_address = addr;
+ addr += cap >> dbw_cap_adj[i];
+ total_mem_per_ctlr += cap >> dbw_cap_adj[i];
+ }
+ }
+ pinfo->common_timing_params[0].total_mem = total_mem_per_ctlr;
+ } else {
+ /*
+ * Simple linear assignment if memory
+ * controllers are not interleaved.
+ */
+ unsigned long long cur_memsize = 0;
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ u64 total_mem_per_ctlr = 0;
+ pinfo->common_timing_params[i].base_address =
+ cur_memsize;
+ for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
+ /* Compute DIMM base addresses. */
+ unsigned long long cap =
+ pinfo->dimm_params[i][j].capacity;
+ pinfo->dimm_params[i][j].base_address =
+ cur_memsize;
+ cur_memsize += cap >> dbw_cap_adj[i];
+ total_mem_per_ctlr += cap >> dbw_cap_adj[i];
+ }
+ pinfo->common_timing_params[i].total_mem =
+ total_mem_per_ctlr;
+ }
+ }
+
+ return 0;
+}
+
+unsigned long long
+fsl_ddr_compute(fsl_ddr_info_t *pinfo, unsigned int start_step)
+{
+ unsigned int i, j;
+ unsigned int all_controllers_memctl_interleaving = 0;
+ unsigned int all_controllers_rank_interleaving = 0;
+ unsigned long long total_mem = 0;
+
+ fsl_ddr_cfg_regs_t *ddr_reg = pinfo->fsl_ddr_config_reg;
+ common_timing_params_t *timing_params = pinfo->common_timing_params;
+
+ /* data bus width capacity adjust shift amount */
+ unsigned int dbw_capacity_adjust[CONFIG_NUM_DDR_CONTROLLERS];
+
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ dbw_capacity_adjust[i] = 0;
+ }
+
+ debug("starting at step %u (%s)\n",
+ start_step, step_to_string(start_step));
+
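+	/*
+	 * Note: each case below intentionally falls through to the
+	 * next, so starting at any step still runs all later steps.
+	 */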
+ switch (start_step) {
+ case STEP_GET_SPD:
+ /* STEP 1: Gather all DIMM SPD data */
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ fsl_ddr_get_spd(pinfo->spd_installed_dimms[i], i);
+ }
+
+ case STEP_COMPUTE_DIMM_PARMS:
+ /* STEP 2: Compute DIMM parameters from SPD data */
+
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ for (j = 0; j < CONFIG_DIMM_SLOTS_PER_CTLR; j++) {
+ unsigned int retval;
+ generic_spd_eeprom_t *spd =
+ &(pinfo->spd_installed_dimms[i][j]);
+ dimm_params_t *pdimm =
+ &(pinfo->dimm_params[i][j]);
+
+ retval = compute_dimm_parameters(spd, pdimm, i);
+ if (retval == 2) {
+ printf("Error: compute_dimm_parameters"
+ " non-zero returned FATAL value "
+ "for memctl=%u dimm=%u\n", i, j);
+ return 0;
+ }
+ if (retval) {
+ debug("Warning: compute_dimm_parameters"
+ " non-zero return value for memctl=%u "
+ "dimm=%u\n", i, j);
+ }
+ }
+ }
+
+ case STEP_COMPUTE_COMMON_PARMS:
+ /*
+ * STEP 3: Compute a common set of timing parameters
+ * suitable for all of the DIMMs on each memory controller
+ */
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ debug("Computing lowest common DIMM"
+ " parameters for memctl=%u\n", i);
+ compute_lowest_common_dimm_parameters(
+ pinfo->dimm_params[i],
+ &timing_params[i],
+ CONFIG_DIMM_SLOTS_PER_CTLR);
+ }
+
+ case STEP_GATHER_OPTS:
+ /* STEP 4: Gather configuration requirements from user */
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ debug("Reloading memory controller "
+ "configuration options for memctl=%u\n", i);
+ /*
+ * This "reloads" the memory controller options
+ * to defaults. If the user "edits" an option,
+ * next_step points to the step after this,
+ * which is currently STEP_ASSIGN_ADDRESSES.
+ */
+ populate_memctl_options(
+ timing_params[i].all_DIMMs_registered,
+ &pinfo->memctl_opts[i],
+ pinfo->dimm_params[i], i);
+ }
+
+ case STEP_ASSIGN_ADDRESSES:
+ /* STEP 5: Assign addresses to chip selects */
+ step_assign_addresses(pinfo,
+ dbw_capacity_adjust,
+ &all_controllers_memctl_interleaving,
+ &all_controllers_rank_interleaving);
+
+ case STEP_COMPUTE_REGS:
+ /* STEP 6: compute controller register values */
+ debug("FSL Memory ctrl cg register computation\n");
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ if (timing_params[i].ndimms_present == 0) {
+ memset(&ddr_reg[i], 0,
+ sizeof(fsl_ddr_cfg_regs_t));
+ continue;
+ }
+
+ compute_fsl_memctl_config_regs(
+ &pinfo->memctl_opts[i],
+ &ddr_reg[i], &timing_params[i],
+ pinfo->dimm_params[i],
+ dbw_capacity_adjust[i]);
+ }
+
+ default:
+ break;
+ }
+
+ /* Compute the total amount of memory. */
+
+ /*
+	 * If bank interleaving is used but NOT memory controller
+	 * interleaving, CS_BNDS describes the quantity of memory on
+	 * each memory controller, so the total is the sum across them.
+ */
+ if (!all_controllers_memctl_interleaving
+ && all_controllers_rank_interleaving) {
+ total_mem = 0;
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ total_mem += timing_params[i].total_mem;
+ }
+
+ } else {
+ /*
+ * Compute the amount of memory available just by
+ * looking for the highest valid CSn_BNDS value.
+ * This allows us to also experiment with using
+ * only CS0 when using dual-rank DIMMs.
+ */
+ unsigned int max_end = 0;
+
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ for (j = 0; j < CONFIG_CHIP_SELECTS_PER_CTRL; j++) {
+ fsl_ddr_cfg_regs_t *reg = &ddr_reg[i];
+ if (reg->cs[j].config & 0x80000000) {
+ unsigned int end;
+ end = reg->cs[j].bnds & 0xFFF;
+ if (end > max_end) {
+ max_end = end;
+ }
+ }
+ }
+ }
+
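+		/*
+		 * The CS bounds field holds the ending address in 16 MB
+		 * granules (hence the 24-bit shift); fill in the low bits
+		 * for an inclusive address and add 1 to turn it into a size.
+		 */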
+ total_mem = 1 + (((unsigned long long)max_end << 24ULL)
+ | 0xFFFFFFULL);
+ }
+
+ return total_mem;
+}
+
+/*
+ * fsl_ddr_sdram() -- this is the main function to be called by
+ * initdram() in the board file.
+ *
+ * It returns amount of memory configured in bytes.
+ */
+phys_size_t fsl_ddr_sdram(void)
+{
+ unsigned int i;
+ unsigned int memctl_interleaved;
+ unsigned long long total_memory;
+ fsl_ddr_info_t info;
+
+ /* Reset info structure. */
+ memset(&info, 0, sizeof(fsl_ddr_info_t));
+
+ /* Compute it once normally. */
+ total_memory = fsl_ddr_compute(&info, STEP_GET_SPD);
+
+ /* Check for memory controller interleaving. */
+ memctl_interleaved = 0;
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ memctl_interleaved +=
+ info.memctl_opts[i].memctl_interleaving;
+ }
+
+ if (memctl_interleaved) {
+ if (memctl_interleaved == CONFIG_NUM_DDR_CONTROLLERS) {
+ debug("memctl interleaving\n");
+ /*
+ * Change the meaning of memctl_interleaved
+ * to be "boolean".
+ */
+ memctl_interleaved = 1;
+ } else {
+ printf("Warning: memctl interleaving not "
+ "properly configured on all controllers\n");
+ memctl_interleaved = 0;
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++)
+ info.memctl_opts[i].memctl_interleaving = 0;
+ debug("Recomputing with memctl_interleaving off.\n");
+ total_memory = fsl_ddr_compute(&info,
+ STEP_ASSIGN_ADDRESSES);
+ }
+ }
+
+ /* Program configuration registers. */
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ debug("Programming controller %u\n", i);
+ if (info.common_timing_params[i].ndimms_present == 0) {
+ debug("No dimms present on controller %u; "
+ "skipping programming\n", i);
+ continue;
+ }
+
+ fsl_ddr_set_memctl_regs(&(info.fsl_ddr_config_reg[i]), i);
+ }
+
+ if (memctl_interleaved) {
+ const unsigned int ctrl_num = 0;
+
+ /* Only set LAWBAR1 if memory controller interleaving is on. */
+ fsl_ddr_set_lawbar(&info.common_timing_params[0],
+ memctl_interleaved, ctrl_num);
+ } else {
+ /*
+ * Memory controller interleaving is NOT on;
+ * set each lawbar individually.
+ */
+ for (i = 0; i < CONFIG_NUM_DDR_CONTROLLERS; i++) {
+ fsl_ddr_set_lawbar(&info.common_timing_params[i],
+ 0, i);
+ }
+ }
+
+ debug("total_memory = %llu\n", total_memory);
+
+#if !defined(CONFIG_PHYS_64BIT)
+ /* Check for 4G or more. Bad. */
+ if (total_memory >= (1ull << 32)) {
+ printf("Detected %lld MB of memory\n", total_memory >> 20);
+ printf("This U-Boot only supports < 4G of DDR\n");
+ printf("You could rebuild it with CONFIG_PHYS_64BIT\n");
+ total_memory = CONFIG_MAX_MEM_MAPPED;
+ }
+#endif
+
+ return total_memory;
+}
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/options.c b/arch/ppc/cpu/mpc8xxx/ddr/options.c
new file mode 100644
index 0000000..46731c8
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/options.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2008, 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <common.h>
+#include <asm/fsl_ddr_sdram.h>
+
+#include "ddr.h"
+
+/* Board-specific functions defined in each board's ddr.c */
+extern void fsl_ddr_board_options(memctl_options_t *popts,
+ dimm_params_t *pdimm,
+ unsigned int ctrl_num);
+
+unsigned int populate_memctl_options(int all_DIMMs_registered,
+ memctl_options_t *popts,
+ dimm_params_t *pdimm,
+ unsigned int ctrl_num)
+{
+ unsigned int i;
+ const char *p;
+
+ /* Chip select options. */
+
+ /* Pick chip-select local options. */
+ for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
+ /* If not DDR2, odt_rd_cfg and odt_wr_cfg need to be 0. */
+
+ /* only for single CS? */
+ popts->cs_local_opts[i].odt_rd_cfg = 0;
+
+ popts->cs_local_opts[i].odt_wr_cfg = 1;
+ popts->cs_local_opts[i].auto_precharge = 0;
+ }
+
+ /* Pick interleaving mode. */
+
+ /*
+ * 0 = no interleaving
+ * 1 = interleaving between 2 controllers
+ */
+ popts->memctl_interleaving = 0;
+
+ /*
+ * 0 = cacheline
+ * 1 = page
+ * 2 = (logical) bank
+ * 3 = superbank (only if CS interleaving is enabled)
+ */
+ popts->memctl_interleaving_mode = 0;
+
+ /*
+ * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
+ * 1: page: bit to the left of the column bits selects the memctl
+ * 2: bank: bit to the left of the bank bits selects the memctl
+ * 3: superbank: bit to the left of the chip select selects the memctl
+ *
+ * NOTE: ba_intlv (rank interleaving) is independent of memory
+ * controller interleaving; it is only within a memory controller.
+ * Must use superbank interleaving if rank interleaving is used and
+ * memory controller interleaving is enabled.
+ */
+
+ /*
+ * 0 = no
+ * 0x40 = CS0,CS1
+ * 0x20 = CS2,CS3
+ * 0x60 = CS0,CS1 + CS2,CS3
+ * 0x04 = CS0,CS1,CS2,CS3
+ */
+ popts->ba_intlv_ctl = 0;
+
+ /* Memory Organization Parameters */
+ popts->registered_dimm_en = all_DIMMs_registered;
+
+	/* Operational Mode Parameters */
+
+ /* Pick ECC modes */
+#ifdef CONFIG_DDR_ECC
+ popts->ECC_mode = 1; /* 0 = disabled, 1 = enabled */
+#else
+ popts->ECC_mode = 0; /* 0 = disabled, 1 = enabled */
+#endif
+ popts->ECC_init_using_memctl = 1; /* 0 = use DMA, 1 = use memctl */
+
+ /*
+ * Choose DQS config
+ * 0 for DDR1
+ * 1 for DDR2
+ */
+#if defined(CONFIG_FSL_DDR1)
+ popts->DQS_config = 0;
+#elif defined(CONFIG_FSL_DDR2) || defined(CONFIG_FSL_DDR3)
+ popts->DQS_config = 1;
+#endif
+
+ /* Choose self-refresh during sleep. */
+ popts->self_refresh_in_sleep = 1;
+
+ /* Choose dynamic power management mode. */
+ popts->dynamic_power = 0;
+
+ /* 0 = 64-bit, 1 = 32-bit, 2 = 16-bit */
+ popts->data_bus_width = 0;
+
+ /* Choose burst length. */
+#if defined(CONFIG_FSL_DDR3)
+#if defined(CONFIG_E500MC)
+ popts->OTF_burst_chop_en = 0; /* on-the-fly burst chop disable */
+ popts->burst_length = DDR_BL8; /* Fixed 8-beat burst len */
+#else
+ popts->OTF_burst_chop_en = 1; /* on-the-fly burst chop */
+ popts->burst_length = DDR_OTF; /* on-the-fly BC4 and BL8 */
+#endif
+#else
+ popts->burst_length = DDR_BL4; /* has to be 4 for DDR2 */
+#endif
+
+ /* Choose ddr controller address mirror mode */
+#if defined(CONFIG_FSL_DDR3)
+ popts->mirrored_dimm = pdimm[0].mirrored_dimm;
+#endif
+
+ /* Global Timing Parameters. */
+ debug("mclk_ps = %u ps\n", get_memory_clk_period_ps());
+
+ /* Pick a caslat override. */
+ popts->cas_latency_override = 0;
+ popts->cas_latency_override_value = 3;
+ if (popts->cas_latency_override) {
+ debug("using caslat override value = %u\n",
+ popts->cas_latency_override_value);
+ }
+
+ /* Decide whether to use the computed derated latency */
+ popts->use_derated_caslat = 0;
+
+ /* Choose an additive latency. */
+ popts->additive_latency_override = 0;
+ popts->additive_latency_override_value = 3;
+ if (popts->additive_latency_override) {
+ debug("using additive latency override value = %u\n",
+ popts->additive_latency_override_value);
+ }
+
+ /*
+ * 2T_EN setting
+ *
+ * Factors to consider for 2T_EN:
+ * - number of DIMMs installed
+ * - number of components, number of active ranks
+ * - how much time you want to spend playing around
+ */
+ popts->twoT_en = 0;
+ popts->threeT_en = 0;
+
+ /*
+ * BSTTOPRE precharge interval
+ *
+ * Set this to 0 for global auto precharge
+ *
+ * FIXME: Should this be configured in picoseconds?
+ * Why it should be in ps: better understanding of this
+ * relative to actual DRAM timing parameters such as tRAS.
+ * e.g. tRAS(min) = 40 ns
+ */
+ popts->bstopre = 0x100;
+
+ /* Minimum CKE pulse width -- tCKE(MIN) */
+ popts->tCKE_clock_pulse_width_ps
+ = mclk_to_picos(FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR);
+
+ /*
+ * Window for four activates -- tFAW
+ *
+ * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
+ * FIXME: varies depending upon number of column addresses or data
+ * FIXME: width, was considering looking at pdimm->primary_sdram_width
+ */
+#if defined(CONFIG_FSL_DDR1)
+ popts->tFAW_window_four_activates_ps = mclk_to_picos(1);
+
+#elif defined(CONFIG_FSL_DDR2)
+ /*
+ * x4/x8; some datasheets have 35000
+ * x16 wide columns only? Use 50000?
+ */
+ popts->tFAW_window_four_activates_ps = 37500;
+
+#elif defined(CONFIG_FSL_DDR3)
+ popts->tFAW_window_four_activates_ps = pdimm[0].tFAW_ps;
+#endif
+ popts->zq_en = 0;
+ popts->wrlvl_en = 0;
+#if defined(CONFIG_FSL_DDR3)
+ /*
+	 * Because DDR3 DIMMs use a fly-by topology, we suggest
+	 * enabling write leveling to meet tQDSS under
+	 * different loading.
+ */
+ popts->wrlvl_en = 1;
+ popts->wrlvl_override = 0;
+#endif
+
+ /*
+ * Check interleaving configuration from environment.
+ * Please refer to doc/README.fsl-ddr for the detail.
+ *
+ * If memory controller interleaving is enabled, then the data
+ * bus widths must be programmed identically for the 2 memory
+ * controllers.
+ *
+ * XXX: Attempt to set both controllers to the same chip select
+ * interleaving mode. It will do a best effort to get the
+ * requested ranks interleaved together such that the result
+ * should be a subset of the requested configuration.
+ */
+#if (CONFIG_NUM_DDR_CONTROLLERS > 1)
+ if ((p = getenv("memctl_intlv_ctl")) != NULL) {
+ if (pdimm[0].n_ranks == 0) {
+ printf("There is no rank on CS0. Because only rank on "
+ "CS0 and ranks chip-select interleaved with CS0"
+ " are controller interleaved, force non memory "
+ "controller interleaving\n");
+ popts->memctl_interleaving = 0;
+ } else {
+ popts->memctl_interleaving = 1;
+ if (strcmp(p, "cacheline") == 0)
+ popts->memctl_interleaving_mode =
+ FSL_DDR_CACHE_LINE_INTERLEAVING;
+ else if (strcmp(p, "page") == 0)
+ popts->memctl_interleaving_mode =
+ FSL_DDR_PAGE_INTERLEAVING;
+ else if (strcmp(p, "bank") == 0)
+ popts->memctl_interleaving_mode =
+ FSL_DDR_BANK_INTERLEAVING;
+ else if (strcmp(p, "superbank") == 0)
+ popts->memctl_interleaving_mode =
+ FSL_DDR_SUPERBANK_INTERLEAVING;
+ else
+ popts->memctl_interleaving_mode =
+ simple_strtoul(p, NULL, 0);
+ }
+ }
+#endif
+
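+ /*
+ * For example, at the U-Boot prompt "setenv memctl_intlv_ctl cacheline"
+ * selects cache-line controller interleaving, and "setenv ba_intlv_ctl
+ * cs0_cs1" requests CS0+CS1 chip-select interleaving, matching the
+ * strings parsed above and below (see doc/README.fsl-ddr).
+ */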
+ if (((p = getenv("ba_intlv_ctl")) != NULL) &&
+ (CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
+ if (strcmp(p, "cs0_cs1") == 0)
+ popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
+ else if (strcmp(p, "cs2_cs3") == 0)
+ popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
+ else if (strcmp(p, "cs0_cs1_and_cs2_cs3") == 0)
+ popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
+ else if (strcmp(p, "cs0_cs1_cs2_cs3") == 0)
+ popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
+ else
+ popts->ba_intlv_ctl = simple_strtoul(p, NULL, 0);
+
+ switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
+ case FSL_DDR_CS0_CS1_CS2_CS3:
+ case FSL_DDR_CS0_CS1:
+ if (pdimm[0].n_ranks != 2) {
+ popts->ba_intlv_ctl = 0;
+ printf("Not enough banks (chip-selects) for "
+ "CS0+CS1; forcing non-interleaving!\n");
+ }
+ break;
+ case FSL_DDR_CS2_CS3:
+ if (pdimm[1].n_ranks != 2) {
+ popts->ba_intlv_ctl = 0;
+ printf("Not enough banks (chip-selects) for CS2+CS3; "
+ "forcing non-interleaving!\n");
+ }
+ break;
+ case FSL_DDR_CS0_CS1_AND_CS2_CS3:
+ if ((pdimm[0].n_ranks != 2) || (pdimm[1].n_ranks != 2)) {
+ popts->ba_intlv_ctl = 0;
+ printf("Not enough banks (chip-selects) for CS0+CS1 or "
+ "CS2+CS3; forcing non-interleaving!\n");
+ }
+ break;
+ default:
+ popts->ba_intlv_ctl = 0;
+ break;
+ }
+ }
+
+ fsl_ddr_board_options(popts, pdimm, ctrl_num);
+
+ return 0;
+}
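+
+/*
+ * Illustrative sketch (not part of this file): fsl_ddr_board_options(),
+ * called at the end of populate_memctl_options() above, is the per-board
+ * hook for final adjustments, e.g. forcing 2T timing or overriding
+ * clk_adjust. A hypothetical board implementation might look like:
+ *
+ *   void fsl_ddr_board_options(memctl_options_t *popts,
+ *                              dimm_params_t *pdimm,
+ *                              unsigned int ctrl_num)
+ *   {
+ *           popts->clk_adjust = 6;  (board-specific example value)
+ *           popts->twoT_en = 1;
+ *   }
+ */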
diff --git a/arch/ppc/cpu/mpc8xxx/ddr/util.c b/arch/ppc/cpu/mpc8xxx/ddr/util.c
new file mode 100644
index 0000000..1e2d921
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/ddr/util.c
@@ -0,0 +1,206 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * Version 2 as published by the Free Software Foundation.
+ */
+
+#include <common.h>
+#include <asm/fsl_law.h>
+
+#include "ddr.h"
+
+unsigned int fsl_ddr_get_mem_data_rate(void);
+
+/*
+ * Round mclk_ps to the nearest 10 ps in memory controller code.
+ *
+ * If an imprecise data rate comes out slightly high because of
+ * rounding-error propagation, use the suitably rounded mclk_ps to
+ * compute a working memory controller configuration.
+ */
+unsigned int get_memory_clk_period_ps(void)
+{
+ unsigned int mclk_ps;
+
+ mclk_ps = 2000000000000ULL / fsl_ddr_get_mem_data_rate();
+ /* round to nearest 10 ps */
+ return 10 * ((mclk_ps + 5) / 10);
+}
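+/*
+ * Worked example: if fsl_ddr_get_mem_data_rate() reports a nominal
+ * 667 MT/s data rate as 666666667, the division above truncates to
+ * 2999 ps and the rounding step returns the exact 3000 ps clock period.
+ */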
+
+/* Convert picoseconds into DRAM clock cycles (rounding up if needed). */
+unsigned int picos_to_mclk(unsigned int picos)
+{
+ const unsigned long long ULL_2e12 = 2000000000000ULL;
+ const unsigned long long ULL_8Fs = 0xFFFFFFFFULL;
+ unsigned long long clks;
+ unsigned long long clks_temp;
+
+ if (!picos)
+ return 0;
+
+ clks = fsl_ddr_get_mem_data_rate() * (unsigned long long) picos;
+ clks_temp = clks;
+ clks = clks / ULL_2e12;
+ if (clks_temp % ULL_2e12)
+ clks++;
+
+ if (clks > ULL_8Fs)
+ clks = ULL_8Fs;
+
+ return (unsigned int) clks;
+}
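+/*
+ * Worked example: at an 800 MT/s data rate (2500 ps clock period),
+ * picos_to_mclk(40000) is exactly 16 clocks, while picos_to_mclk(43000)
+ * computes 17.2 and rounds up to 18 clocks.
+ */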
+
+unsigned int mclk_to_picos(unsigned int mclk)
+{
+ return get_memory_clk_period_ps() * mclk;
+}
+
+void
+__fsl_ddr_set_lawbar(const common_timing_params_t *memctl_common_params,
+ unsigned int memctl_interleaved,
+ unsigned int ctrl_num)
+{
+ unsigned long long base = memctl_common_params->base_address;
+ unsigned long long size = memctl_common_params->total_mem;
+
+ /* If no DIMMs on this controller, do not proceed any further. */
+ if (!memctl_common_params->ndimms_present)
+ return;
+
+#if !defined(CONFIG_PHYS_64BIT)
+ if (base >= CONFIG_MAX_MEM_MAPPED)
+ return;
+ if ((base + size) >= CONFIG_MAX_MEM_MAPPED)
+ size = CONFIG_MAX_MEM_MAPPED - base;
+#endif
+
+ if (ctrl_num == 0) {
+ /*
+ * Set up LAW for DDR controller 1 space.
+ */
+ unsigned int lawbar1_target_id = memctl_interleaved
+ ? LAW_TRGT_IF_DDR_INTRLV : LAW_TRGT_IF_DDR_1;
+
+ if (set_ddr_laws(base, size, lawbar1_target_id) < 0) {
+ printf("%s: ERROR (ctrl #0, intrlv=%d)\n", __func__,
+ memctl_interleaved);
+ return;
+ }
+ } else if (ctrl_num == 1) {
+ if (set_ddr_laws(base, size, LAW_TRGT_IF_DDR_2) < 0) {
+ printf("%s: ERROR (ctrl #1)\n", __func__);
+ return;
+ }
+ } else {
+ printf("%s: unexpected DDR controller number (%u)\n", __func__,
+ ctrl_num);
+ }
+}
+
+__attribute__((weak, alias("__fsl_ddr_set_lawbar"))) void
+fsl_ddr_set_lawbar(const common_timing_params_t *memctl_common_params,
+ unsigned int memctl_interleaved,
+ unsigned int ctrl_num);
+
+void board_add_ram_info(int use_default)
+{
+#if defined(CONFIG_MPC85xx)
+ volatile ccsr_ddr_t *ddr = (void *)(CONFIG_SYS_MPC85xx_DDR_ADDR);
+#elif defined(CONFIG_MPC86xx)
+ volatile ccsr_ddr_t *ddr = (void *)(CONFIG_SYS_MPC86xx_DDR_ADDR);
+#endif
+#if (CONFIG_NUM_DDR_CONTROLLERS > 1)
+ uint32_t cs0_config = in_be32(&ddr->cs0_config);
+#endif
+ uint32_t sdram_cfg = in_be32(&ddr->sdram_cfg);
+ int cas_lat;
+
+ puts(" (DDR");
+ switch ((sdram_cfg & SDRAM_CFG_SDRAM_TYPE_MASK) >>
+ SDRAM_CFG_SDRAM_TYPE_SHIFT) {
+ case SDRAM_TYPE_DDR1:
+ puts("1");
+ break;
+ case SDRAM_TYPE_DDR2:
+ puts("2");
+ break;
+ case SDRAM_TYPE_DDR3:
+ puts("3");
+ break;
+ default:
+ puts("?");
+ break;
+ }
+
+ if (sdram_cfg & SDRAM_CFG_32_BE)
+ puts(", 32-bit");
+ else
+ puts(", 64-bit");
+
+ /* Calculate CAS latency from timing cfg values; cas_lat is in half clocks */
+ cas_lat = ((in_be32(&ddr->timing_cfg_1) >> 16) & 0xf) + 1;
+ if ((in_be32(&ddr->timing_cfg_3) >> 12) & 1)
+ cas_lat += (8 << 1);
+ printf(", CL=%d", cas_lat >> 1);
+ if (cas_lat & 0x1)
+ puts(".5");
+
+ if (sdram_cfg & SDRAM_CFG_ECC_EN)
+ puts(", ECC on)");
+ else
+ puts(", ECC off)");
+
+#if (CONFIG_NUM_DDR_CONTROLLERS > 1)
+ if (cs0_config & 0x20000000) {
+ puts("\n");
+ puts(" DDR Controller Interleaving Mode: ");
+
+ switch ((cs0_config >> 24) & 0xf) {
+ case FSL_DDR_CACHE_LINE_INTERLEAVING:
+ puts("cache line");
+ break;
+ case FSL_DDR_PAGE_INTERLEAVING:
+ puts("page");
+ break;
+ case FSL_DDR_BANK_INTERLEAVING:
+ puts("bank");
+ break;
+ case FSL_DDR_SUPERBANK_INTERLEAVING:
+ puts("super-bank");
+ break;
+ default:
+ puts("invalid");
+ break;
+ }
+ }
+#endif
+
+ if ((sdram_cfg >> 8) & 0x7f) {
+ puts("\n");
+ puts(" DDR Chip-Select Interleaving Mode: ");
+ switch ((sdram_cfg >> 8) & 0x7f) {
+ case FSL_DDR_CS0_CS1_CS2_CS3:
+ puts("CS0+CS1+CS2+CS3");
+ break;
+ case FSL_DDR_CS0_CS1:
+ puts("CS0+CS1");
+ break;
+ case FSL_DDR_CS2_CS3:
+ puts("CS2+CS3");
+ break;
+ case FSL_DDR_CS0_CS1_AND_CS2_CS3:
+ puts("CS0+CS1 and CS2+CS3");
+ break;
+ default:
+ puts("invalid");
+ break;
+ }
+ }
+}
diff --git a/arch/ppc/cpu/mpc8xxx/fdt.c b/arch/ppc/cpu/mpc8xxx/fdt.c
new file mode 100644
index 0000000..e68d6f3
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/fdt.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2009 Freescale Semiconductor, Inc.
+ *
+ * This file is derived from arch/ppc/cpu/mpc85xx/cpu.c and
+ * arch/ppc/cpu/mpc86xx/cpu.c. Basically this file contains
+ * cpu specific common code for 85xx/86xx processors.
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <libfdt.h>
+#include <fdt_support.h>
+
+void ft_fixup_num_cores(void *blob)
+{
+ int off, num_cores, del_cores;
+
+ del_cores = 0;
+ num_cores = cpu_numcores();
+
+ off = fdt_node_offset_by_prop_value(blob, -1, "device_type", "cpu", 4);
+ while (off != -FDT_ERR_NOTFOUND) {
+ u32 *reg = (u32 *)fdt_getprop(blob, off, "reg", 0);
+
+ /*
+ * If we find a cpu node beyond the cores we expect, delete it
+ * and restart the search from the beginning, since the node
+ * offsets can no longer be trusted after a deletion.
+ */
+ if (*reg > num_cores-1) {
+ fdt_del_node(blob, off);
+ del_cores++;
+ off = -1;
+ }
+ off = fdt_node_offset_by_prop_value(blob, off,
+ "device_type", "cpu", 4);
+ }
+ debug("%d core system found\n", num_cores);
+ debug("deleted %d extra core entries from device tree\n",
+ del_cores);
+}
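+
+/*
+ * Usage sketch (hypothetical caller, not part of this file): the
+ * 85xx/86xx device-tree fixup path is expected to invoke this helper
+ * before booting an OS, roughly as:
+ *
+ *   void ft_cpu_setup(void *blob, bd_t *bd)
+ *   {
+ *           ...
+ *           ft_fixup_num_cores(blob);
+ *   }
+ */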
diff --git a/arch/ppc/cpu/mpc8xxx/pci_cfg.c b/arch/ppc/cpu/mpc8xxx/pci_cfg.c
new file mode 100644
index 0000000..9b7181d
--- /dev/null
+++ b/arch/ppc/cpu/mpc8xxx/pci_cfg.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2009-2010 Freescale Semiconductor, Inc.
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <asm/fsl_law.h>
+#include <pci.h>
+
+struct pci_info {
+ u32 cfg;
+};
+
+/*
+ * The cfg field is a bit mask in which bit n is set if the interface
+ * would be enabled when the cfg_IO_ports[] signals sample as n.
+ *
+ * On MPC86xx/PQ3 based systems, cfg_IO_ports is extracted from the
+ * GUTS register PORDEVSR.
+ *
+ * cfg_IO_ports only exists on systems with PCIe (cfg is set to 0 for
+ * systems without PCIe).
+ */
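+
+/*
+ * Worked example (taken from the MPC8544 table below): PCIE_2 has
+ * cfg = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7), so
+ * is_fsl_pci_cfg(LAW_TRGT_IF_PCIE_2, io_sel) is non-zero exactly when
+ * the sampled cfg_IO_ports[] value is 4, 5, 6 or 7.
+ */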
+
+#if defined(CONFIG_MPC8540) || defined(CONFIG_MPC8560)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCI] = {
+ .cfg = 0,
+ },
+};
+#elif defined(CONFIG_MPC8541) || defined(CONFIG_MPC8555)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCI] = {
+ .cfg = 0,
+ },
+};
+#elif defined(CONFIG_MPC8536)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCI] = {
+ .cfg = 0,
+ },
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 2) | (1 << 3) | (1 << 5) | (1 << 7),
+ },
+ [LAW_TRGT_IF_PCIE_2] = {
+ .cfg = (1 << 5) | (1 << 7),
+ },
+ [LAW_TRGT_IF_PCIE_3] = {
+ .cfg = (1 << 7),
+ },
+};
+#elif defined(CONFIG_MPC8544)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCI] = {
+ .cfg = 0,
+ },
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 2) | (1 << 3) | (1 << 4) | (1 << 5) |
+ (1 << 6) | (1 << 7),
+ },
+ [LAW_TRGT_IF_PCIE_2] = {
+ .cfg = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7),
+ },
+ [LAW_TRGT_IF_PCIE_3] = {
+ .cfg = (1 << 6) | (1 << 7),
+ },
+};
+#elif defined(CONFIG_MPC8548)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCI_1] = {
+ .cfg = 0,
+ },
+ [LAW_TRGT_IF_PCI_2] = {
+ .cfg = 0,
+ },
+ /* PCI_2 is always host; we don't use io_sel to determine enable/disable */
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 3) | (1 << 4) | (1 << 7),
+ },
+};
+#elif defined(CONFIG_MPC8568)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCI] = {
+ .cfg = 0,
+ },
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 3) | (1 << 4) | (1 << 7),
+ },
+};
+#elif defined(CONFIG_MPC8569)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 0) | (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) |
+ (1 << 8) | (1 << 0xc) | (1 << 0xf),
+ },
+};
+#elif defined(CONFIG_MPC8572)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 2) | (1 << 3) | (1 << 7) |
+ (1 << 0xb) | (1 << 0xc) | (1 << 0xf),
+ },
+ [LAW_TRGT_IF_PCIE_2] = {
+ .cfg = (1 << 3) | (1 << 7),
+ },
+ [LAW_TRGT_IF_PCIE_3] = {
+ .cfg = (1 << 7),
+ },
+};
+#elif defined(CONFIG_MPC8610)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCI_1] = {
+ .cfg = 0,
+ },
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 1) | (1 << 4),
+ },
+ [LAW_TRGT_IF_PCIE_2] = {
+ .cfg = (1 << 0) | (1 << 4),
+ },
+};
+#elif defined(CONFIG_MPC8641)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 2) | (1 << 3) | (1 << 5) | (1 << 6) |
+ (1 << 7) | (1 << 0xe) | (1 << 0xf),
+ },
+};
+#elif defined(CONFIG_P1011) || defined(CONFIG_P1020) || \
+ defined(CONFIG_P1012) || defined(CONFIG_P1021)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 0) | (1 << 6) | (1 << 0xe) | (1 << 0xf),
+ },
+ [LAW_TRGT_IF_PCIE_2] = {
+ .cfg = (1 << 0xe),
+ },
+};
+#elif defined(CONFIG_P1013) || defined(CONFIG_P1022)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 6) | (1 << 7) | (1 << 9) | (1 << 0xa) |
+ (1 << 0xb) | (1 << 0xd) | (1 << 0xe) |
+ (1 << 0xf) | (1 << 0x15) | (1 << 0x16) |
+ (1 << 0x17) | (1 << 0x18) | (1 << 0x19) |
+ (1 << 0x1a) | (1 << 0x1b) | (1 << 0x1c) |
+ (1 << 0x1d) | (1 << 0x1e) | (1 << 0x1f),
+ },
+ [LAW_TRGT_IF_PCIE_2] = {
+ .cfg = (1 << 0) | (1 << 1) | (1 << 6) | (1 << 7) |
+ (1 << 9) | (1 << 0xa) | (1 << 0xb) | (1 << 0xd) |
+ (1 << 0x15) | (1 << 0x16) | (1 << 0x17) |
+ (1 << 0x18) | (1 << 0x1c),
+ },
+ [LAW_TRGT_IF_PCIE_3] = {
+ .cfg = (1 << 6) | (1 << 7) | (1 << 9) | (1 << 0xd) |
+ (1 << 0x15) | (1 << 0x16) | (1 << 0x17) | (1 << 0x18) |
+ (1 << 0x19) | (1 << 0x1a) | (1 << 0x1b),
+ },
+};
+#elif defined(CONFIG_P2010) || defined(CONFIG_P2020)
+static struct pci_info pci_config_info[] =
+{
+ [LAW_TRGT_IF_PCIE_1] = {
+ .cfg = (1 << 0) | (1 << 2) | (1 << 4) | (1 << 6) |
+ (1 << 0xd) | (1 << 0xe) | (1 << 0xf),
+ },
+ [LAW_TRGT_IF_PCIE_2] = {
+ .cfg = (1 << 2) | (1 << 0xe),
+ },
+ [LAW_TRGT_IF_PCIE_3] = {
+ .cfg = (1 << 2) | (1 << 4),
+ },
+};
+#elif defined(CONFIG_FSL_CORENET)
+#else
+#error Need to define pci_config_info for processor
+#endif
+
+#ifndef CONFIG_FSL_CORENET
+int is_fsl_pci_cfg(enum law_trgt_if trgt, u32 io_sel)
+{
+ return ((1 << io_sel) & pci_config_info[trgt].cfg);
+}
+#endif
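+
+/*
+ * Usage sketch (hypothetical, not part of this file): SoC PCI init code
+ * would typically read the cfg_IO_ports[] value from the GUTS PORDEVSR
+ * register (field name and shift are SoC-specific) and test each
+ * interface, e.g.:
+ *
+ *   u32 io_sel = (in_be32(&gur->pordevsr) & MPC85xx_PORDEVSR_IO_SEL) >> 19;
+ *
+ *   if (is_fsl_pci_cfg(LAW_TRGT_IF_PCIE_1, io_sel))
+ *           ... bring up PCIe controller 1 ...
+ */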