Diffstat (limited to 'arch/arm/cpu')
-rw-r--r--  arch/arm/cpu/arm720t/tegra-common/spl.c          |   2
-rw-r--r--  arch/arm/cpu/arm926ejs/davinci/da850_lowlevel.c  |   2
-rw-r--r--  arch/arm/cpu/arm926ejs/davinci/dm365_lowlevel.c  |   1
-rw-r--r--  arch/arm/cpu/armv7/am33xx/ddr.c                  |   1
-rw-r--r--  arch/arm/cpu/armv7/at91/cpu.c                    |   2
-rw-r--r--  arch/arm/cpu/armv7/exynos/dmc_common.c           |   2
-rw-r--r--  arch/arm/cpu/armv7/exynos/dmc_init_ddr3.c        | 369
-rw-r--r--  arch/arm/cpu/armv7/exynos/exynos5_setup.h        |  21
-rw-r--r--  arch/arm/cpu/armv7/exynos/lowlevel_init.c        |   2
-rw-r--r--  arch/arm/cpu/armv7/keystone/Makefile             |   1
-rw-r--r--  arch/arm/cpu/armv7/keystone/aemif.c              |  71
-rw-r--r--  arch/arm/cpu/armv7/mx6/Makefile                  |   1
-rw-r--r--  arch/arm/cpu/armv7/mx6/ddr.c                     | 490
-rw-r--r--  arch/arm/cpu/armv7/mx6/hab.c                     |  73
-rw-r--r--  arch/arm/cpu/armv7/omap-common/mem-common.c      |   6
-rw-r--r--  arch/arm/cpu/armv7/omap3/board.c                 |   4
-rw-r--r--  arch/arm/cpu/armv7/tegra20/display.c             |   2
-rw-r--r--  arch/arm/cpu/armv7/zynq/u-boot.lds               |   2
-rw-r--r--  arch/arm/cpu/armv8/transition.S                  |   2
19 files changed, 868 insertions, 185 deletions
diff --git a/arch/arm/cpu/arm720t/tegra-common/spl.c b/arch/arm/cpu/arm720t/tegra-common/spl.c
index 3479541..e0f9d5b 100644
--- a/arch/arm/cpu/arm720t/tegra-common/spl.c
+++ b/arch/arm/cpu/arm720t/tegra-common/spl.c
@@ -15,7 +15,7 @@
#include <asm/arch/tegra.h>
#include <asm/arch-tegra/apb_misc.h>
#include <asm/arch-tegra/board.h>
-#include <asm/arch/spl.h>
+#include <asm/spl.h>
#include "cpu.h"
void spl_board_init(void)
diff --git a/arch/arm/cpu/arm926ejs/davinci/da850_lowlevel.c b/arch/arm/cpu/arm926ejs/davinci/da850_lowlevel.c
index b91e948..19730ce 100644
--- a/arch/arm/cpu/arm926ejs/davinci/da850_lowlevel.c
+++ b/arch/arm/cpu/arm926ejs/davinci/da850_lowlevel.c
@@ -14,7 +14,7 @@
#include <asm/arch/hardware.h>
#include <asm/arch/davinci_misc.h>
#include <asm/arch/ddr2_defs.h>
-#include <asm/arch/emif_defs.h>
+#include <asm/ti-common/davinci_nand.h>
#include <asm/arch/pll_defs.h>
void davinci_enable_uart0(void)
diff --git a/arch/arm/cpu/arm926ejs/davinci/dm365_lowlevel.c b/arch/arm/cpu/arm926ejs/davinci/dm365_lowlevel.c
index ee096fe..c8b4498 100644
--- a/arch/arm/cpu/arm926ejs/davinci/dm365_lowlevel.c
+++ b/arch/arm/cpu/arm926ejs/davinci/dm365_lowlevel.c
@@ -11,6 +11,7 @@
#include <nand.h>
#include <ns16550.h>
#include <post.h>
+#include <asm/ti-common/davinci_nand.h>
#include <asm/arch/dm365_lowlevel.h>
#include <asm/arch/hardware.h>
diff --git a/arch/arm/cpu/armv7/am33xx/ddr.c b/arch/arm/cpu/armv7/am33xx/ddr.c
index 9a625c4..bbe9d1a 100644
--- a/arch/arm/cpu/armv7/am33xx/ddr.c
+++ b/arch/arm/cpu/armv7/am33xx/ddr.c
@@ -95,6 +95,7 @@ void config_sdram_emif4d5(const struct emif_regs *regs, int nr)
&emif_reg[nr]->emif_rd_wr_exec_thresh);
writel(regs->ref_ctrl, &emif_reg[nr]->emif_sdram_ref_ctrl);
+ writel(regs->ref_ctrl, &emif_reg[nr]->emif_sdram_ref_ctrl_shdw);
writel(regs->sdram_config, &emif_reg[nr]->emif_sdram_config);
writel(regs->sdram_config, &cstat->secure_emif_sdram_config);
diff --git a/arch/arm/cpu/armv7/at91/cpu.c b/arch/arm/cpu/armv7/at91/cpu.c
index 2fbf60d..8d86f97 100644
--- a/arch/arm/cpu/armv7/at91/cpu.c
+++ b/arch/arm/cpu/armv7/at91/cpu.c
@@ -61,6 +61,8 @@ int print_cpuinfo(void)
void enable_caches(void)
{
+ icache_enable();
+ dcache_enable();
}
unsigned int get_chip_id(void)
diff --git a/arch/arm/cpu/armv7/exynos/dmc_common.c b/arch/arm/cpu/armv7/exynos/dmc_common.c
index cca925e..9b6ee69 100644
--- a/arch/arm/cpu/armv7/exynos/dmc_common.c
+++ b/arch/arm/cpu/armv7/exynos/dmc_common.c
@@ -162,7 +162,7 @@ void mem_ctrl_init(int reset)
/* If there are any other memory variant, add their init call below */
if (param->mem_type == DDR_MODE_DDR3) {
- ret = ddr3_mem_ctrl_init(mem, param->mem_iv_size, reset);
+ ret = ddr3_mem_ctrl_init(mem, reset);
if (ret) {
/* will hang if failed to init memory control */
while (1)
diff --git a/arch/arm/cpu/armv7/exynos/dmc_init_ddr3.c b/arch/arm/cpu/armv7/exynos/dmc_init_ddr3.c
index 487e6f4..b86dd2d 100644
--- a/arch/arm/cpu/armv7/exynos/dmc_init_ddr3.c
+++ b/arch/arm/cpu/armv7/exynos/dmc_init_ddr3.c
@@ -6,6 +6,7 @@
* SPDX-License-Identifier: GPL-2.0+
*/
+#include <common.h>
#include <config.h>
#include <asm/io.h>
#include <asm/arch/clock.h>
@@ -16,7 +17,11 @@
#include "exynos5_setup.h"
#include "clock_init.h"
-#define TIMEOUT 10000
+#define TIMEOUT_US 10000
+#define NUM_BYTE_LANES 4
+#define DEFAULT_DQS 8
+#define DEFAULT_DQS_X4 (DEFAULT_DQS << 24) || (DEFAULT_DQS << 16) \
+ || (DEFAULT_DQS << 8) || (DEFAULT_DQS << 0)
#ifdef CONFIG_EXYNOS5250
static void reset_phy_ctrl(void)
@@ -28,8 +33,7 @@ static void reset_phy_ctrl(void)
writel(DDR3PHY_CTRL_PHY_RESET, &clk->lpddr3phy_ctrl);
}
-int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
- int reset)
+int ddr3_mem_ctrl_init(struct mem_timings *mem, int reset)
{
unsigned int val;
struct exynos5_phy_control *phy0_ctrl, *phy1_ctrl;
@@ -177,7 +181,7 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
writel(val, &phy1_ctrl->phy_con1);
writel(CTRL_RDLVL_GATE_ENABLE, &dmc->rdlvl_config);
- i = TIMEOUT;
+ i = TIMEOUT_US;
while ((readl(&dmc->phystatus) &
(RDLVL_COMPLETE_CHO | RDLVL_COMPLETE_CH1)) !=
(RDLVL_COMPLETE_CHO | RDLVL_COMPLETE_CH1) && i > 0) {
@@ -221,8 +225,220 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
#endif
#ifdef CONFIG_EXYNOS5420
-int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
- int reset)
+/**
+ * RAM address to use in the test.
+ *
+ * We'll use 4 words at this address and 4 at this address + 0x80 (Ares
+ * interleaves channels every 128 bytes). This will allow us to evaluate all of
+ * the chips in a 1 chip per channel (2GB) system and half the chips in a 2
+ * chip per channel (4GB) system. We can't test the 2nd chip since we need to
+ * do tests before the 2nd chip is enabled. Looking at the 2nd chip isn't
+ * critical because the 1st and 2nd chip have very similar timings (they'd
+ * better have similar timings, since there's only a single adjustment that is
+ * shared by both chips).
+ */
+const unsigned int test_addr = CONFIG_SYS_SDRAM_BASE;
+
+/* Test pattern with which RAM will be tested */
+static const unsigned int test_pattern[] = {
+ 0x5a5a5a5a,
+ 0xa5a5a5a5,
+ 0xf0f0f0f0,
+ 0x0f0f0f0f,
+};
+
+/**
+ * This function is a test vector for sw read leveling;
+ * it compares the read data with the written data.
+ *
+ * @param ch DMC channel number
+ * @param byte_lane which DQS byte offset,
+ * possible values are 0,1,2,3
+ * @return TRUE if memory was good, FALSE if not.
+ */
+static bool dmc_valid_window_test_vector(int ch, int byte_lane)
+{
+ unsigned int read_data;
+ unsigned int mask;
+ int i;
+
+ mask = 0xFF << (8 * byte_lane);
+
+ for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
+ read_data = readl(test_addr + i * 4 + ch * 0x80);
+ if ((read_data & mask) != (test_pattern[i] & mask))
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * This function returns current read offset value.
+ *
+ * @param phy_ctrl pointer to the current phy controller
+ */
+static unsigned int dmc_get_read_offset_value(struct exynos5420_phy_control
+ *phy_ctrl)
+{
+ return readl(&phy_ctrl->phy_con4);
+}
+
+/**
+ * This function performs resync, so that slave DLL is updated.
+ *
+ * @param phy_ctrl pointer to the current phy controller
+ */
+static void ddr_phy_set_do_resync(struct exynos5420_phy_control *phy_ctrl)
+{
+ setbits_le32(&phy_ctrl->phy_con10, PHY_CON10_CTRL_OFFSETR3);
+ clrbits_le32(&phy_ctrl->phy_con10, PHY_CON10_CTRL_OFFSETR3);
+}
+
+/**
+ * This function sets read offset value register with 'offset'.
+ *
+ * ...we also call ddr_phy_set_do_resync().
+ *
+ * @param phy_ctrl pointer to the current phy controller
+ * @param offset offset to read DQS
+ */
+static void dmc_set_read_offset_value(struct exynos5420_phy_control *phy_ctrl,
+ unsigned int offset)
+{
+ writel(offset, &phy_ctrl->phy_con4);
+ ddr_phy_set_do_resync(phy_ctrl);
+}
+
+/**
+ * Convert a 2s complement byte to a byte with a sign bit.
+ *
+ * NOTE: you shouldn't use normal math on the number returned by this function.
+ * As an example, -10 = 0xf6. After this function -10 = 0x8a. If you wanted
+ * to do math and get the average of 10 and -10 (should be 0):
+ * 0x8a + 0xa = 0x94 (-108)
+ * 0x94 / 2 = 0xca (-54)
+ * ...and 0xca = sign bit plus 0x4a, or -74
+ *
+ * Also note that you lose the ability to represent -128 since there are two
+ * representations of 0.
+ *
+ * @param b The byte to convert in two's complement.
+ * @return The 7-bit value + sign bit.
+ */
+
+unsigned char make_signed_byte(signed char b)
+{
+ if (b < 0)
+ return 0x80 | -b;
+ else
+ return b;
+}
+
+/**
+ * Test various shifts starting at 'start' and going to 'end'.
+ *
+ * For each byte lane, we'll walk through shift starting at 'start' and going
+ * to 'end' (inclusive). When we are finally able to read the test pattern
+ * we'll store the value in the results array.
+ *
+ * @param phy_ctrl pointer to the current phy controller
+ * @param ch channel number
+ * @param start the start shift. -127 to 127
+ * @param end the end shift. -127 to 127
+ * @param results we'll store results for each byte lane.
+ */
+
+void test_shifts(struct exynos5420_phy_control *phy_ctrl, int ch,
+ int start, int end, int results[NUM_BYTE_LANES])
+{
+ int incr = (start < end) ? 1 : -1;
+ int byte_lane;
+
+ for (byte_lane = 0; byte_lane < NUM_BYTE_LANES; byte_lane++) {
+ int shift;
+
+ dmc_set_read_offset_value(phy_ctrl, DEFAULT_DQS_X4);
+ results[byte_lane] = DEFAULT_DQS;
+
+ for (shift = start; shift != (end + incr); shift += incr) {
+ unsigned int byte_offsetr;
+ unsigned int offsetr;
+
+ byte_offsetr = make_signed_byte(shift);
+
+ offsetr = dmc_get_read_offset_value(phy_ctrl);
+ offsetr &= ~(0xFF << (8 * byte_lane));
+ offsetr |= (byte_offsetr << (8 * byte_lane));
+ dmc_set_read_offset_value(phy_ctrl, offsetr);
+
+ if (dmc_valid_window_test_vector(ch, byte_lane)) {
+ results[byte_lane] = shift;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * This function performs SW read leveling to compensate for DQ-DQS skew at
+ * the receiver. It first finds the optimal read offset value on each DQS,
+ * then applies the value to the PHY.
+ *
+ * Read offset value has its min margin and max margin. If read offset
+ * value exceeds its min or max margin, read data will have corruption.
+ * To avoid this we are doing sw read leveling.
+ *
+ * SW read leveling:
+ * 1> find the offset value's left_limit and right_limit
+ * 2> calculate the center value between them
+ * 3> program that center value into the PHY
+ * 4> the PHY then operates with its optimal offset value.
+ *
+ * @param phy_ctrl pointer to the current phy controller
+ * @param ch channel number
+ * @param coarse_lock_val The coarse lock value read from PHY_CON13.
+ * (0 - 0x7f)
+ */
+static void software_find_read_offset(struct exynos5420_phy_control *phy_ctrl,
+ int ch, unsigned int coarse_lock_val)
+{
+ unsigned int offsetr_cent;
+ int byte_lane;
+ int left_limit;
+ int right_limit;
+ int left[NUM_BYTE_LANES];
+ int right[NUM_BYTE_LANES];
+ int i;
+
+ /* Fill the memory with test patterns */
+ for (i = 0; i < ARRAY_SIZE(test_pattern); i++)
+ writel(test_pattern[i], test_addr + i * 4 + ch * 0x80);
+
+ /* Figure out the limits we'll test with; keep -127 < limit < 127 */
+ left_limit = DEFAULT_DQS - coarse_lock_val;
+ right_limit = DEFAULT_DQS + coarse_lock_val;
+ if (right_limit > 127)
+ right_limit = 127;
+
+ /* Fill in the location where reads were OK from left and right */
+ test_shifts(phy_ctrl, ch, left_limit, right_limit, left);
+ test_shifts(phy_ctrl, ch, right_limit, left_limit, right);
+
+ /* Make a final value by taking the center between the left and right */
+ offsetr_cent = 0;
+ for (byte_lane = 0; byte_lane < NUM_BYTE_LANES; byte_lane++) {
+ int temp_center;
+ unsigned int vmwc;
+
+ temp_center = (left[byte_lane] + right[byte_lane]) / 2;
+ vmwc = make_signed_byte(temp_center);
+ offsetr_cent |= vmwc << (8 * byte_lane);
+ }
+ dmc_set_read_offset_value(phy_ctrl, offsetr_cent);
+}
+
+int ddr3_mem_ctrl_init(struct mem_timings *mem, int reset)
{
struct exynos5420_clock *clk =
(struct exynos5420_clock *)samsung_get_base_clock();
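The helpers added above lean on two conventions that are easy to get backwards: the PHY takes its per-lane read offsets as sign-magnitude bytes packed four to a 32-bit word, and any averaging has to be done on the raw shift values before conversion. The following is a minimal host-side sketch of both, outside the patch; pack_read_offsets() and the values in main() are illustrative only.

#include <stdio.h>

static unsigned char make_signed_byte(signed char b)
{
	return (b < 0) ? (0x80 | -b) : b;	/* sign bit + 7-bit magnitude */
}

/* Pack one sign-magnitude byte per lane, lane 0 in bits 7:0 ... lane 3 in 31:24 */
static unsigned int pack_read_offsets(const int shift[4])
{
	unsigned int word = 0;
	int lane;

	for (lane = 0; lane < 4; lane++)
		word |= (unsigned int)make_signed_byte(shift[lane]) << (8 * lane);
	return word;
}

int main(void)
{
	int left = 10, right = -10;
	int shifts[4] = { 8, 8, 8, 8 };		/* DEFAULT_DQS on every lane */

	/* Average plain integers first, convert last: (10 + -10) / 2 = 0 */
	printf("center = 0x%02x\n", make_signed_byte((left + right) / 2));

	/* Averaging the already-converted bytes does not give 0 (prints 0x4a) */
	printf("bogus  = 0x%02x\n",
	       (make_signed_byte(left) + make_signed_byte(right)) / 2);

	/* Four lanes of DEFAULT_DQS pack to 0x08080808 */
	printf("offsetr = 0x%08x\n", pack_read_offsets(shifts));
	return 0;
}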
@@ -231,7 +447,9 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
struct exynos5420_phy_control *phy0_ctrl, *phy1_ctrl;
struct exynos5420_dmc *drex0, *drex1;
struct exynos5420_tzasc *tzasc0, *tzasc1;
+ struct exynos5_power *pmu;
uint32_t val, n_lock_r, n_lock_w_phy0, n_lock_w_phy1;
+ uint32_t lock0_info, lock1_info;
int chip;
int i;
@@ -244,6 +462,7 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
tzasc0 = (struct exynos5420_tzasc *)samsung_get_base_dmc_tzasc();
tzasc1 = (struct exynos5420_tzasc *)(samsung_get_base_dmc_tzasc()
+ DMC_OFFSET);
+ pmu = (struct exynos5_power *)EXYNOS5420_POWER_BASE;
/* Enable PAUSE for DREX */
setbits_le32(&clk->pause, ENABLE_BIT);
@@ -394,7 +613,41 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
*/
dmc_config_mrs(mem, &drex0->directcmd);
dmc_config_mrs(mem, &drex1->directcmd);
- } else {
+ }
+
+ /*
+ * Get PHY_CON13 from both phys. Gate CLKM around reading since
+ * PHY_CON13 is glitchy when CLKM is running. We're paranoid and
+ * wait until we get a "fine lock", though a coarse lock is probably
+ * OK (we only use the coarse numbers below). We try to gate the
+ * clock for as short a time as possible in case SDRAM is somehow
+ * sensitive. sdelay(10) in the loop is arbitrary to make sure
+ * there is some time for PHY_CON13 to get updated. In practice
+ * no delay appears to be needed.
+ */
+ val = readl(&clk->gate_bus_cdrex);
+ while (true) {
+ writel(val & ~0x1, &clk->gate_bus_cdrex);
+ lock0_info = readl(&phy0_ctrl->phy_con13);
+ writel(val, &clk->gate_bus_cdrex);
+
+ if ((lock0_info & CTRL_FINE_LOCKED) == CTRL_FINE_LOCKED)
+ break;
+
+ sdelay(10);
+ }
+ while (true) {
+ writel(val & ~0x2, &clk->gate_bus_cdrex);
+ lock1_info = readl(&phy1_ctrl->phy_con13);
+ writel(val, &clk->gate_bus_cdrex);
+
+ if ((lock1_info & CTRL_FINE_LOCKED) == CTRL_FINE_LOCKED)
+ break;
+
+ sdelay(10);
+ }
+
+ if (!reset) {
/*
* During Suspend-Resume & S/W-Reset, as soon as PMU releases
* pad retention, CKE goes high. This causes memory contents
@@ -445,15 +698,13 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
val |= (RDLVL_PASS_ADJ_VAL << RDLVL_PASS_ADJ_OFFSET);
writel(val, &phy1_ctrl->phy_con1);
- n_lock_r = readl(&phy0_ctrl->phy_con13);
- n_lock_w_phy0 = (n_lock_r & CTRL_LOCK_COARSE_MASK) >> 2;
+ n_lock_w_phy0 = (lock0_info & CTRL_LOCK_COARSE_MASK) >> 2;
n_lock_r = readl(&phy0_ctrl->phy_con12);
n_lock_r &= ~CTRL_DLL_ON;
n_lock_r |= n_lock_w_phy0;
writel(n_lock_r, &phy0_ctrl->phy_con12);
- n_lock_r = readl(&phy1_ctrl->phy_con13);
- n_lock_w_phy1 = (n_lock_r & CTRL_LOCK_COARSE_MASK) >> 2;
+ n_lock_w_phy1 = (lock1_info & CTRL_LOCK_COARSE_MASK) >> 2;
n_lock_r = readl(&phy1_ctrl->phy_con12);
n_lock_r &= ~CTRL_DLL_ON;
n_lock_r |= n_lock_w_phy1;
@@ -482,7 +733,7 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
writel(val, &phy1_ctrl->phy_con1);
writel(CTRL_RDLVL_GATE_ENABLE, &drex0->rdlvl_config);
- i = TIMEOUT;
+ i = TIMEOUT_US;
while (((readl(&drex0->phystatus) & RDLVL_COMPLETE_CHO) !=
RDLVL_COMPLETE_CHO) && (i > 0)) {
/*
@@ -497,7 +748,7 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
writel(CTRL_RDLVL_GATE_DISABLE, &drex0->rdlvl_config);
writel(CTRL_RDLVL_GATE_ENABLE, &drex1->rdlvl_config);
- i = TIMEOUT;
+ i = TIMEOUT_US;
while (((readl(&drex1->phystatus) & RDLVL_COMPLETE_CHO) !=
RDLVL_COMPLETE_CHO) && (i > 0)) {
/*
@@ -522,77 +773,6 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
&drex1->directcmd);
}
- if (mem->read_leveling_enable) {
- /* Set Read DQ Calibration */
- val = (0x3 << DIRECT_CMD_BANK_SHIFT) | 0x4;
- for (chip = 0; chip < mem->chips_to_configure; chip++) {
- writel(val | (chip << DIRECT_CMD_CHIP_SHIFT),
- &drex0->directcmd);
- writel(val | (chip << DIRECT_CMD_CHIP_SHIFT),
- &drex1->directcmd);
- }
-
- val = readl(&phy0_ctrl->phy_con1);
- val |= READ_LEVELLING_DDR3;
- writel(val, &phy0_ctrl->phy_con1);
- val = readl(&phy1_ctrl->phy_con1);
- val |= READ_LEVELLING_DDR3;
- writel(val, &phy1_ctrl->phy_con1);
-
- val = readl(&phy0_ctrl->phy_con2);
- val |= (RDLVL_EN | RDLVL_INCR_ADJ);
- writel(val, &phy0_ctrl->phy_con2);
- val = readl(&phy1_ctrl->phy_con2);
- val |= (RDLVL_EN | RDLVL_INCR_ADJ);
- writel(val, &phy1_ctrl->phy_con2);
-
- setbits_le32(&drex0->rdlvl_config,
- CTRL_RDLVL_DATA_ENABLE);
- i = TIMEOUT;
- while (((readl(&drex0->phystatus) & RDLVL_COMPLETE_CHO)
- != RDLVL_COMPLETE_CHO) && (i > 0)) {
- /*
- * TODO(waihong): Comment on how long this take
- * to timeout
- */
- sdelay(100);
- i--;
- }
- if (!i)
- return SETUP_ERR_RDLV_COMPLETE_TIMEOUT;
-
- clrbits_le32(&drex0->rdlvl_config,
- CTRL_RDLVL_DATA_ENABLE);
- setbits_le32(&drex1->rdlvl_config,
- CTRL_RDLVL_DATA_ENABLE);
- i = TIMEOUT;
- while (((readl(&drex1->phystatus) & RDLVL_COMPLETE_CHO)
- != RDLVL_COMPLETE_CHO) && (i > 0)) {
- /*
- * TODO(waihong): Comment on how long this take
- * to timeout
- */
- sdelay(100);
- i--;
- }
- if (!i)
- return SETUP_ERR_RDLV_COMPLETE_TIMEOUT;
-
- clrbits_le32(&drex1->rdlvl_config,
- CTRL_RDLVL_DATA_ENABLE);
-
- val = (0x3 << DIRECT_CMD_BANK_SHIFT);
- for (chip = 0; chip < mem->chips_to_configure; chip++) {
- writel(val | (chip << DIRECT_CMD_CHIP_SHIFT),
- &drex0->directcmd);
- writel(val | (chip << DIRECT_CMD_CHIP_SHIFT),
- &drex1->directcmd);
- }
-
- update_reset_dll(&drex0->phycontrol0, DDR_MODE_DDR3);
- update_reset_dll(&drex1->phycontrol0, DDR_MODE_DDR3);
- }
-
/* Common Settings for Leveling */
val = PHY_CON12_RESET_VAL;
writel((val + n_lock_w_phy0), &phy0_ctrl->phy_con12);
@@ -602,6 +782,27 @@ int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
setbits_le32(&phy1_ctrl->phy_con2, DLL_DESKEW_EN);
}
+ /*
+ * Do software read leveling
+ *
+ * Do this before we turn on auto refresh since the auto refresh can
+ * be in conflict with the resync operation that's part of setting
+ * read leveling.
+ */
+ if (!reset) {
+ /* restore calibrated value after resume */
+ dmc_set_read_offset_value(phy0_ctrl, readl(&pmu->pmu_spare1));
+ dmc_set_read_offset_value(phy1_ctrl, readl(&pmu->pmu_spare2));
+ } else {
+ software_find_read_offset(phy0_ctrl, 0,
+ CTRL_LOCK_COARSE(lock0_info));
+ software_find_read_offset(phy1_ctrl, 1,
+ CTRL_LOCK_COARSE(lock1_info));
+ /* save calibrated value to restore after resume */
+ writel(dmc_get_read_offset_value(phy0_ctrl), &pmu->pmu_spare1);
+ writel(dmc_get_read_offset_value(phy1_ctrl), &pmu->pmu_spare2);
+ }
+
/* Send PALL command */
dmc_config_prech(mem, &drex0->directcmd);
dmc_config_prech(mem, &drex1->directcmd);
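For a concrete feel of the scan that software_find_read_offset() drives through test_shifts(), the standalone sketch below replaces the DRAM pattern check with a made-up read_ok() window and an assumed coarse lock value of 0x20; the numbers exist only to show how the left/right limits, the two scans and the final center fall out.

#include <stdio.h>

#define DEFAULT_DQS	8

/* Pretend reads succeed only for shifts in [-5, 20] (hypothetical window) */
static int read_ok(int shift)
{
	return shift >= -5 && shift <= 20;
}

static int scan(int start, int end)
{
	int incr = (start < end) ? 1 : -1;
	int shift;

	for (shift = start; shift != end + incr; shift += incr)
		if (read_ok(shift))
			return shift;	/* first shift that reads back OK */
	return DEFAULT_DQS;		/* fall back, as test_shifts() does */
}

int main(void)
{
	int coarse_lock_val = 0x20;	/* example coarse lock value */
	int left_limit = DEFAULT_DQS - coarse_lock_val;		/* 8 - 32 = -24 */
	int right_limit = DEFAULT_DQS + coarse_lock_val;	/* 8 + 32 = 40 */
	int left, right;

	if (right_limit > 127)
		right_limit = 127;

	left = scan(left_limit, right_limit);	/* -> -5, left edge */
	right = scan(right_limit, left_limit);	/* -> 20, right edge */
	printf("window [%d, %d], center %d\n", left, right, (left + right) / 2);
	return 0;
}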
diff --git a/arch/arm/cpu/armv7/exynos/exynos5_setup.h b/arch/arm/cpu/armv7/exynos/exynos5_setup.h
index 53b0ace..3242093 100644
--- a/arch/arm/cpu/armv7/exynos/exynos5_setup.h
+++ b/arch/arm/cpu/armv7/exynos/exynos5_setup.h
@@ -282,8 +282,12 @@
#define PHY_CON12_VAL 0x10107F50
#define CTRL_START (1 << 6)
#define CTRL_DLL_ON (1 << 5)
+#define CTRL_LOCK_COARSE_OFFSET 10
+#define CTRL_LOCK_COARSE_MASK (0x7F << CTRL_LOCK_COARSE_OFFSET)
+#define CTRL_LOCK_COARSE(x) (((x) & CTRL_LOCK_COARSE_MASK) >> \
+ CTRL_LOCK_COARSE_OFFSET)
#define CTRL_FORCE_MASK (0x7F << 8)
-#define CTRL_LOCK_COARSE_MASK (0x7F << 10)
+#define CTRL_FINE_LOCKED 0x7
#define CTRL_OFFSETD_RESET_VAL 0x8
#define CTRL_OFFSETD_VAL 0x7F
@@ -431,10 +435,10 @@
/*
* Definitions that differ with SoC's.
- * Below is the part defining macros for smdk5250.
- * Else part introduces macros for smdk5420.
+ * Below is the part defining macros for Exynos5250.
+ * Else part introduces macros for Exynos5420.
*/
-#ifndef CONFIG_SMDK5420
+#ifndef CONFIG_EXYNOS5420
/* APLL_CON1 */
#define APLL_CON1_VAL (0x00203800)
@@ -890,16 +894,11 @@ enum {
/*
* Memory variant specific initialization code for DDR3
*
- * @param mem Memory timings for this memory type.
- * @param mem_iv_size Memory interleaving size is a configurable parameter
- * which the DMC uses to decide how to split a memory
- * chunk into smaller chunks to support concurrent
- * accesses; may vary across boards.
+ * @param mem Memory timings for this memory type.
* @param reset Reset DDR PHY during initialization.
* @return 0 if ok, SETUP_ERR_... if there is a problem
*/
-int ddr3_mem_ctrl_init(struct mem_timings *mem, unsigned long mem_iv_size,
- int reset);
+int ddr3_mem_ctrl_init(struct mem_timings *mem, int reset);
/* Memory variant specific initialization code for LPDDR3 */
void lpddr3_mem_ctrl_init(void);
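The new CTRL_LOCK_COARSE()/CTRL_FINE_LOCKED macros are consumed by the dmc_init_ddr3.c changes above. A tiny sketch of the decode they perform on a PHY_CON13 sample; the 0x0000A407 value is made up for illustration.

#include <stdio.h>

#define CTRL_LOCK_COARSE_OFFSET	10
#define CTRL_LOCK_COARSE_MASK	(0x7F << CTRL_LOCK_COARSE_OFFSET)
#define CTRL_LOCK_COARSE(x)	(((x) & CTRL_LOCK_COARSE_MASK) >> \
				 CTRL_LOCK_COARSE_OFFSET)
#define CTRL_FINE_LOCKED	0x7

int main(void)
{
	unsigned int lock_info = 0x0000A407;	/* hypothetical PHY_CON13 sample */

	/* bits [2:0] all set -> fine lock; bits [16:10] hold the coarse value */
	if ((lock_info & CTRL_FINE_LOCKED) == CTRL_FINE_LOCKED)
		printf("fine lock, coarse value 0x%02x\n",
		       CTRL_LOCK_COARSE(lock_info));
	return 0;
}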
diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index dcc270f..83e1dcf 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -49,8 +49,6 @@ int do_lowlevel_init(void)
arch_cpu_init();
- set_ps_hold_ctrl();
-
reset_status = get_reset_status();
switch (reset_status) {
diff --git a/arch/arm/cpu/armv7/keystone/Makefile b/arch/arm/cpu/armv7/keystone/Makefile
index b1bd022..c4af252 100644
--- a/arch/arm/cpu/armv7/keystone/Makefile
+++ b/arch/arm/cpu/armv7/keystone/Makefile
@@ -5,7 +5,6 @@
# SPDX-License-Identifier: GPL-2.0+
#
-obj-y += aemif.o
obj-y += init.o
obj-y += psc.o
obj-y += clock.o
diff --git a/arch/arm/cpu/armv7/keystone/aemif.c b/arch/arm/cpu/armv7/keystone/aemif.c
deleted file mode 100644
index 9b26886..0000000
--- a/arch/arm/cpu/armv7/keystone/aemif.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Keystone2: Asynchronous EMIF Configuration
- *
- * (C) Copyright 2012-2014
- * Texas Instruments Incorporated, <www.ti.com>
- *
- * SPDX-License-Identifier: GPL-2.0+
- */
-
-#include <common.h>
-#include <asm/io.h>
-#include <asm/arch/clock.h>
-#include <asm/arch/emif_defs.h>
-
-#define AEMIF_CFG_SELECT_STROBE(v) ((v) ? 1 << 31 : 0)
-#define AEMIF_CFG_EXTEND_WAIT(v) ((v) ? 1 << 30 : 0)
-#define AEMIF_CFG_WR_SETUP(v) (((v) & 0x0f) << 26)
-#define AEMIF_CFG_WR_STROBE(v) (((v) & 0x3f) << 20)
-#define AEMIF_CFG_WR_HOLD(v) (((v) & 0x07) << 17)
-#define AEMIF_CFG_RD_SETUP(v) (((v) & 0x0f) << 13)
-#define AEMIF_CFG_RD_STROBE(v) (((v) & 0x3f) << 7)
-#define AEMIF_CFG_RD_HOLD(v) (((v) & 0x07) << 4)
-#define AEMIF_CFG_TURN_AROUND(v) (((v) & 0x03) << 2)
-#define AEMIF_CFG_WIDTH(v) (((v) & 0x03) << 0)
-
-#define set_config_field(reg, field, val) \
- do { \
- if (val != -1) { \
- reg &= ~AEMIF_CFG_##field(0xffffffff); \
- reg |= AEMIF_CFG_##field(val); \
- } \
- } while (0)
-
-void configure_async_emif(int cs, struct async_emif_config *cfg)
-{
- unsigned long tmp;
-
- if (cfg->mode == ASYNC_EMIF_MODE_NAND) {
- tmp = __raw_readl(&davinci_emif_regs->nandfcr);
- tmp |= (1 << cs);
- __raw_writel(tmp, &davinci_emif_regs->nandfcr);
-
- } else if (cfg->mode == ASYNC_EMIF_MODE_ONENAND) {
- tmp = __raw_readl(&davinci_emif_regs->one_nand_cr);
- tmp |= (1 << cs);
- __raw_writel(tmp, &davinci_emif_regs->one_nand_cr);
- }
-
- tmp = __raw_readl(&davinci_emif_regs->abncr[cs]);
-
- set_config_field(tmp, SELECT_STROBE, cfg->select_strobe);
- set_config_field(tmp, EXTEND_WAIT, cfg->extend_wait);
- set_config_field(tmp, WR_SETUP, cfg->wr_setup);
- set_config_field(tmp, WR_STROBE, cfg->wr_strobe);
- set_config_field(tmp, WR_HOLD, cfg->wr_hold);
- set_config_field(tmp, RD_SETUP, cfg->rd_setup);
- set_config_field(tmp, RD_STROBE, cfg->rd_strobe);
- set_config_field(tmp, RD_HOLD, cfg->rd_hold);
- set_config_field(tmp, TURN_AROUND, cfg->turn_around);
- set_config_field(tmp, WIDTH, cfg->width);
-
- __raw_writel(tmp, &davinci_emif_regs->abncr[cs]);
-}
-
-void init_async_emif(int num_cs, struct async_emif_config *config)
-{
- int cs;
-
- for (cs = 0; cs < num_cs; cs++)
- configure_async_emif(cs, config + cs);
-}
diff --git a/arch/arm/cpu/armv7/mx6/Makefile b/arch/arm/cpu/armv7/mx6/Makefile
index d7285fc..6dc9f8e 100644
--- a/arch/arm/cpu/armv7/mx6/Makefile
+++ b/arch/arm/cpu/armv7/mx6/Makefile
@@ -8,4 +8,5 @@
#
obj-y := soc.o clock.o
+obj-$(CONFIG_SPL_BUILD) += ddr.o
obj-$(CONFIG_SECURE_BOOT) += hab.o
diff --git a/arch/arm/cpu/armv7/mx6/ddr.c b/arch/arm/cpu/armv7/mx6/ddr.c
new file mode 100644
index 0000000..0434211
--- /dev/null
+++ b/arch/arm/cpu/armv7/mx6/ddr.c
@@ -0,0 +1,490 @@
+/*
+ * Copyright (C) 2014 Gateworks Corporation
+ * Author: Tim Harvey <tharvey@gateworks.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <common.h>
+#include <linux/types.h>
+#include <asm/arch/mx6-ddr.h>
+#include <asm/arch/sys_proto.h>
+#include <asm/io.h>
+#include <asm/types.h>
+
+#if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6Q) || defined(CONFIG_MX6D)
+/* Configure MX6DQ mmdc iomux */
+void mx6dq_dram_iocfg(unsigned width,
+ const struct mx6dq_iomux_ddr_regs *ddr,
+ const struct mx6dq_iomux_grp_regs *grp)
+{
+ volatile struct mx6dq_iomux_ddr_regs *mx6_ddr_iomux;
+ volatile struct mx6dq_iomux_grp_regs *mx6_grp_iomux;
+
+ mx6_ddr_iomux = (struct mx6dq_iomux_ddr_regs *)MX6DQ_IOM_DDR_BASE;
+ mx6_grp_iomux = (struct mx6dq_iomux_grp_regs *)MX6DQ_IOM_GRP_BASE;
+
+ /* DDR IO Type */
+ mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
+ mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;
+
+ /* Clock */
+ mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
+ mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;
+
+ /* Address */
+ mx6_ddr_iomux->dram_cas = ddr->dram_cas;
+ mx6_ddr_iomux->dram_ras = ddr->dram_ras;
+ mx6_grp_iomux->grp_addds = grp->grp_addds;
+
+ /* Control */
+ mx6_ddr_iomux->dram_reset = ddr->dram_reset;
+ mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
+ mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
+ mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
+ mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
+ mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
+ mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;
+
+ /* Data Strobes */
+ mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
+ mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
+ mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
+ if (width >= 32) {
+ mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
+ mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
+ }
+ if (width >= 64) {
+ mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
+ mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
+ mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
+ mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
+ }
+
+ /* Data */
+ mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
+ mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
+ mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
+ if (width >= 32) {
+ mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
+ mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
+ }
+ if (width >= 64) {
+ mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
+ mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
+ mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
+ mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
+ }
+ mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
+ mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
+ if (width >= 32) {
+ mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
+ mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
+ }
+ if (width >= 64) {
+ mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
+ mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
+ mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
+ mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
+ }
+}
+#endif
+
+#if defined(CONFIG_MX6QDL) || defined(CONFIG_MX6DL) || defined(CONFIG_MX6S)
+/* Configure MX6SDL mmdc iomux */
+void mx6sdl_dram_iocfg(unsigned width,
+ const struct mx6sdl_iomux_ddr_regs *ddr,
+ const struct mx6sdl_iomux_grp_regs *grp)
+{
+ volatile struct mx6sdl_iomux_ddr_regs *mx6_ddr_iomux;
+ volatile struct mx6sdl_iomux_grp_regs *mx6_grp_iomux;
+
+ mx6_ddr_iomux = (struct mx6sdl_iomux_ddr_regs *)MX6SDL_IOM_DDR_BASE;
+ mx6_grp_iomux = (struct mx6sdl_iomux_grp_regs *)MX6SDL_IOM_GRP_BASE;
+
+ /* DDR IO Type */
+ mx6_grp_iomux->grp_ddr_type = grp->grp_ddr_type;
+ mx6_grp_iomux->grp_ddrpke = grp->grp_ddrpke;
+
+ /* Clock */
+ mx6_ddr_iomux->dram_sdclk_0 = ddr->dram_sdclk_0;
+ mx6_ddr_iomux->dram_sdclk_1 = ddr->dram_sdclk_1;
+
+ /* Address */
+ mx6_ddr_iomux->dram_cas = ddr->dram_cas;
+ mx6_ddr_iomux->dram_ras = ddr->dram_ras;
+ mx6_grp_iomux->grp_addds = grp->grp_addds;
+
+ /* Control */
+ mx6_ddr_iomux->dram_reset = ddr->dram_reset;
+ mx6_ddr_iomux->dram_sdcke0 = ddr->dram_sdcke0;
+ mx6_ddr_iomux->dram_sdcke1 = ddr->dram_sdcke1;
+ mx6_ddr_iomux->dram_sdba2 = ddr->dram_sdba2;
+ mx6_ddr_iomux->dram_sdodt0 = ddr->dram_sdodt0;
+ mx6_ddr_iomux->dram_sdodt1 = ddr->dram_sdodt1;
+ mx6_grp_iomux->grp_ctlds = grp->grp_ctlds;
+
+ /* Data Strobes */
+ mx6_grp_iomux->grp_ddrmode_ctl = grp->grp_ddrmode_ctl;
+ mx6_ddr_iomux->dram_sdqs0 = ddr->dram_sdqs0;
+ mx6_ddr_iomux->dram_sdqs1 = ddr->dram_sdqs1;
+ if (width >= 32) {
+ mx6_ddr_iomux->dram_sdqs2 = ddr->dram_sdqs2;
+ mx6_ddr_iomux->dram_sdqs3 = ddr->dram_sdqs3;
+ }
+ if (width >= 64) {
+ mx6_ddr_iomux->dram_sdqs4 = ddr->dram_sdqs4;
+ mx6_ddr_iomux->dram_sdqs5 = ddr->dram_sdqs5;
+ mx6_ddr_iomux->dram_sdqs6 = ddr->dram_sdqs6;
+ mx6_ddr_iomux->dram_sdqs7 = ddr->dram_sdqs7;
+ }
+
+ /* Data */
+ mx6_grp_iomux->grp_ddrmode = grp->grp_ddrmode;
+ mx6_grp_iomux->grp_b0ds = grp->grp_b0ds;
+ mx6_grp_iomux->grp_b1ds = grp->grp_b1ds;
+ if (width >= 32) {
+ mx6_grp_iomux->grp_b2ds = grp->grp_b2ds;
+ mx6_grp_iomux->grp_b3ds = grp->grp_b3ds;
+ }
+ if (width >= 64) {
+ mx6_grp_iomux->grp_b4ds = grp->grp_b4ds;
+ mx6_grp_iomux->grp_b5ds = grp->grp_b5ds;
+ mx6_grp_iomux->grp_b6ds = grp->grp_b6ds;
+ mx6_grp_iomux->grp_b7ds = grp->grp_b7ds;
+ }
+ mx6_ddr_iomux->dram_dqm0 = ddr->dram_dqm0;
+ mx6_ddr_iomux->dram_dqm1 = ddr->dram_dqm1;
+ if (width >= 32) {
+ mx6_ddr_iomux->dram_dqm2 = ddr->dram_dqm2;
+ mx6_ddr_iomux->dram_dqm3 = ddr->dram_dqm3;
+ }
+ if (width >= 64) {
+ mx6_ddr_iomux->dram_dqm4 = ddr->dram_dqm4;
+ mx6_ddr_iomux->dram_dqm5 = ddr->dram_dqm5;
+ mx6_ddr_iomux->dram_dqm6 = ddr->dram_dqm6;
+ mx6_ddr_iomux->dram_dqm7 = ddr->dram_dqm7;
+ }
+}
+#endif
+
+/*
+ * Configure mx6 mmdc registers based on:
+ * - board-specific memory configuration
+ * - board-specific calibration data
+ * - ddr3 chip details
+ *
+ * The various calculations here are derived from the Freescale
+ * i.MX6DQSDL DDR3 Script Aid spreadsheet (DOC-94917) designed to generate MMDC
+ * configuration registers based on memory system and memory chip parameters.
+ *
+ * The defaults here are those which were specified in the spreadsheet.
+ * For details on each register, refer to the IMX6DQRM and/or IMX6SDLRM
+ * section titled MMDC initialization
+ */
+#define MR(val, ba, cmd, cs1) \
+ ((val << 16) | (1 << 15) | (cmd << 4) | (cs1 << 3) | ba)
+void mx6_dram_cfg(const struct mx6_ddr_sysinfo *i,
+ const struct mx6_mmdc_calibration *c,
+ const struct mx6_ddr3_cfg *m)
+{
+ volatile struct mmdc_p_regs *mmdc0;
+ volatile struct mmdc_p_regs *mmdc1;
+ u32 reg;
+ u8 tcke, tcksrx, tcksre, txpdll, taofpd, taonpd, trrd;
+ u8 todtlon, taxpd, tanpd, tcwl, txp, tfaw, tcl;
+ u8 todt_idle_off = 0x4; /* from DDR3 Script Aid spreadsheet */
+ u16 trcd, trc, tras, twr, tmrd, trtp, trp, twtr, trfc, txs, txpr;
+ u16 CS0_END;
+ u16 tdllk = 0x1ff; /* DLL locking time: 512 cycles (JEDEC DDR3) */
+ int clkper; /* clock period in picoseconds */
+ int clock; /* clock freq in MHz */
+ int cs;
+
+ mmdc0 = (struct mmdc_p_regs *)MMDC_P0_BASE_ADDR;
+ mmdc1 = (struct mmdc_p_regs *)MMDC_P1_BASE_ADDR;
+
+ /* MX6D/MX6Q: 1066 MHz memory clock, clkper = 1.894ns = 1894ps */
+ if (is_cpu_type(MXC_CPU_MX6Q) || is_cpu_type(MXC_CPU_MX6D)) {
+ clock = 528;
+ tcwl = 4;
+ }
+ /* MX6S/MX6DL: 800 MHz memory clock, clkper = 2.5ns = 2500ps */
+ else {
+ clock = 400;
+ tcwl = 3;
+ }
+ clkper = (1000*1000)/clock; /* ps */
+ todtlon = tcwl;
+ taxpd = tcwl;
+ tanpd = tcwl;
+ tcwl = tcwl;
+
+ switch (m->density) {
+ case 1: /* 1Gb per chip */
+ trfc = DIV_ROUND_UP(110000, clkper) - 1;
+ txs = DIV_ROUND_UP(120000, clkper) - 1;
+ break;
+ case 2: /* 2Gb per chip */
+ trfc = DIV_ROUND_UP(160000, clkper) - 1;
+ txs = DIV_ROUND_UP(170000, clkper) - 1;
+ break;
+ case 4: /* 4Gb per chip */
+ trfc = DIV_ROUND_UP(260000, clkper) - 1;
+ txs = DIV_ROUND_UP(270000, clkper) - 1;
+ break;
+ case 8: /* 8Gb per chip */
+ trfc = DIV_ROUND_UP(350000, clkper) - 1;
+ txs = DIV_ROUND_UP(360000, clkper) - 1;
+ break;
+ default:
+ /* invalid density */
+ printf("invalid chip density\n");
+ hang();
+ break;
+ }
+ txpr = txs;
+
+ switch (m->mem_speed) {
+ case 800:
+ txp = DIV_ROUND_UP(MAX(3*clkper, 7500), clkper) - 1;
+ tcke = DIV_ROUND_UP(MAX(3*clkper, 7500), clkper) - 1;
+ if (m->pagesz == 1) {
+ tfaw = DIV_ROUND_UP(40000, clkper) - 1;
+ trrd = DIV_ROUND_UP(MAX(4*clkper, 10000), clkper) - 1;
+ } else {
+ tfaw = DIV_ROUND_UP(50000, clkper) - 1;
+ trrd = DIV_ROUND_UP(MAX(4*clkper, 10000), clkper) - 1;
+ }
+ break;
+ case 1066:
+ txp = DIV_ROUND_UP(MAX(3*clkper, 7500), clkper) - 1;
+ tcke = DIV_ROUND_UP(MAX(3*clkper, 5625), clkper) - 1;
+ if (m->pagesz == 1) {
+ tfaw = DIV_ROUND_UP(37500, clkper) - 1;
+ trrd = DIV_ROUND_UP(MAX(4*clkper, 7500), clkper) - 1;
+ } else {
+ tfaw = DIV_ROUND_UP(50000, clkper) - 1;
+ trrd = DIV_ROUND_UP(MAX(4*clkper, 10000), clkper) - 1;
+ }
+ break;
+ case 1333:
+ txp = DIV_ROUND_UP(MAX(3*clkper, 6000), clkper) - 1;
+ tcke = DIV_ROUND_UP(MAX(3*clkper, 5625), clkper) - 1;
+ if (m->pagesz == 1) {
+ tfaw = DIV_ROUND_UP(30000, clkper) - 1;
+ trrd = DIV_ROUND_UP(MAX(4*clkper, 6000), clkper) - 1;
+ } else {
+ tfaw = DIV_ROUND_UP(45000, clkper) - 1;
+ trrd = DIV_ROUND_UP(MAX(4*clkper, 7500), clkper) - 1;
+ }
+ break;
+ case 1600:
+ txp = DIV_ROUND_UP(MAX(3*clkper, 6000), clkper) - 1;
+ tcke = DIV_ROUND_UP(MAX(3*clkper, 5000), clkper) - 1;
+ if (m->pagesz == 1) {
+ tfaw = DIV_ROUND_UP(30000, clkper) - 1;
+ trrd = DIV_ROUND_UP(MAX(4*clkper, 6000), clkper) - 1;
+ } else {
+ tfaw = DIV_ROUND_UP(40000, clkper) - 1;
+ trrd = DIV_ROUND_UP(MAX(4*clkper, 7500), clkper) - 1;
+ }
+ break;
+ default:
+ printf("invalid memory speed\n");
+ hang();
+ break;
+ }
+ txpdll = DIV_ROUND_UP(MAX(10*clkper, 24000), clkper) - 1;
+ tcl = DIV_ROUND_UP(m->trcd, clkper/10) - 3;
+ tcksre = DIV_ROUND_UP(MAX(5*clkper, 10000), clkper);
+ tcksrx = tcksre;
+ taonpd = DIV_ROUND_UP(2000, clkper) - 1;
+ taofpd = taonpd;
+ trp = DIV_ROUND_UP(m->trcd, clkper/10) - 1;
+ trcd = trp;
+ trc = DIV_ROUND_UP(m->trcmin, clkper/10) - 1;
+ tras = DIV_ROUND_UP(m->trasmin, clkper/10) - 1;
+ twr = DIV_ROUND_UP(15000, clkper) - 1;
+ tmrd = DIV_ROUND_UP(MAX(12*clkper, 15000), clkper) - 1;
+ twtr = ROUND(MAX(4*clkper, 7500)/clkper, 1) - 1;
+ trtp = twtr;
+ CS0_END = ((4*i->cs_density) <= 120) ? (4*i->cs_density)+7 : 127;
+ debug("density:%d Gb (%d Gb per chip)\n", i->cs_density, m->density);
+ debug("clock: %dMHz (%d ps)\n", clock, clkper);
+ debug("memspd:%d\n", m->mem_speed);
+ debug("tcke=%d\n", tcke);
+ debug("tcksrx=%d\n", tcksrx);
+ debug("tcksre=%d\n", tcksre);
+ debug("taofpd=%d\n", taofpd);
+ debug("taonpd=%d\n", taonpd);
+ debug("todtlon=%d\n", todtlon);
+ debug("tanpd=%d\n", tanpd);
+ debug("taxpd=%d\n", taxpd);
+ debug("trfc=%d\n", trfc);
+ debug("txs=%d\n", txs);
+ debug("txp=%d\n", txp);
+ debug("txpdll=%d\n", txpdll);
+ debug("tfaw=%d\n", tfaw);
+ debug("tcl=%d\n", tcl);
+ debug("trcd=%d\n", trcd);
+ debug("trp=%d\n", trp);
+ debug("trc=%d\n", trc);
+ debug("tras=%d\n", tras);
+ debug("twr=%d\n", twr);
+ debug("tmrd=%d\n", tmrd);
+ debug("tcwl=%d\n", tcwl);
+ debug("tdllk=%d\n", tdllk);
+ debug("trtp=%d\n", trtp);
+ debug("twtr=%d\n", twtr);
+ debug("trrd=%d\n", trrd);
+ debug("txpr=%d\n", txpr);
+ debug("CS0_END=%d\n", CS0_END);
+ debug("ncs=%d\n", i->ncs);
+ debug("Rtt_wr=%d\n", i->rtt_wr);
+ debug("Rtt_nom=%d\n", i->rtt_nom);
+ debug("SRT=%d\n", m->SRT);
+ debug("tcl=%d\n", tcl);
+ debug("twr=%d\n", twr);
+
+ /*
+ * board-specific configuration:
+ * These values are determined empirically and vary per board layout
+ * see:
+ * appnote, ddr3 spreadsheet
+ */
+ mmdc0->mpwldectrl0 = c->p0_mpwldectrl0;
+ mmdc0->mpwldectrl1 = c->p0_mpwldectrl1;
+ mmdc0->mpdgctrl0 = c->p0_mpdgctrl0;
+ mmdc0->mpdgctrl1 = c->p0_mpdgctrl1;
+ mmdc0->mprddlctl = c->p0_mprddlctl;
+ mmdc0->mpwrdlctl = c->p0_mpwrdlctl;
+ if (i->dsize > 1) {
+ mmdc1->mpwldectrl0 = c->p1_mpwldectrl0;
+ mmdc1->mpwldectrl1 = c->p1_mpwldectrl1;
+ mmdc1->mpdgctrl0 = c->p1_mpdgctrl0;
+ mmdc1->mpdgctrl1 = c->p1_mpdgctrl1;
+ mmdc1->mprddlctl = c->p1_mprddlctl;
+ mmdc1->mpwrdlctl = c->p1_mpwrdlctl;
+ }
+
+ /* Read data DQ Byte0-3 delay */
+ mmdc0->mprddqby0dl = (u32)0x33333333;
+ mmdc0->mprddqby1dl = (u32)0x33333333;
+ if (i->dsize > 0) {
+ mmdc0->mprddqby2dl = (u32)0x33333333;
+ mmdc0->mprddqby3dl = (u32)0x33333333;
+ }
+ if (i->dsize > 1) {
+ mmdc1->mprddqby0dl = (u32)0x33333333;
+ mmdc1->mprddqby1dl = (u32)0x33333333;
+ mmdc1->mprddqby2dl = (u32)0x33333333;
+ mmdc1->mprddqby3dl = (u32)0x33333333;
+ }
+
+ /* MMDC Termination: rtt_nom:2 RZQ/2(120ohm), rtt_nom:1 RZQ/4(60ohm) */
+ reg = (i->rtt_nom == 2) ? 0x00011117 : 0x00022227;
+ mmdc0->mpodtctrl = reg;
+ if (i->dsize > 1)
+ mmdc1->mpodtctrl = reg;
+
+ /* complete calibration */
+ reg = (1 << 11); /* Force measurement on delay-lines */
+ mmdc0->mpmur0 = reg;
+ if (i->dsize > 1)
+ mmdc1->mpmur0 = reg;
+
+ /* Step 1: configuration request */
+ mmdc0->mdscr = (u32)(1 << 15); /* config request */
+
+ /* Step 2: Timing configuration */
+ reg = (trfc << 24) | (txs << 16) | (txp << 13) | (txpdll << 9) |
+ (tfaw << 4) | tcl;
+ mmdc0->mdcfg0 = reg;
+ reg = (trcd << 29) | (trp << 26) | (trc << 21) | (tras << 16) |
+ (1 << 15) | /* trpa */
+ (twr << 9) | (tmrd << 5) | tcwl;
+ mmdc0->mdcfg1 = reg;
+ reg = (tdllk << 16) | (trtp << 6) | (twtr << 3) | trrd;
+ mmdc0->mdcfg2 = reg;
+ reg = (taofpd << 27) | (taonpd << 24) | (tanpd << 20) | (taxpd << 16) |
+ (todtlon << 12) | (todt_idle_off << 4);
+ mmdc0->mdotc = reg;
+ mmdc0->mdasp = CS0_END; /* CS addressing */
+
+ /* Step 3: Configure DDR type */
+ reg = (i->cs1_mirror << 19) | (i->walat << 16) | (i->bi_on << 12) |
+ (i->mif3_mode << 9) | (i->ralat << 6);
+ mmdc0->mdmisc = reg;
+
+ /* Step 4: Configure delay while leaving reset */
+ reg = (txpr << 16) | (i->sde_to_rst << 8) | (i->rst_to_cke << 0);
+ mmdc0->mdor = reg;
+
+ /* Step 5: Configure DDR physical parameters (density and burst len) */
+ reg = (m->rowaddr - 11) << 24 | /* ROW */
+ (m->coladdr - 9) << 20 | /* COL */
+ (1 << 19) | /* Burst Length = 8 for DDR3 */
+ (i->dsize << 16); /* DDR data bus size */
+ mmdc0->mdctl = reg;
+
+ /* Step 6: Perform ZQ calibration */
+ reg = (u32)0xa1390001; /* one-time HW ZQ calib */
+ mmdc0->mpzqhwctrl = reg;
+ if (i->dsize > 1)
+ mmdc1->mpzqhwctrl = reg;
+
+ /* Step 7: Enable MMDC with desired chip select */
+ reg = mmdc0->mdctl |
+ (1 << 31) | /* SDE_0 for CS0 */
+ ((i->ncs == 2) ? 1 : 0) << 30; /* SDE_1 for CS1 */
+ mmdc0->mdctl = reg;
+
+ /* Step 8: Write Mode Registers to Init DDR3 devices */
+ for (cs = 0; cs < i->ncs; cs++) {
+ /* MR2 */
+ reg = (i->rtt_wr & 3) << 9 | (m->SRT & 1) << 7 |
+ ((tcwl - 3) & 3) << 3;
+ mmdc0->mdscr = (u32)MR(reg, 2, 3, cs);
+ /* MR3 */
+ mmdc0->mdscr = (u32)MR(0, 3, 3, cs);
+ /* MR1 */
+ reg = ((i->rtt_nom & 1) ? 1 : 0) << 2 |
+ ((i->rtt_nom & 2) ? 1 : 0) << 6;
+ mmdc0->mdscr = (u32)MR(reg, 1, 3, cs);
+ reg = ((tcl - 1) << 4) | /* CAS */
+ (1 << 8) | /* DLL Reset */
+ ((twr - 3) << 9); /* Write Recovery */
+ /* MR0 */
+ mmdc0->mdscr = (u32)MR(reg, 0, 3, cs);
+ /* ZQ calibration */
+ reg = (1 << 10);
+ mmdc0->mdscr = (u32)MR(reg, 0, 4, cs);
+ }
+
+ /* Step 10: Power down control and self-refresh */
+ reg = (tcke & 0x7) << 16 |
+ 5 << 12 | /* PWDT_1: 256 cycles */
+ 5 << 8 | /* PWDT_0: 256 cycles */
+ 1 << 6 | /* BOTH_CS_PD */
+ (tcksrx & 0x7) << 3 |
+ (tcksre & 0x7);
+ mmdc0->mdpdc = reg;
+ mmdc0->mapsr = (u32)0x00011006; /* ADOPT power down enabled */
+
+ /* Step 11: Configure ZQ calibration: one-time and periodic 1ms */
+ mmdc0->mpzqhwctrl = (u32)0xa1390003;
+ if (i->dsize > 1)
+ mmdc1->mpzqhwctrl = (u32)0xa1390003;
+
+ /* Step 12: Configure and activate periodic refresh */
+ reg = (1 << 14) | /* REF_SEL: Periodic refresh cycles of 32kHz */
+ (7 << 11); /* REFR: Refresh Rate - 8 refreshes */
+ mmdc0->mdref = reg;
+
+ /* Step 13: Deassert config request - init complete */
+ mmdc0->mdscr = (u32)0x00000000;
+
+ /* wait for auto-ZQ calibration to complete */
+ mdelay(1);
+}
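Most of the timing fields programmed by mx6_dram_cfg() come from the same pattern: a JEDEC minimum in picoseconds, divided by the clock period and rounded up, minus one for the register encoding. Below is a short host-side worked example for one assumed configuration (400 MHz MMDC clock, 2 Gb parts), reusing the constants from the switch statements above.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define MAX(a, b)		((a) > (b) ? (a) : (b))

int main(void)
{
	int clock = 400;			/* MHz */
	int clkper = (1000 * 1000) / clock;	/* 2500 ps */

	int trfc = DIV_ROUND_UP(160000, clkper) - 1;	/* 2Gb: 160 ns -> 63 */
	int txs = DIV_ROUND_UP(170000, clkper) - 1;	/* 170 ns -> 67 */
	int txpdll = DIV_ROUND_UP(MAX(10 * clkper, 24000), clkper) - 1;	/* -> 9 */
	int twr = DIV_ROUND_UP(15000, clkper) - 1;	/* 15 ns -> 5 */

	printf("clkper=%d trfc=%d txs=%d txpdll=%d twr=%d\n",
	       clkper, trfc, txs, txpdll, twr);
	return 0;
}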
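Step 8's mode-register writes all go through the MR() helper defined just before mx6_dram_cfg(): the MR value lands in bits 31:16, the configuration-request bit in bit 15, and the command type, chip select and bank address in the low bits. The sketch below shows what two of those commands evaluate to for chip select 0; the printed constants are simply what the macro yields for these inputs, not figures taken from a datasheet.

#include <stdio.h>

#define MR(val, ba, cmd, cs1) \
	((val << 16) | (1 << 15) | (cmd << 4) | (cs1 << 3) | ba)

int main(void)
{
	/* MR3 write: bank address 3, command type 3, CS0 (as in step 8) */
	printf("MR3 -> MDSCR = 0x%08x\n", (unsigned int)MR(0, 3, 3, 0));
	/* ZQ calibration: value has A10 set, command type 4, CS0 */
	printf("ZQ  -> MDSCR = 0x%08x\n", (unsigned int)MR(1 << 10, 0, 4, 0));
	return 0;
}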
diff --git a/arch/arm/cpu/armv7/mx6/hab.c b/arch/arm/cpu/armv7/mx6/hab.c
index 5187775..f6810a6 100644
--- a/arch/arm/cpu/armv7/mx6/hab.c
+++ b/arch/arm/cpu/armv7/mx6/hab.c
@@ -7,15 +7,69 @@
#include <common.h>
#include <asm/io.h>
#include <asm/arch/hab.h>
+#include <asm/arch/sys_proto.h>
/* -------- start of HAB API updates ------------*/
-#define hab_rvt_report_event ((hab_rvt_report_event_t *)HAB_RVT_REPORT_EVENT)
-#define hab_rvt_report_status ((hab_rvt_report_status_t *)HAB_RVT_REPORT_STATUS)
-#define hab_rvt_authenticate_image \
- ((hab_rvt_authenticate_image_t *)HAB_RVT_AUTHENTICATE_IMAGE)
-#define hab_rvt_entry ((hab_rvt_entry_t *)HAB_RVT_ENTRY)
-#define hab_rvt_exit ((hab_rvt_exit_t *)HAB_RVT_EXIT)
-#define hab_rvt_clock_init HAB_RVT_CLOCK_INIT
+
+#define hab_rvt_report_event_p \
+( \
+ ((is_cpu_type(MXC_CPU_MX6Q) || \
+ is_cpu_type(MXC_CPU_MX6D)) && \
+ (soc_rev() >= CHIP_REV_1_5)) ? \
+ ((hab_rvt_report_event_t *)HAB_RVT_REPORT_EVENT_NEW) : \
+ (is_cpu_type(MXC_CPU_MX6DL) && \
+ (soc_rev() >= CHIP_REV_1_2)) ? \
+ ((hab_rvt_report_event_t *)HAB_RVT_REPORT_EVENT_NEW) : \
+ ((hab_rvt_report_event_t *)HAB_RVT_REPORT_EVENT) \
+)
+
+#define hab_rvt_report_status_p \
+( \
+ ((is_cpu_type(MXC_CPU_MX6Q) || \
+ is_cpu_type(MXC_CPU_MX6D)) && \
+ (soc_rev() >= CHIP_REV_1_5)) ? \
+ ((hab_rvt_report_status_t *)HAB_RVT_REPORT_STATUS_NEW) :\
+ (is_cpu_type(MXC_CPU_MX6DL) && \
+ (soc_rev() >= CHIP_REV_1_2)) ? \
+ ((hab_rvt_report_status_t *)HAB_RVT_REPORT_STATUS_NEW) :\
+ ((hab_rvt_report_status_t *)HAB_RVT_REPORT_STATUS) \
+)
+
+#define hab_rvt_authenticate_image_p \
+( \
+ ((is_cpu_type(MXC_CPU_MX6Q) || \
+ is_cpu_type(MXC_CPU_MX6D)) && \
+ (soc_rev() >= CHIP_REV_1_5)) ? \
+ ((hab_rvt_authenticate_image_t *)HAB_RVT_AUTHENTICATE_IMAGE_NEW) : \
+ (is_cpu_type(MXC_CPU_MX6DL) && \
+ (soc_rev() >= CHIP_REV_1_2)) ? \
+ ((hab_rvt_authenticate_image_t *)HAB_RVT_AUTHENTICATE_IMAGE_NEW) : \
+ ((hab_rvt_authenticate_image_t *)HAB_RVT_AUTHENTICATE_IMAGE) \
+)
+
+#define hab_rvt_entry_p \
+( \
+ ((is_cpu_type(MXC_CPU_MX6Q) || \
+ is_cpu_type(MXC_CPU_MX6D)) && \
+ (soc_rev() >= CHIP_REV_1_5)) ? \
+ ((hab_rvt_entry_t *)HAB_RVT_ENTRY_NEW) : \
+ (is_cpu_type(MXC_CPU_MX6DL) && \
+ (soc_rev() >= CHIP_REV_1_2)) ? \
+ ((hab_rvt_entry_t *)HAB_RVT_ENTRY_NEW) : \
+ ((hab_rvt_entry_t *)HAB_RVT_ENTRY) \
+)
+
+#define hab_rvt_exit_p \
+( \
+ ((is_cpu_type(MXC_CPU_MX6Q) || \
+ is_cpu_type(MXC_CPU_MX6D)) && \
+ (soc_rev() >= CHIP_REV_1_5)) ? \
+ ((hab_rvt_exit_t *)HAB_RVT_EXIT_NEW) : \
+ (is_cpu_type(MXC_CPU_MX6DL) && \
+ (soc_rev() >= CHIP_REV_1_2)) ? \
+ ((hab_rvt_exit_t *)HAB_RVT_EXIT_NEW) : \
+ ((hab_rvt_exit_t *)HAB_RVT_EXIT) \
+)
bool is_hab_enabled(void)
{
@@ -52,6 +106,11 @@ int get_hab_status(void)
size_t bytes = sizeof(event_data); /* Event size in bytes */
enum hab_config config = 0;
enum hab_state state = 0;
+ hab_rvt_report_event_t *hab_rvt_report_event;
+ hab_rvt_report_status_t *hab_rvt_report_status;
+
+ hab_rvt_report_event = hab_rvt_report_event_p;
+ hab_rvt_report_status = hab_rvt_report_status_p;
if (is_hab_enabled())
puts("\nSecure boot enabled\n");
diff --git a/arch/arm/cpu/armv7/omap-common/mem-common.c b/arch/arm/cpu/armv7/omap-common/mem-common.c
index 944ef84..5bc7e1f 100644
--- a/arch/arm/cpu/armv7/omap-common/mem-common.c
+++ b/arch/arm/cpu/armv7/omap-common/mem-common.c
@@ -121,7 +121,8 @@ void gpmc_init(void)
writel(0x00000008, &gpmc_cfg->sysconfig);
writel(0x00000000, &gpmc_cfg->irqstatus);
writel(0x00000000, &gpmc_cfg->irqenable);
- writel(0x00000000, &gpmc_cfg->timeout_control);
+ /* disable timeout, set a safe reset value */
+ writel(0x00001ff0, &gpmc_cfg->timeout_control);
#ifdef CONFIG_NOR
writel(0x00000200, &gpmc_cfg->config);
#else
@@ -133,5 +134,6 @@ void gpmc_init(void)
writel(0, &gpmc_cfg->cs[0].config7);
sdelay(1000);
/* enable chip-select specific configurations */
- enable_gpmc_cs_config(gpmc_regs, &gpmc_cfg->cs[0], base, size);
+ if (base != 0)
+ enable_gpmc_cs_config(gpmc_regs, &gpmc_cfg->cs[0], base, size);
}
diff --git a/arch/arm/cpu/armv7/omap3/board.c b/arch/arm/cpu/armv7/omap3/board.c
index e252e7f..667e77f 100644
--- a/arch/arm/cpu/armv7/omap3/board.c
+++ b/arch/arm/cpu/armv7/omap3/board.c
@@ -147,7 +147,7 @@ void secure_unlock_mem(void)
* configure secure registers and exit secure world
* general use.
*****************************************************************************/
-void secureworld_exit()
+void secureworld_exit(void)
{
unsigned long i;
@@ -178,7 +178,7 @@ void secureworld_exit()
* Description: If chip is GP/EMU(special) type, unlock the SRAM for
* general use.
*****************************************************************************/
-void try_unlock_memory()
+void try_unlock_memory(void)
{
int mode;
int in_sdram = is_running_in_sdram();
diff --git a/arch/arm/cpu/armv7/tegra20/display.c b/arch/arm/cpu/armv7/tegra20/display.c
index 488f0c6..fd77f3f 100644
--- a/arch/arm/cpu/armv7/tegra20/display.c
+++ b/arch/arm/cpu/armv7/tegra20/display.c
@@ -328,7 +328,7 @@ static int tegra_display_decode_config(const void *blob,
rgb = fdt_subnode_offset(blob, node, "rgb");
config->panel_node = fdtdec_lookup_phandle(blob, rgb, "nvidia,panel");
- if (!config->panel_node < 0) {
+ if (config->panel_node < 0) {
debug("%s: Cannot find panel information\n", __func__);
return -1;
}
diff --git a/arch/arm/cpu/armv7/zynq/u-boot.lds b/arch/arm/cpu/armv7/zynq/u-boot.lds
index 69500a6..4dc9bb0 100644
--- a/arch/arm/cpu/armv7/zynq/u-boot.lds
+++ b/arch/arm/cpu/armv7/zynq/u-boot.lds
@@ -18,6 +18,7 @@ SECTIONS
.text :
{
*(.__image_copy_start)
+ *(.vectors)
CPUDIR/start.o (.text*)
*(.text*)
}
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S
index e0a5946..38dea5c 100644
--- a/arch/arm/cpu/armv8/transition.S
+++ b/arch/arm/cpu/armv8/transition.S
@@ -43,7 +43,7 @@ ENTRY(armv8_switch_to_el1)
mrs x0, cnthctl_el2
orr x0, x0, #0x3 /* Enable EL1 access to timers */
msr cnthctl_el2, x0
- msr cntvoff_el2, x0
+ msr cntvoff_el2, xzr
mrs x0, cntkctl_el1
orr x0, x0, #0x3 /* Enable EL0 access to timers */
msr cntkctl_el1, x0