author     Paul Burton <paul.burton@imgtec.com>                2015-01-29 10:04:08 +0000
committer  Daniel Schwierzeck <daniel.schwierzeck@gmail.com>   2015-01-30 14:19:57 +0100
commit     a39b1cb7f0ff6be3f39c25b9ec625de2a352da9e (patch)
tree       bd4afabc6c78cbac4c1b433816642e892d503431 /arch/mips/cpu
parent     d4d774e00e7232b524ddb20d992c5b12d84e3ef2 (diff)
MIPS: use asm.h macros in mips32 start.S
Where the mips32 & mips64 implementations of start.S differ in terms of
access sizes & offsets, use the appropriate macros from asm.h to
abstract those differences away. This is in preparation for sharing a
single copy of start.S between mips32 & mips64.
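
For readers unfamiliar with the asm.h convention: the macros used in this patch select an instruction, directive, or constant by pointer size, so the same source assembles correctly for both ABIs. The following is a condensed sketch of the relevant definitions, modelled on the Linux kernel's <asm/asm.h> which U-Boot's header follows; it is abridged for illustration, not the verbatim header:

```
/* Abridged sketch of <asm/asm.h> (modelled on the Linux kernel header) */
#if (_MIPS_SZPTR == 32)
#define PTR_ADD		add
#define PTR_ADDI	addi
#define PTR_ADDU	addu
#define PTR_SUB		sub
#define PTR_L		lw
#define PTR_S		sw
#define PTR_LA		la
#define PTR_LI		li
#define PTR		.word
#define PTRSIZE		4
#define PTRLOG		2
#endif

#if (_MIPS_SZPTR == 64)
#define PTR_ADD		dadd
#define PTR_ADDI	daddi
#define PTR_ADDU	daddu
#define PTR_SUB		dsub
#define PTR_L		ld
#define PTR_S		sd
#define PTR_LA		dla
#define PTR_LI		dli
#define PTR		.dword
#define PTRSIZE		8
#define PTRLOG		3
#endif

/* Some CP0 registers (e.g. WatchLo) were widened to 64 bits, hence: */
#if (_MIPS_SIM == _MIPS_SIM_ABI32)
#define MFC0		mfc0
#define MTC0		mtc0
#else
#define MFC0		dmfc0
#define MTC0		dmtc0
#endif
```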
The exception to this is loads of immediates destined for the CP0 Config
register: Config remains a 32-bit register on mips64, so constants written
to it can still be loaded as 32-bit values.
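
In context, the Config exception looks like this (an illustrative fragment; the constant shown is the standard cache-mode value from <asm/mipsregs.h>, not necessarily the one a given board configuration writes):

```
	/*
	 * CP0 Config is 32 bits wide even on mips64, so a plain li
	 * is enough for immediates written to it:
	 */
	li	t0, CONF_CM_CACHABLE_NONCOHERENT
	mtc0	t0, CP0_CONFIG

	/*
	 * A pointer-sized immediate, by contrast, must go through the
	 * width-aware macro (li on mips32, dli on mips64):
	 */
	PTR_LI	t1, CONFIG_SYS_INIT_SP_ADDR
```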
Signed-off-by: Paul Burton <paul.burton@imgtec.com>
Cc: Daniel Schwierzeck <daniel.schwierzeck@gmail.com>
Diffstat (limited to 'arch/mips/cpu')
-rw-r--r--  arch/mips/cpu/mips32/start.S  116
1 file changed, 61 insertions(+), 55 deletions(-)
```diff
diff --git a/arch/mips/cpu/mips32/start.S b/arch/mips/cpu/mips32/start.S
index 36b92cc..227af6d 100644
--- a/arch/mips/cpu/mips32/start.S
+++ b/arch/mips/cpu/mips32/start.S
@@ -8,6 +8,7 @@
 #include <asm-offsets.h>
 #include <config.h>
+#include <asm/asm.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
@@ -98,8 +99,8 @@ _start:
 reset:
 	/* Clear watch registers */
-	mtc0	zero, CP0_WATCHLO
-	mtc0	zero, CP0_WATCHHI
+	MTC0	zero, CP0_WATCHLO
+	MTC0	zero, CP0_WATCHHI
 
 	/* WP(Watch Pending), SW0/1 should be cleared */
 	mtc0	zero, CP0_CAUSE
@@ -116,21 +117,26 @@ reset:
 	mtc0	t0, CP0_CONFIG
 #endif
 
-	/* Initialize $gp */
+	/*
+	 * Initialize $gp, force pointer sized alignment of bal instruction to
+	 * forbid the compiler to put nop's between bal and _gp. This is
+	 * required to keep _gp and ra aligned to 8 byte.
+	 */
+	.align	PTRLOG
 	bal	1f
 	 nop
-	.word	_gp
+	PTR	_gp
 1:
-	lw	gp, 0(ra)
+	PTR_L	gp, 0(ra)
 
 #ifndef CONFIG_SKIP_LOWLEVEL_INIT
 	/* Initialize any external memory */
-	la	t9, lowlevel_init
+	PTR_LA	t9, lowlevel_init
 	jalr	t9
 	 nop
 
 	/* Initialize caches... */
-	la	t9, mips_cache_reset
+	PTR_LA	t9, mips_cache_reset
 	jalr	t9
 	 nop
@@ -140,15 +146,15 @@ reset:
 #endif
 
 	/* Set up temporary stack */
-	li	t0, -16
-	li	t1, CONFIG_SYS_INIT_SP_ADDR
+	PTR_LI	t0, -16
+	PTR_LI	t1, CONFIG_SYS_INIT_SP_ADDR
 	and	sp, t1, t0		# force 16 byte alignment
-	sub	sp, sp, GD_SIZE		# reserve space for gd
+	PTR_SUB	sp, sp, GD_SIZE		# reserve space for gd
 	and	sp, sp, t0		# force 16 byte alignment
 	move	k0, sp			# save gd pointer
 #ifdef CONFIG_SYS_MALLOC_F_LEN
-	li	t2, CONFIG_SYS_MALLOC_F_LEN
-	sub	sp, sp, t2		# reserve space for early malloc
+	PTR_LI	t2, CONFIG_SYS_MALLOC_F_LEN
+	PTR_SUB	sp, sp, t2		# reserve space for early malloc
 	and	sp, sp, t0		# force 16 byte alignment
 #endif
 	move	fp, sp
@@ -158,14 +164,14 @@ reset:
 1:
 	sw	zero, 0(t0)
 	blt	t0, t1, 1b
-	 addi	t0, 4
+	 PTR_ADDI t0, 4
 
 #ifdef CONFIG_SYS_MALLOC_F_LEN
-	addu	t0, k0, GD_MALLOC_BASE	# gd->malloc_base offset
+	PTR_ADDU t0, k0, GD_MALLOC_BASE	# gd->malloc_base offset
 	sw	sp, 0(t0)
 #endif
 
-	la	t9, board_init_f
+	PTR_LA	t9, board_init_f
 	jr	t9
 	 move	ra, zero
@@ -188,14 +194,14 @@ relocate_code:
 	move	s0, a1			# save gd in s0
 	move	s2, a2			# save destination address in s2
 
-	li	t0, CONFIG_SYS_MONITOR_BASE
-	sub	s1, s2, t0		# s1 <-- relocation offset
+	PTR_LI	t0, CONFIG_SYS_MONITOR_BASE
+	PTR_SUB	s1, s2, t0		# s1 <-- relocation offset
 
-	la	t3, in_ram
-	lw	t2, -12(t3)		# t2 <-- __image_copy_end
+	PTR_LA	t3, in_ram
+	PTR_L	t2, -(3 * PTRSIZE)(t3)	# t2 <-- __image_copy_end
 	move	t1, a2
 
-	add	gp, s1			# adjust gp
+	PTR_ADD	gp, s1			# adjust gp
 
 	/*
 	 * t0 = source address
@@ -205,26 +211,26 @@ relocate_code:
 1:
 	lw	t3, 0(t0)
 	sw	t3, 0(t1)
-	addu	t0, 4
+	PTR_ADDU t0, 4
 	blt	t0, t2, 1b
-	 addu	t1, 4
+	 PTR_ADDU t1, 4
 
 	/* If caches were enabled, we would have to flush them here. */
-	sub	a1, t1, s2		# a1 <-- size
-	la	t9, flush_cache
+	PTR_SUB	a1, t1, s2		# a1 <-- size
+	PTR_LA	t9, flush_cache
 	jalr	t9
 	 move	a0, s2			# a0 <-- destination address
 
 	/* Jump to where we've relocated ourselves */
-	addi	t0, s2, in_ram - _start
+	PTR_ADDI t0, s2, in_ram - _start
 	jr	t0
 	 nop
 
-	.word	__rel_dyn_end
-	.word	__rel_dyn_start
-	.word	__image_copy_end
-	.word	_GLOBAL_OFFSET_TABLE_
-	.word	num_got_entries
+	PTR	__rel_dyn_end
+	PTR	__rel_dyn_start
+	PTR	__image_copy_end
+	PTR	_GLOBAL_OFFSET_TABLE_
+	PTR	num_got_entries
 
 in_ram:
 	/*
@@ -233,46 +239,46 @@ in_ram:
 	 * GOT[0] is reserved. GOT[1] is also reserved for the dynamic object
 	 * generated by GNU ld. Skip these reserved entries from relocation.
 	 */
-	lw	t3, -4(t0)		# t3 <-- num_got_entries
-	lw	t8, -8(t0)		# t8 <-- _GLOBAL_OFFSET_TABLE_
-	add	t8, s1			# t8 now holds relocated _G_O_T_
-	addi	t8, t8, 8		# skipping first two entries
-	li	t2, 2
+	PTR_L	t3, -(1 * PTRSIZE)(t0)	# t3 <-- num_got_entries
+	PTR_L	t8, -(2 * PTRSIZE)(t0)	# t8 <-- _GLOBAL_OFFSET_TABLE_
+	PTR_ADD	t8, s1			# t8 now holds relocated _G_O_T_
+	PTR_ADDI t8, t8, 2 * PTRSIZE	# skipping first two entries
+	PTR_LI	t2, 2
 1:
-	lw	t1, 0(t8)
+	PTR_L	t1, 0(t8)
 	beqz	t1, 2f
-	 add	t1, s1
-	sw	t1, 0(t8)
+	 PTR_ADD t1, s1
+	PTR_S	t1, 0(t8)
 2:
-	addi	t2, 1
+	PTR_ADDI t2, 1
 	blt	t2, t3, 1b
-	 addi	t8, 4
+	 PTR_ADDI t8, PTRSIZE
 
 	/* Update dynamic relocations */
-	lw	t1, -16(t0)		# t1 <-- __rel_dyn_start
-	lw	t2, -20(t0)		# t2 <-- __rel_dyn_end
+	PTR_L	t1, -(4 * PTRSIZE)(t0)	# t1 <-- __rel_dyn_start
+	PTR_L	t2, -(5 * PTRSIZE)(t0)	# t2 <-- __rel_dyn_end
 
 	b	2f			# skip first reserved entry
-	 addi	t1, 8
+	 PTR_ADDI t1, 2 * PTRSIZE
 1:
 	lw	t8, -4(t1)		# t8 <-- relocation info
 
-	li	t3, 3
+	PTR_LI	t3, 3
 	bne	t8, t3, 2f		# skip non R_MIPS_REL32 entries
 	 nop
 
-	lw	t3, -8(t1)		# t3 <-- location to fix up in FLASH
+	PTR_L	t3, -(2 * PTRSIZE)(t1)	# t3 <-- location to fix up in FLASH
 
-	lw	t8, 0(t3)		# t8 <-- original pointer
-	add	t8, s1			# t8 <-- adjusted pointer
+	PTR_L	t8, 0(t3)		# t8 <-- original pointer
+	PTR_ADD	t8, s1			# t8 <-- adjusted pointer
 
-	add	t3, s1			# t3 <-- location to fix up in RAM
-	sw	t8, 0(t3)
+	PTR_ADD	t3, s1			# t3 <-- location to fix up in RAM
+	PTR_S	t8, 0(t3)
 2:
 	blt	t1, t2, 1b
-	 addi	t1, 8			# each rel.dyn entry is 8 bytes
+	 PTR_ADDI t1, 2 * PTRSIZE	# each rel.dyn entry is 2*PTRSIZE bytes
 
 	/*
 	 * Clear BSS
@@ -280,17 +286,17 @@ in_ram:
 	 * GOT is now relocated. Thus __bss_start and __bss_end can be
 	 * accessed directly via $gp.
 	 */
-	la	t1, __bss_start		# t1 <-- __bss_start
-	la	t2, __bss_end		# t2 <-- __bss_end
+	PTR_LA	t1, __bss_start		# t1 <-- __bss_start
+	PTR_LA	t2, __bss_end		# t2 <-- __bss_end
 1:
-	sw	zero, 0(t1)
+	PTR_S	zero, 0(t1)
 	blt	t1, t2, 1b
-	 addi	t1, 4
+	 PTR_ADDI t1, PTRSIZE
 
 	move	a0, s0			# a0 <-- gd
 	move	a1, s2
-	la	t9, board_init_r
+	PTR_LA	t9, board_init_r
 	jr	t9
 	 move	ra, zero
```
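
As a reading aid for the relocation hunks above (derived from the patch, not part of it): the PTR directives emit a small pointer table immediately before in_ram, and the scaled negative offsets index that table relative to the address of in_ram (held in t3 before the copy, and in t0 afterwards):

```
	/*
	 * Pointer table preceding in_ram, indexed from the relocated
	 * address of in_ram:
	 *
	 *   -(5 * PTRSIZE)   __rel_dyn_end
	 *   -(4 * PTRSIZE)   __rel_dyn_start
	 *   -(3 * PTRSIZE)   __image_copy_end
	 *   -(2 * PTRSIZE)   _GLOBAL_OFFSET_TABLE_
	 *   -(1 * PTRSIZE)   num_got_entries
	 *
	 * With PTRSIZE = 4 (mips32) these reduce to the old hard-coded
	 * offsets -20/-16/-12/-8/-4; with PTRSIZE = 8 (mips64) the same
	 * expressions yield -40/-32/-24/-16/-8 without further edits.
	 */
```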