Print this page
armv6: simplify highvecs enabling code
Use the barrel shifter, Luke.
armv6: bcm2835 & qvpb have nearly identical locore _start
It makes sense to common-ize _start for all armv6 machines.  They will all
have to do the same basic setup.  If there is any machine specific setup
they need to do, they can do so in the new _mach_start function.
armv6: bit 2 (0x4) enables the dcache
This fixes a pretty simple typo.  Sadly, this still isn't enough to get
bcm2835 past mutex_enter.

*** 18,35 ****
  #include <sys/machparam.h>
  #include <sys/cpu_asm.h>
  #include "assym.h"
  
! #if defined(__lint)
! 
! #endif
  
  /*
   * Each of the different machines has its own locore.s to take care of getting
! * us into fakebop for the first time. After that, they all return here to a
! * generic locore to take us into mlsetup and then to main forever more.
   */
  
  /*
   * External globals
   */
--- 18,50 ----
  #include <sys/machparam.h>
  #include <sys/cpu_asm.h>
  #include "assym.h"
  
! /*
!  * Every story needs a beginning. This is ours.
!  */
  
  /*
   * Each of the different machines has its own locore.s to take care of getting
! * the machine specific setup done. Just before jumping into fakebop the
! * first time, we call this machine specific code.
! */
! 
! /*
!  * We are in a primordial world here. The loader is going to come along and
!  * boot us at _start. As we've started the world, we also need to set up a
!  * few things about us, for example our stack pointer. To help us out, it's
!  * useful to remember what the loader set up for us:
!  *
!  * - unaligned accesses are allowed (A = 0, U = 1)
!  * - virtual memory is enabled
!  * - we (unix) are mapped right where we want to be
!  * - a UART has been enabled & any memory mapped registers have been 1:1
!  *   mapped
!  * - ATAGs have been updated to tell us what the mappings are
!  * - I/D L1 caches have been enabled
   */
  
  /*
   * External globals
   */
*** 42,51 **** --- 57,103 ----
  	.data
  	.comm	t0stack, DEFAULTSTKSZ, 32
  	.comm	t0, 4094, 32
+ 
+ /*
+  * Recall that _start is the traditional entry point for an ELF binary.
+  */
+ 	ENTRY(_start)
+ 	ldr	sp, =t0stack
+ 	ldr	r4, =DEFAULTSTKSZ
+ 	add	sp, r4
+ 	bic	sp, sp, #0xff
+ 
+ 	/*
+ 	 * establish bogus stacks for exceptional CPU states, our exception
+ 	 * code should never make use of these, and we want loud and violent
+ 	 * failure should we accidentally try.
+ 	 */
+ 	cps	#(CPU_MODE_UND)
+ 	mov	sp, #-1
+ 	cps	#(CPU_MODE_ABT)
+ 	mov	sp, #-1
+ 	cps	#(CPU_MODE_FIQ)
+ 	mov	sp, #-1
+ 	cps	#(CPU_MODE_IRQ)
+ 	mov	sp, #-1
+ 	cps	#(CPU_MODE_SVC)
+ 
+ 	/* Enable highvecs (moves the base of the exception vector) */
+ 	mrc	p15, 0, r3, c1, c0, 0
+ 	orr	r3, r3, #(1 << 13)
+ 	mcr	p15, 0, r3, c1, c0, 0
+ 
+ 	/* invoke machine specific setup */
+ 	bl	_mach_start
+ 
+ 	bl	_fakebop_start
+ 	SET_SIZE(_start)
+ 
+ #if defined(__lint)
  
  /* ARGSUSED */
  void
  _locore_start(struct boot_syscalls *sysp, struct bootops *bop)
*** 57,67 ****
   * We got here from _kobj_init() via exitto(). We have a few different
   * tasks that we need to take care of before we hop into mlsetup and
   * then main. We're never going back so we shouldn't feel compelled to
   * preserve any registers.
   *
-  * o Enable unaligned access
   * o Enable our I/D-caches
   * o Save the boot syscalls and bootops for later
   * o Set up our stack to be the real stack of t0stack.
   * o Save t0 as curthread
   * o Set up a struct REGS for mlsetup
--- 109,118 ----
*** 109,128 ****
  	 */
  	ldr	r0, =t0
  	mcr	p15, 0, r0, c13, c0, 4
  
  	/*
! 	 * Go ahead now and enable unaligned access, the L1 I/D caches.
! 	 *
! 	 * Bit 2 is for the D cache
! 	 * Bit 12 is for the I cache
! 	 * Bit 22 is for unaligned access
  	 */
  	mrc	p15, 0, r0, c1, c0, 0
! 	orr	r0, #0x02
! 	orr	r0, #0x1000
! 	orr	r0, #0x400000
  	mcr	p15, 0, r0, c1, c0, 0
  
  	/*
  	 * mlsetup() takes the struct regs as an argument. main doesn't take
  	 * any and should never return. Currently, we have an 8-byte aligned
--- 160,174 ----
  	 */
  	ldr	r0, =t0
  	mcr	p15, 0, r0, c13, c0, 4
  
  	/*
! 	 * Go ahead now and enable the L1 I/D caches.
  	 */
  	mrc	p15, 0, r0, c1, c0, 0
! 	orr	r0, #0x04	/* D-cache */
! 	orr	r0, #0x1000	/* I-cache */
  	mcr	p15, 0, r0, c1, c0, 0
  
  	/*
  	 * mlsetup() takes the struct regs as an argument. main doesn't take
  	 * any and should never return. Currently, we have an 8-byte aligned
*** 146,150 **** --- 192,206 ----
  	SET_SIZE(_locore_start)
  
  __return_from_main:
  	.string	"main() returned"
  #endif /* __lint */
+ 
+ 	ENTRY(arm_reg_read)
+ 	ldr	r0, [r0]
+ 	bx	lr
+ 	SET_SIZE(arm_reg_read)
+ 
+ 	ENTRY(arm_reg_write)
+ 	str	r1, [r0]
+ 	bx	lr
+ 	SET_SIZE(arm_reg_write)