unix: enable caches in locore
The loader should really be as simple (and therefore as small) as possible.
It should configure the machine just enough that unix can make certain
assumptions, but it should leave the more complex initialization to unix
itself.  The listing below shows the relevant portion of locore.s before the
change, followed by the same code after it.


  25  */
  26 
  27 /*
  28  * Each of the different machines has its own locore.s to take care of getting
  29  * the machine specific setup done.  Just before jumping into fakebop the
  30  * first time, we call this machine specific code.
  31  */
  32 
  33 /*
  34  * We are in a primordial world here. The loader is going to come along and
  35  * boot us at _start. As we've started the world, we also need to set up a
  36  * few things about us, for example our stack pointer. To help us out, it's
  37  * useful to remember what the loader set up for us:
  38  *
  39  * - unaligned accesses are allowed (A = 0, U = 1)
  40  * - virtual memory is enabled
  41  *   - we (unix) are mapped right where we want to be
  42  *   - a UART has been enabled & any memory mapped registers have been 1:1
  43  *     mapped
  44  *   - ATAGs have been updated to tell us what the mappings are
  45  * - I/D L1 caches have been enabled
  46  */
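
An aside on the ATAGs mentioned above: they are the ARM boot tag list, a
chain of { size, tag } records that the loader leaves in memory for the
kernel (conventionally located via a pointer in r2, though how this port
finds them is not shown here).  The mapping-description tags themselves are
specific to this port; the sketch below covers only the standard layout and
is not code from the tree:

	#include <stdint.h>

	struct atag_header {
		uint32_t size;	/* tag length in 32-bit words, header included */
		uint32_t tag;	/* tag identifier */
	};

	#define	ATAG_CORE	0x54410001	/* first tag in the list */
	#define	ATAG_MEM	0x54410002	/* a physical memory region */
	#define	ATAG_CMDLINE	0x54410009	/* kernel command line */
	#define	ATAG_NONE	0x00000000	/* terminator, size == 0 */

	/* step to the next tag; each entry is size words long */
	#define	atag_next(t) \
		((struct atag_header *)((uint32_t *)(t) + (t)->size))

Walking the list is just repeated atag_next() until an ATAG_NONE header
with a zero size turns up.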
  47 
  48         /*
  49          * External globals
  50          */
  51         .globl  _locore_start
  52         .globl  mlsetup
  53         .globl  sysp
  54         .globl  bootops
  55         .globl  bootopsp
  56         .globl  t0
  57 
  58         .data
  59         .comm   t0stack, DEFAULTSTKSZ, 32
  60         .comm   t0, 4094, 32
  61 
  62 
  63 /*
  64  * Recall that _start is the traditional entry point for an ELF binary.
  65  */


  72         /*
  73          * establish bogus stacks for exceptional CPU states; our exception
  74          * code should never make use of these, and we want loud and violent
  75          * failure should we accidentally try.
  76          */
  77         cps     #(CPU_MODE_UND)
  78         mov     sp, #-1
  79         cps     #(CPU_MODE_ABT)
  80         mov     sp, #-1
  81         cps     #(CPU_MODE_FIQ)
  82         mov     sp, #-1
  83         cps     #(CPU_MODE_IRQ)
  84         mov     sp, #-1
  85         cps     #(CPU_MODE_SVC)
  86 
  87         /* Enable highvecs (moves the base of the exception vector) */
  88         mrc     p15, 0, r3, c1, c0, 0
  89         orr     r3, r3, #(1 << 13)
  90         mcr     p15, 0, r3, c1, c0, 0
  91 
  92         /* invoke machine specific setup */
  93         bl      _mach_start
  94 
  95         bl      _fakebop_start
  96         SET_SIZE(_start)
  97 
  98 
  99 #if defined(__lint)
 100 
 101 /* ARGSUSED */
 102 void
 103 _locore_start(struct boot_syscalls *sysp, struct bootops *bop)
 104 {}
 105 
 106 #else   /* __lint */
 107 
 108         /*
 109          * We got here from _kobj_init() via exitto().  We have a few different
 110          * tasks that we need to take care of before we hop into mlsetup and
 111          * then main. We're never going back so we shouldn't feel compelled to
 112          * preserve any registers.
 113          *
 114          *  o Enable our I/D-caches
 115          *  o Save the boot syscalls and bootops for later
 116          *  o Set up our stack to be the real stack of t0stack.
 117          *  o Save t0 as curthread
 118          *  o Set up a struct REGS for mlsetup
 119          *  o Make sure that we're 8 byte aligned for the call
 120          */
 121 
 122         ENTRY(_locore_start)
 123 
 124 
 125         /*
 126          * We've been running in t0stack anyway, up to this point, but
 127          * _locore_start represents what is in effect a fresh start in the
 128          * real kernel -- We'll never return back through here.
 129          *
 130          * So reclaim those few bytes
 131          */
 132         ldr     sp, =t0stack
 133         ldr     r4, =(DEFAULTSTKSZ - REGSIZE)
 134         add     sp, r4


 143         str     r3, [sp, #REGOFF_R3]
 144         mrs     r4, CPSR
 145         str     r4, [sp, #REGOFF_CPSR]
 146 
 147         /*
 148          * Save back the bootops and boot_syscalls.
 149          */
 150         ldr     r2, =sysp
 151         str     r0, [r2]
 152         ldr     r2, =bootops
 153         str     r1, [r2]
 154         ldr     r2, =bootopsp
 155         ldr     r2, [r2]
 156         str     r1, [r2]
 157 
 158         /*
 159          * Set up our curthread pointer
 160          */
 161         ldr     r0, =t0
 162         mcr     p15, 0, r0, c13, c0, 4
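
An aside on the coprocessor register used here: c13/c0/4 is the ARMv7
TPIDRPRW "software thread ID" register, accessible from privileged modes
only, so stashing t0 in it leaves curthread a single coprocessor read away.
Purely as an illustration (the helper name is made up, this is not code from
the tree), the matching read could look like:

	/* illustrative only: recover curthread from TPIDRPRW */
	static inline void *
	arm_curthread(void)
	{
		void *t;
		__asm__ __volatile__("mrc p15, 0, %0, c13, c0, 4" : "=r" (t));
		return (t);
	}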
 163 
 164         /*
 165          * Go ahead now and enable the L1 I/D caches.  
 166          */
 167         mrc     p15, 0, r0, c1, c0, 0
 168         orr     r0, #0x04       /* D-cache */
 169         orr     r0, #0x1000     /* I-cache */
 170         mcr     p15, 0, r0, c1, c0, 0
 171 
 172         /*
 173          * mlsetup() takes the struct regs as an argument. main doesn't take
 174          * any and should never return. Currently, we have an 8-byte aligned
 175          * stack.  We want to push a zero frame pointer to terminate any
 176          * stack walking, but that would cause us to end up with only a
 177          * 4-byte aligned stack.  So, to keep things nice and correct, we
 178          * push a zero value twice - it's similar to a typical function
 179          * entry:
 180          *      push { r9, lr }
 181          */
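
To make the alignment argument concrete: AAPCS requires sp to be 8-byte
aligned at any public call, and each single-register push below moves sp by
4 bytes.  If sp were, say, 0x1000 beforehand, one push would leave it at
0xffc (only 4-byte aligned) while the pair lands it at 0xff8, aligned again,
with a zero fp/lr pair in place to terminate any stack walk.  (The addresses
here are illustrative only.)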
 182         mov     r9,#0
 183         push    { r9 }          /* link register */
 184         push    { r9 }          /* frame pointer */
 185         mov     r0, sp
 186         bl      mlsetup
 187         bl      main
 188         /* NOTREACHED */
 189         ldr     r0,=__return_from_main
 190         ldr     r0,[r0]


  25  */
  26 
  27 /*
  28  * Each of the different machines has its own locore.s to take care of getting
  29  * the machine specific setup done.  Just before jumping into fakebop the
  30  * first time, we call this machine specific code.
  31  */
  32 
  33 /*
  34  * We are in a primordial world here. The loader is going to come along and
  35  * boot us at _start. As we've started the world, we also need to set up a
  36  * few things about us, for example our stack pointer. To help us out, it's
  37  * useful to remember what the loader set up for us:
  38  *
  39  * - unaligned accesses are allowed (A = 0, U = 1)
  40  * - virtual memory is enabled
  41  *   - we (unix) are mapped right where we want to be
  42  *   - a UART has been enabled & any memory mapped registers have been 1:1
  43  *     mapped
  44  *   - ATAGs have been updated to tell us what the mappings are
  45  * - I/D L1 caches may be disabled
  46  */
  47 
  48         /*
  49          * External globals
  50          */
  51         .globl  _locore_start
  52         .globl  mlsetup
  53         .globl  sysp
  54         .globl  bootops
  55         .globl  bootopsp
  56         .globl  t0
  57 
  58         .data
  59         .comm   t0stack, DEFAULTSTKSZ, 32
  60         .comm   t0, 4094, 32
  61 
  62 
  63 /*
  64  * Recall that _start is the traditional entry point for an ELF binary.
  65  */


  72         /*
  73          * establish bogus stacks for exceptional CPU states; our exception
  74          * code should never make use of these, and we want loud and violent
  75          * failure should we accidentally try.
  76          */
  77         cps     #(CPU_MODE_UND)
  78         mov     sp, #-1
  79         cps     #(CPU_MODE_ABT)
  80         mov     sp, #-1
  81         cps     #(CPU_MODE_FIQ)
  82         mov     sp, #-1
  83         cps     #(CPU_MODE_IRQ)
  84         mov     sp, #-1
  85         cps     #(CPU_MODE_SVC)
  86 
  87         /* Enable highvecs (moves the base of the exception vector) */
  88         mrc     p15, 0, r3, c1, c0, 0
  89         orr     r3, r3, #(1 << 13)
  90         mcr     p15, 0, r3, c1, c0, 0
  91 
  92         /*
  93          * Go ahead now and enable the L1 I/D caches.  (Involves
  94          * invalidating the caches and the TLB.)
  95          */
  96         mov     r4, #0
  97         mov     r5, #0
  98         mcr     p15, 0, r4, c7, c7, 0   /* invalidate caches */
  99         mcr     p15, 0, r4, c8, c7, 0   /* invalidate tlb */
 100         mcr     p15, 0, r5, c7, c10, 4  /* DSB */
 101         mrc     p15, 0, r4, c1, c0, 0
 102         orr     r4, #0x04       /* D-cache */
 103         orr     r4, #0x1000     /* I-cache */
 104         mcr     p15, 0, r4, c1, c0, 0
 105 
 106         /* invoke machine specific setup */
 107         bl      _mach_start
 108 
 109         bl      _fakebop_start
 110         SET_SIZE(_start)
 111 
 112 
 113 #if defined(__lint)
 114 
 115 /* ARGSUSED */
 116 void
 117 _locore_start(struct boot_syscalls *sysp, struct bootops *bop)
 118 {}
 119 
 120 #else   /* __lint */
 121 
 122         /*
 123          * We got here from _kobj_init() via exitto().  We have a few different
 124          * tasks that we need to take care of before we hop into mlsetup and
 125          * then main. We're never going back so we shouldn't feel compelled to
 126          * preserve any registers.
 127          *
 128          *  o Save the boot syscalls and bootops for later
 129          *  o Set up our stack to be the real stack of t0stack.
 130          *  o Save t0 as curthread
 131          *  o Set up a struct REGS for mlsetup
 132          *  o Make sure that we're 8 byte aligned for the call
 133          */
 134 
 135         ENTRY(_locore_start)
 136 
 137 
 138         /*
 139          * We've been running in t0stack anyway, up to this point, but
 140          * _locore_start represents what is in effect a fresh start in the
 141          * real kernel -- We'll never return back through here.
 142          *
 143          * So reclaim those few bytes
 144          */
 145         ldr     sp, =t0stack
 146         ldr     r4, =(DEFAULTSTKSZ - REGSIZE)
 147         add     sp, r4


 156         str     r3, [sp, #REGOFF_R3]
 157         mrs     r4, CPSR
 158         str     r4, [sp, #REGOFF_CPSR]
 159 
 160         /*
 161          * Save back the bootops and boot_syscalls.
 162          */
 163         ldr     r2, =sysp
 164         str     r0, [r2]
 165         ldr     r2, =bootops
 166         str     r1, [r2]
 167         ldr     r2, =bootopsp
 168         ldr     r2, [r2]
 169         str     r1, [r2]
 170 
 171         /*
 172          * Set up our curthread pointer
 173          */
 174         ldr     r0, =t0
 175         mcr     p15, 0, r0, c13, c0, 4
 176 
 177         /*
 178          * mlsetup() takes the struct regs as an argument. main doesn't take
 179          * any and should never return. Currently, we have an 8-byte aligned
 180          * stack.  We want to push a zero frame pointer to terminate any
 181          * stack walking, but that would cause us to end up with only a
 182          * 4-byte aligned stack.  So, to keep things nice and correct, we
 183          * push a zero value twice - it's similar to a typical function
 184          * entry:
 185          *      push { r9, lr }
 186          */
 187         mov     r9,#0
 188         push    { r9 }          /* link register */
 189         push    { r9 }          /* frame pointer */
 190         mov     r0, sp
 191         bl      mlsetup
 192         bl      main
 193         /* NOTREACHED */
 194         ldr     r0,=__return_from_main
 195         ldr     r0,[r0]