Print this page
armv6: simplify highvecs enabling code
Use the barrel shifter, Luke.
armv6: bcm2835 & qvpb have nearly identical locore _start
It makes sense to common-ize _start for all armv6 machines.  They will all
have to do the same basic setup.  If there is any machine specific setup
they need to do, they can do so in the new _mach_start function.
armv6: bit 2 (0x4) enables the dcache
This fixes a pretty simple typo.  Sadly, this still isn't enough to get
bcm2835 past mutex_enter.

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/armv6/ml/glocore.s
          +++ new/usr/src/uts/armv6/ml/glocore.s
↓ open down ↓ 12 lines elided ↑ open up ↑
  13   13   * Copyright 2013 (c) Joyent, Inc. All rights reserved.
  14   14   * Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
  15   15   */
  16   16  
  17   17  #include <sys/asm_linkage.h>
  18   18  #include <sys/machparam.h>
  19   19  #include <sys/cpu_asm.h>
  20   20  
  21   21  #include "assym.h"
  22   22  
  23      -#if defined(__lint)
  24      -
  25      -#endif
       23 +/*
       24 + * Every story needs a beginning. This is ours.
       25 + */
  26   26  
  27   27  /*
  28   28   * Each of the different machines has its own locore.s to take care of getting
  29      - * us into fakebop for the first time. After that, they all return here to a
  30      - * generic locore to take us into mlsetup and then to main forever more.
       29 + * the machine specific setup done.  Just before jumping into fakebop the
       30 + * first time, we call this machine specific code.
       31 + */
       32 +
       33 +/*
       34 + * We are in a primordial world here. The loader is going to come along and
       35 + * boot us at _start. As we've started the world, we also need to set up a
       36 + * few things about us, for example our stack pointer. To help us out, it's
       37 + * useful to remember what the loader set up for us:
       38 + *
        39 + * - unaligned accesses are allowed (A = 0, U = 1)
       40 + * - virtual memory is enabled
        41 + *   - we (unix) are mapped right where we want to be
       42 + *   - a UART has been enabled & any memory mapped registers have been 1:1
       43 + *     mapped
       44 + *   - ATAGs have been updated to tell us what the mappings are
       45 + * - I/D L1 caches have been enabled
  31   46   */
  32   47  
  33   48          /*
  34   49           * External globals
  35   50           */
  36   51          .globl  _locore_start
  37   52          .globl  mlsetup
  38   53          .globl  sysp
  39   54          .globl  bootops
  40   55          .globl  bootopsp
  41   56          .globl  t0
  42   57  
  43   58          .data
  44   59          .comm   t0stack, DEFAULTSTKSZ, 32
  45   60          .comm   t0, 4094, 32
  46   61  
       62 +
       63 +/*
       64 + * Recall that _start is the traditional entry point for an ELF binary.
       65 + */
        66 +        ENTRY(_start)
        67 +        ldr     sp, =t0stack            /* sp = base of t0's stack */
        68 +        ldr     r4, =DEFAULTSTKSZ
        69 +        add     sp, r4                  /* point sp at the top; stacks grow down */
        70 +        bic     sp, sp, #0xff           /* round sp down to a 256-byte boundary */
        71 +
        72 +        /*
        73 +         * establish bogus stacks for exceptional CPU states; our exception
        74 +         * code should never make use of these, and we want loud and violent
        75 +         * failure should we accidentally try.
        76 +         */
        77 +        cps     #(CPU_MODE_UND)
        78 +        mov     sp, #-1                 /* invalid sp: any use faults */
        79 +        cps     #(CPU_MODE_ABT)
        80 +        mov     sp, #-1
        81 +        cps     #(CPU_MODE_FIQ)
        82 +        mov     sp, #-1
        83 +        cps     #(CPU_MODE_IRQ)
        84 +        mov     sp, #-1
        85 +        cps     #(CPU_MODE_SVC)         /* back to SVC mode, with our real sp */
        86 +
        87 +        /* Enable highvecs (moves the base of the exception vector) */
        88 +        mrc     p15, 0, r3, c1, c0, 0   /* r3 = CP15 control register (SCTLR) */
        89 +        orr     r3, r3, #(1 << 13)      /* set the V bit: high exception vectors */
        90 +        mcr     p15, 0, r3, c1, c0, 0
        91 +
        92 +        /* invoke machine specific setup */
        93 +        bl      _mach_start
        94 +
        95 +        bl      _fakebop_start          /* hand off to fakebop; no code follows */
        96 +        SET_SIZE(_start)
       97 +
       98 +
  47   99  #if defined(__lint)
  48  100  
  49  101  /* ARGSUSED */
  50  102  void
  51  103  _locore_start(struct boot_syscalls *sysp, struct bootops *bop)
  52  104  {}
  53  105  
  54  106  #else   /* __lint */
  55  107  
  56  108          /*
  57  109           * We got here from _kobj_init() via exitto().  We have a few different
  58  110           * tasks that we need to take care of before we hop into mlsetup and
  59  111           * then main. We're never going back so we shouldn't feel compelled to
  60  112           * preserve any registers.
  61  113           *
  62      -         *  o Enable unaligned access
  63  114           *  o Enable our I/D-caches
  64  115           *  o Save the boot syscalls and bootops for later
  65  116           *  o Set up our stack to be the real stack of t0stack.
  66  117           *  o Save t0 as curthread
  67  118           *  o Set up a struct REGS for mlsetup
  68  119           *  o Make sure that we're 8 byte aligned for the call
  69  120           */
  70  121  
  71  122          ENTRY(_locore_start)
  72  123  
↓ open down ↓ 31 lines elided ↑ open up ↑
 104  155          ldr     r2, [r2]
 105  156          str     r1, [r2]
 106  157  
 107  158          /*
 108  159           * Set up our curthread pointer
 109  160           */
 110  161          ldr     r0, =t0
 111  162          mcr     p15, 0, r0, c13, c0, 4
 112  163  
 113  164          /*
 114      -         * Go ahead now and enable unaligned access, the L1 I/D caches.
 115      -         *
 116      -         * Bit 2 is for the D cache
 117      -         * Bit 12 is for the I cache
 118      -         * Bit 22 is for unaligned access
       165 +         * Go ahead now and enable the L1 I/D caches.
 119  166           */
 120  167          mrc     p15, 0, r0, c1, c0, 0
 121      -        orr     r0, #0x02
 122      -        orr     r0, #0x1000
 123      -        orr     r0, #0x400000
      168 +        orr     r0, #0x04       /* D-cache */
      169 +        orr     r0, #0x1000     /* I-cache */
 124  170          mcr     p15, 0, r0, c1, c0, 0
 125  171  
 126  172          /*
 127  173           * mlsetup() takes the struct regs as an argument. main doesn't take
 128  174           * any and should never return. Currently, we have an 8-byte aligned
 129  175           * stack.  We want to push a zero frame pointer to terminate any
 130  176           * stack walking, but that would cause us to end up with only a
 131  177           * 4-byte aligned stack.  So, to keep things nice and correct, we
 132  178           * push a zero value twice - it's similar to a typical function
 133  179           * entry:
↓ open down ↓ 7 lines elided ↑ open up ↑
 141  187          bl      main
 142  188          /* NOTREACHED */
 143  189          ldr     r0,=__return_from_main
 144  190          ldr     r0,[r0]
 145  191          bl      panic
 146  192          SET_SIZE(_locore_start)
 147  193  
 148  194  __return_from_main:
 149  195          .string "main() returned"
 150  196  #endif  /* __lint */
      197 +
        198 +        ENTRY(arm_reg_read)
        199 +        ldr r0, [r0]    /* return the 32-bit value at address r0 */
        200 +        bx lr
        201 +        SET_SIZE(arm_reg_read)
      202 +
        203 +        ENTRY(arm_reg_write)
        204 +        str r1, [r0]    /* store the 32-bit value r1 at address r0 */
        205 +        bx lr
        206 +        SET_SIZE(arm_reg_write)
    
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX