Print this page
armv6: bit 2 (0x4) enables the dcache
This fixes a pretty simple typo: the SCTLR D-cache enable (the C bit) is
bit 2, i.e. mask 0x4, but the code was OR-ing in 0x2. Sadly, this still
isn't enough to get bcm2835 past mutex_enter.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/armv6/ml/glocore.s
+++ new/usr/src/uts/armv6/ml/glocore.s
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2013 (c) Joyent, Inc. All rights reserved.
14 14 * Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
15 15 */
16 16
17 17 #include <sys/asm_linkage.h>
18 18 #include <sys/machparam.h>
19 19 #include <sys/cpu_asm.h>
20 20
21 21 #include "assym.h"
22 22
23 23 #if defined(__lint)
24 24
25 25 #endif
26 26
27 27 /*
28 28 * Each of the different machines has its own locore.s to take care of getting
29 29 * us into fakebop for the first time. After that, they all return here to a
30 30 * generic locore to take us into mlsetup and then to main forever more.
31 31 */
32 32
33 33 /*
34 34 * External globals
35 35 */
36 36 .globl _locore_start
37 37 .globl mlsetup
38 38 .globl sysp
39 39 .globl bootops
40 40 .globl bootopsp
41 41 .globl t0
42 42
43 43 .data
44 44 .comm t0stack, DEFAULTSTKSZ, 32
45 45 .comm t0, 4094, 32
46 46
47 47 #if defined(__lint)
48 48
49 49 /* ARGSUSED */
50 50 void
51 51 _locore_start(struct boot_syscalls *sysp, struct bootops *bop)
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
52 52 {}
53 53
54 54 #else /* __lint */
55 55
56 56 /*
57 57 * We got here from _kobj_init() via exitto(). We have a few different
58 58 * tasks that we need to take care of before we hop into mlsetup and
59 59 * then main. We're never going back so we shouldn't feel compelled to
60 60 * preserve any registers.
61 61 *
62 - * o Enable unaligned access
63 62 * o Enable our I/D-caches
64 63 * o Save the boot syscalls and bootops for later
65 64 * o Set up our stack to be the real stack of t0stack.
66 65 * o Save t0 as curthread
67 66 * o Set up a struct REGS for mlsetup
68 67 * o Make sure that we're 8 byte aligned for the call
69 68 */
70 69
71 70 ENTRY(_locore_start)
72 71
73 72
74 73 /*
75 74 * We've been running in t0stack anyway, up to this point, but
76 75 * _locore_start represents what is in effect a fresh start in the
77 76 * real kernel -- We'll never return back through here.
78 77 *
79 78 * So reclaim those few bytes
80 79 */
81 80 ldr sp, =t0stack
82 81 ldr r4, =(DEFAULTSTKSZ - REGSIZE)
83 82 add sp, r4
84 83 bic sp, sp, #0xff
85 84
86 85 /*
87 86 * Save flags and arguments for potential debugging
88 87 */
89 88 str r0, [sp, #REGOFF_R0]
90 89 str r1, [sp, #REGOFF_R1]
91 90 str r2, [sp, #REGOFF_R2]
92 91 str r3, [sp, #REGOFF_R3]
93 92 mrs r4, CPSR
94 93 str r4, [sp, #REGOFF_CPSR]
95 94
96 95 /*
97 96 * Save back the bootops and boot_syscalls.
98 97 */
99 98 ldr r2, =sysp
100 99 str r0, [r2]
101 100 ldr r2, =bootops
102 101 str r1, [r2]
103 102 ldr r2, =bootopsp
↓ open down ↓ |
31 lines elided |
↑ open up ↑ |
104 103 ldr r2, [r2]
105 104 str r1, [r2]
106 105
107 106 /*
108 107 * Set up our curthread pointer
109 108 */
110 109 ldr r0, =t0
111 110 mcr p15, 0, r0, c13, c0, 4
112 111
113 112 /*
114 - * Go ahead now and enable unaligned access, the L1 I/D caches.
115 - *
116 - * Bit 2 is for the D cache
117 - * Bit 12 is for the I cache
118 - * Bit 22 is for unaligned access
113 + * Go ahead now and enable the L1 I/D caches.
119 114 */
120 115 mrc p15, 0, r0, c1, c0, 0
121 - orr r0, #0x02
122 - orr r0, #0x1000
123 - orr r0, #0x400000
116 + orr r0, #0x04 /* D-cache */
117 + orr r0, #0x1000 /* I-cache */
124 118 mcr p15, 0, r0, c1, c0, 0
125 119
126 120 /*
127 121 * mlsetup() takes the struct regs as an argument. main doesn't take
128 122 * any and should never return. Currently, we have an 8-byte aligned
129 123 * stack. We want to push a zero frame pointer to terminate any
130 124 * stack walking, but that would cause us to end up with only a
131 125 * 4-byte aligned stack. So, to keep things nice and correct, we
132 126 * push a zero value twice - it's similar to a typical function
133 127 * entry:
134 128 * push { r9, lr }
135 129 */
136 130 mov r9,#0
137 131 push { r9 } /* link register */
138 132 push { r9 } /* frame pointer */
139 133 mov r0, sp
140 134 bl mlsetup
141 135 bl main
142 136 /* NOTREACHED */
143 137 ldr r0,=__return_from_main
144 138 ldr r0,[r0]
145 139 bl panic
146 140 SET_SIZE(_locore_start)
147 141
148 142 __return_from_main:
149 143 .string "main() returned"
150 144 #endif /* __lint */
↓ open down ↓ |
17 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX