Print this page
unix: enable caches in locore
The loader should really be as simple as possible to be as small as
possible. It should configure the machine so that unix can make certain
assumptions but it should leave more complex initialization to unix.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/armv6/ml/glocore.s
+++ new/usr/src/uts/armv6/ml/glocore.s
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2013 (c) Joyent, Inc. All rights reserved.
14 14 * Copyright (c) 2015 Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
15 15 */
16 16
17 17 #include <sys/asm_linkage.h>
18 18 #include <sys/machparam.h>
19 19 #include <sys/cpu_asm.h>
20 20
21 21 #include "assym.h"
22 22
23 23 /*
24 24 * Every story needs a beginning. This is ours.
25 25 */
26 26
27 27 /*
28 28 * Each of the different machines has its own locore.s to take care of getting
29 29 * the machine specific setup done. Just before jumping into fakebop the
30 30 * first time, we call this machine specific code.
31 31 */
32 32
33 33 /*
34 34 * We are in a primordial world here. The loader is going to come along and
↓ open down ↓ |
34 lines elided |
↑ open up ↑ |
35 35 * boot us at _start. As we've started the world, we also need to set up a
36 36 * few things about us, for example our stack pointer. To help us out, it's
37 37 * useful to remember what the loader set up for us:
38 38 *
 39 39 * - unaligned accesses are allowed (A = 0, U = 1)
40 40 * - virtual memory is enabled
 41 41 * - we (unix) are mapped right where we want to be
42 42 * - a UART has been enabled & any memory mapped registers have been 1:1
43 43 * mapped
44 44 * - ATAGs have been updated to tell us what the mappings are
45 - * - I/D L1 caches have been enabled
 45 + * - I/D L1 caches may be disabled
46 46 */
47 47
48 48 /*
49 49 * External globals
50 50 */
51 51 .globl _locore_start
52 52 .globl mlsetup
53 53 .globl sysp
54 54 .globl bootops
55 55 .globl bootopsp
56 56 .globl t0
57 57
58 58 .data
59 59 .comm t0stack, DEFAULTSTKSZ, 32
60 60 .comm t0, 4094, 32
61 61
62 62
63 63 /*
64 64 * Recall that _start is the traditional entry point for an ELF binary.
65 65 */
66 66 ENTRY(_start)
67 67 ldr sp, =t0stack
68 68 ldr r4, =DEFAULTSTKSZ
69 69 add sp, r4
70 70 bic sp, sp, #0xff
71 71
72 72 /*
73 73 * establish bogus stacks for exceptional CPU states, our exception
74 74 * code should never make use of these, and we want loud and violent
75 75 * failure should we accidentally try.
76 76 */
77 77 cps #(CPU_MODE_UND)
78 78 mov sp, #-1
79 79 cps #(CPU_MODE_ABT)
80 80 mov sp, #-1
81 81 cps #(CPU_MODE_FIQ)
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
82 82 mov sp, #-1
83 83 cps #(CPU_MODE_IRQ)
84 84 mov sp, #-1
85 85 cps #(CPU_MODE_SVC)
86 86
87 87 /* Enable highvecs (moves the base of the exception vector) */
88 88 mrc p15, 0, r3, c1, c0, 0
89 89 orr r3, r3, #(1 << 13)
90 90 mcr p15, 0, r3, c1, c0, 0
91 91
92 + /*
93 + * Go ahead now and enable the L1 I/D caches. (Involves
94 + * invalidating the caches and the TLB.)
95 + */
96 + mov r4, #0
97 + mov r5, #0
98 + mcr p15, 0, r4, c7, c7, 0 /* invalidate caches */
99 + mcr p15, 0, r4, c8, c7, 0 /* invalidate tlb */
100 + mcr p15, 0, r5, c7, c10, 4 /* DSB */
101 + mrc p15, 0, r4, c1, c0, 0
102 + orr r4, #0x04 /* D-cache */
103 + orr r4, #0x1000 /* I-cache */
104 + mcr p15, 0, r4, c1, c0, 0
105 +
92 106 /* invoke machine specific setup */
93 107 bl _mach_start
94 108
95 109 bl _fakebop_start
96 110 SET_SIZE(_start)
97 111
98 112
99 113 #if defined(__lint)
100 114
101 115 /* ARGSUSED */
102 116 void
103 117 _locore_start(struct boot_syscalls *sysp, struct bootops *bop)
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
104 118 {}
105 119
106 120 #else /* __lint */
107 121
108 122 /*
109 123 * We got here from _kobj_init() via exitto(). We have a few different
110 124 * tasks that we need to take care of before we hop into mlsetup and
111 125 * then main. We're never going back so we shouldn't feel compelled to
112 126 * preserve any registers.
113 127 *
114 - * o Enable our I/D-caches
115 128 * o Save the boot syscalls and bootops for later
116 129 * o Set up our stack to be the real stack of t0stack.
117 130 * o Save t0 as curthread
118 131 * o Set up a struct REGS for mlsetup
119 132 * o Make sure that we're 8 byte aligned for the call
120 133 */
121 134
122 135 ENTRY(_locore_start)
123 136
124 137
125 138 /*
126 139 * We've been running in t0stack anyway, up to this point, but
127 140 * _locore_start represents what is in effect a fresh start in the
128 141 * real kernel -- We'll never return back through here.
129 142 *
130 143 * So reclaim those few bytes
131 144 */
132 145 ldr sp, =t0stack
133 146 ldr r4, =(DEFAULTSTKSZ - REGSIZE)
134 147 add sp, r4
135 148 bic sp, sp, #0xff
136 149
137 150 /*
138 151 * Save flags and arguments for potential debugging
139 152 */
140 153 str r0, [sp, #REGOFF_R0]
141 154 str r1, [sp, #REGOFF_R1]
142 155 str r2, [sp, #REGOFF_R2]
143 156 str r3, [sp, #REGOFF_R3]
144 157 mrs r4, CPSR
145 158 str r4, [sp, #REGOFF_CPSR]
146 159
147 160 /*
148 161 * Save back the bootops and boot_syscalls.
149 162 */
150 163 ldr r2, =sysp
151 164 str r0, [r2]
152 165 ldr r2, =bootops
↓ open down ↓ |
28 lines elided |
↑ open up ↑ |
153 166 str r1, [r2]
154 167 ldr r2, =bootopsp
155 168 ldr r2, [r2]
156 169 str r1, [r2]
157 170
158 171 /*
159 172 * Set up our curthread pointer
160 173 */
161 174 ldr r0, =t0
162 175 mcr p15, 0, r0, c13, c0, 4
163 -
164 - /*
165 - * Go ahead now and enable the L1 I/D caches.
166 - */
167 - mrc p15, 0, r0, c1, c0, 0
168 - orr r0, #0x04 /* D-cache */
169 - orr r0, #0x1000 /* I-cache */
170 - mcr p15, 0, r0, c1, c0, 0
171 176
172 177 /*
173 178 * mlsetup() takes the struct regs as an argument. main doesn't take
174 179 * any and should never return. Currently, we have an 8-byte aligned
175 180 * stack. We want to push a zero frame pointer to terminate any
176 181 * stack walking, but that would cause us to end up with only a
177 182 * 4-byte aligned stack. So, to keep things nice and correct, we
178 183 * push a zero value twice - it's similar to a typical function
179 184 * entry:
180 185 * push { r9, lr }
181 186 */
182 187 mov r9,#0
183 188 push { r9 } /* link register */
184 189 push { r9 } /* frame pointer */
185 190 mov r0, sp
186 191 bl mlsetup
187 192 bl main
188 193 /* NOTREACHED */
189 194 ldr r0,=__return_from_main
190 195 ldr r0,[r0]
191 196 bl panic
192 197 SET_SIZE(_locore_start)
193 198
194 199 __return_from_main:
195 200 .string "main() returned"
196 201 #endif /* __lint */
197 202
198 203 ENTRY(arm_reg_read)
199 204 ldr r0, [r0]
200 205 bx lr
201 206 SET_SIZE(arm_reg_read)
202 207
203 208 ENTRY(arm_reg_write)
204 209 str r1, [r0]
205 210 bx lr
206 211 SET_SIZE(arm_reg_write)
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX