armv6: bit 2 (0x4) enables the dcache
This fixes a pretty simple typo. Sadly, this still isn't enough to get
bcm2835 past mutex_enter.
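For context, the enable bits live in the CP15 c1 Control Register (SCTLR): bit 0 (M) enables the MMU, bit 2 (C, 0x4) the data cache, and bit 12 (I, 0x1000) the instruction cache. Bit 1 (0x2), which the old code was setting, is the A (alignment fault checking) bit. A minimal sketch of the layout in C; the SCTLR_* names are hypothetical, not identifiers from the illumos source:

/*
 * ARMv6 CP15 c1 (SCTLR) bits relevant to this change.
 */
#define	SCTLR_M		0x0001	/* MMU enable */
#define	SCTLR_A		0x0002	/* alignment fault checking (what 0x2 set) */
#define	SCTLR_C		0x0004	/* d-cache enable (the intended bit) */
#define	SCTLR_I		0x1000	/* i-cache enable */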
--- old/usr/src/uts/armv6/ml/cache.s
+++ new/usr/src/uts/armv6/ml/cache.s
/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2013 Joyent, Inc. All rights reserved.
 */

	.file	"cache.s"

/*
 * Cache and memory barrier operations
 */

#include <sys/asm_linkage.h>
#include <sys/atomic_impl.h>

#if defined(lint) || defined(__lint)

void
membar_sync(void)
{}

void
membar_enter(void)
{}

void
membar_exit(void)
{}

void
membar_producer(void)
{}

void
membar_consumer(void)
{}

void
instr_sbarrier(void)
{}

void
data_sbarrier(void)
{}

#else	/* __lint */

/*
 * NOTE: membar_enter, membar_exit, membar_producer, and
 * membar_consumer are identical routines. We define them
 * separately, instead of using ALTENTRY definitions to alias
 * them together, so that DTrace and debuggers will see a unique
 * address for them, allowing more accurate tracing.
 */
	ENTRY(membar_enter)
	ALTENTRY(membar_sync)
	ARM_DMB_INSTR(r0)
	bx	lr
	SET_SIZE(membar_sync)
	SET_SIZE(membar_enter)

	ENTRY(membar_exit)
	ARM_DMB_INSTR(r0)
	bx	lr
	SET_SIZE(membar_exit)

	ENTRY(membar_producer)
	ARM_DMB_INSTR(r0)
	bx	lr
	SET_SIZE(membar_producer)

	ENTRY(membar_consumer)
	ARM_DMB_INSTR(r0)
	bx	lr
	SET_SIZE(membar_consumer)

	ENTRY(instr_sbarrier)
	ARM_ISB_INSTR(r0)
	bx	lr
	SET_SIZE(instr_sbarrier)

	ENTRY(data_sbarrier)
	ARM_DSB_INSTR(r0)	@ data synchronization barrier
	bx	lr
	SET_SIZE(data_sbarrier)

#endif	/* __lint */

#if defined(lint) || defined(__lint)

/*
 * The ARM architecture uses a modified Harvard Architecture which means that we
 * get the joys of fixing up this mess. Primarily this means that when we update
 * data, it gets written to the data cache. That needs to be flushed to main
 * memory and then the instruction cache needs to be invalidated. This is
 * particularly important for things like krtld and DTrace. While the data cache
 * does write itself out over time, we cannot rely on it having written itself
 * out to the state that we care about by the time that we'd like it to. As
 * such, we need to ensure that it's been flushed out ourselves. This also means
 * that we could accidentally flush a region of the icache that's already
 * updated itself, but that's just what we have to do to keep Von Neumann's
 * spirit and great gift alive.
 *
 * The controllers for the caches have a few different options for invalidation.
 * One may:
 *
 *   o Invalidate or flush the entire cache
 *   o Invalidate or flush a cache line
 *   o Invalidate or flush a cache range
 *
 * We opt to take the third option here for the general case of making sure that
 * text has been synchronized. While the data cache allows us to both invalidate
 * and flush the cache line, we don't currently have a need to do the
 * invalidation.
 *
 * Note that all of these operations should be aligned on a 32-byte (cache
 * line) boundary. The instructions actually only end up using bits [31:5] of
 * an address, so anything finer-grained is silently truncated. Callers are
 * required to ensure that this is the case.
 */

void
armv6_icache_disable(void)
{}

void
armv6_icache_enable(void)
{}

void
armv6_dcache_disable(void)
{}

void
armv6_dcache_enable(void)
{}

void
armv6_icache_inval(void)
{}

void
armv6_dcache_inval(void)
{}

void
armv6_dcache_flush(void)
{}

void
armv6_text_flush_range(caddr_t start, size_t len)
{}

void
armv6_text_flush(void)
{}

#else	/* __lint */

	ENTRY(armv6_icache_enable)
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, #0x1000
	mcr	p15, 0, r0, c1, c0, 0
	bx	lr
	SET_SIZE(armv6_icache_enable)

	ENTRY(armv6_dcache_enable)
	mrc	p15, 0, r0, c1, c0, 0
-	orr	r0, #0x2
+	orr	r0, #0x4
	mcr	p15, 0, r0, c1, c0, 0
	bx	lr
	SET_SIZE(armv6_dcache_enable)

	ENTRY(armv6_icache_disable)
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, #0x1000
	mcr	p15, 0, r0, c1, c0, 0
	bx	lr
	SET_SIZE(armv6_icache_disable)

	ENTRY(armv6_dcache_disable)
	mrc	p15, 0, r0, c1, c0, 0
-	bic	r0, #0x2
+	bic	r0, #0x4
	mcr	p15, 0, r0, c1, c0, 0
	bx	lr
	SET_SIZE(armv6_dcache_disable)

	ENTRY(armv6_icache_inval)
	mcr	p15, 0, r0, c7, c5, 0	@ Invalidate i-cache
	bx	lr
	SET_SIZE(armv6_icache_inval)

	ENTRY(armv6_dcache_inval)
	mcr	p15, 0, r0, c7, c6, 0	@ Invalidate d-cache
	ARM_DSB_INSTR(r2)
	bx	lr
	SET_SIZE(armv6_dcache_inval)

	ENTRY(armv6_dcache_flush)
	mcr	p15, 0, r0, c7, c10, 0	@ Clean (write back) entire d-cache
	ARM_DSB_INSTR(r2)
	bx	lr
	SET_SIZE(armv6_dcache_flush)

	ENTRY(armv6_text_flush_range)
	add	r1, r1, r0
	sub	r1, r1, #1		@ r1 = inclusive end of [start, start + len)
	mcrr	p15, 0, r1, r0, c12	@ Flush d-cache range
	mcrr	p15, 0, r1, r0, c5	@ Invalidate i-cache range
	ARM_DSB_INSTR(r2)
	ARM_ISB_INSTR(r2)
	bx	lr
	SET_SIZE(armv6_text_flush_range)

	ENTRY(armv6_text_flush)
	mcr	p15, 0, r0, c7, c10, 0	@ Flush (clean) entire d-cache
	mcr	p15, 0, r0, c7, c5, 0	@ Invalidate entire i-cache
	ARM_DSB_INSTR(r2)
	ARM_ISB_INSTR(r2)
	bx	lr
	SET_SIZE(armv6_text_flush)

#endif	/* __lint */

#ifdef	__lint

/*
 * Perform all of the operations necessary for tlb maintenance after an update
 * to the page tables.
 */
void
armv6_tlb_sync(void)
{}

#else	/* __lint */

	ENTRY(armv6_tlb_sync)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 0	@ Clean entire d-cache
	ARM_DSB_INSTR(r0)
	mcr	p15, 0, r0, c8, c7, 0	@ Invalidate entire TLB
	mcr	p15, 0, r0, c7, c5, 0	@ Invalidate i-cache + BTC
	ARM_DSB_INSTR(r0)
	ARM_ISB_INSTR(r0)
	bx	lr
	SET_SIZE(armv6_tlb_sync)

#endif	/* __lint */
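
The block comment in cache.s spells out the text-patching protocol: flush the updated data out of the d-cache to main memory, then invalidate the matching i-cache range. As a hedged illustration of a caller, here is a C sketch; patch_instr and its arguments are invented for this example and are not part of the illumos source:

#include <sys/types.h>

extern void armv6_text_flush_range(caddr_t, size_t);

/*
 * Hypothetical example: krtld- or DTrace-style code patching one
 * instruction. The store lands in the d-cache; the range flush
 * writes it back to memory and invalidates the i-cache range so
 * the CPU refetches the new instruction rather than a stale copy.
 */
static void
patch_instr(uint32_t *addr, uint32_t instr)
{
	*addr = instr;
	armv6_text_flush_range((caddr_t)addr, sizeof (instr));
}

The hardware only looks at bits [31:5] of the start and end addresses, so, per the comment in cache.s, callers are expected to keep the range cache-line aligned.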
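
Similarly, armv6_tlb_sync is documented as performing "all of the operations necessary for tlb maintenance after an update to the page tables". A hedged sketch of that calling pattern; the page-table write below (set_pte, ptep) is made up for illustration:

#include <sys/types.h>

extern void armv6_tlb_sync(void);

/*
 * Hypothetical example: install a mapping, then synchronize. The
 * d-cache clean makes the new entry visible to the hardware table
 * walker, and the TLB invalidation drops any stale translation.
 */
static void
set_pte(volatile uint32_t *ptep, uint32_t pte)
{
	*ptep = pte;		/* update the page table entry */
	armv6_tlb_sync();	/* clean d-cache, invalidate TLB */
}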