Print this page
armv6: p15 cache functions say that value passed in should be zero
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/armv6/ml/cache.s
+++ new/usr/src/uts/armv6/ml/cache.s
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2013 Joyent, Inc. All rights reserved.
14 14 */
15 15
16 16 .file "cache.s"
17 17
18 18 /*
19 19 * Cache and memory barrier operations
20 20 */
21 21
22 22 #include <sys/asm_linkage.h>
23 23 #include <sys/atomic_impl.h>
24 24
25 25 #if defined(lint) || defined(__lint)
26 26
27 27 void
28 28 membar_sync(void)
29 29 {}
30 30
31 31 void
32 32 membar_enter(void)
33 33 {}
34 34
35 35 void
36 36 membar_exit(void)
37 37 {}
38 38
39 39 void
40 40 membar_producer(void)
41 41 {}
42 42
43 43 void
44 44 membar_consumer(void)
45 45 {}
46 46
47 47 void
48 48 instr_sbarrier(void)
49 49 {}
50 50
51 51 void
52 52 data_sbarrier(void)
53 53 {}
54 54
55 55 #else /* __lint */
56 56
57 57 /*
58 58 * NOTE: membar_enter, membar_exit, membar_producer, and
59 59 * membar_consumer are identical routines. We define them
60 60 * separately, instead of using ALTENTRY definitions to alias
61 61 * them together, so that DTrace and debuggers will see a unique
62 62 * address for them, allowing more accurate tracing.
63 63 */
/*
 * membar_enter / membar_sync: full data memory barrier.
 * The ARMv6 CP15 DMB operation (presumably what ARM_DMB_INSTR
 * expands to) requires the source register to be zero (SBZ).
 * These routines take no arguments, so r0 holds caller garbage
 * unless we clear it first -- the same defect this change fixes
 * in the cache maintenance routines below.
 */
	ENTRY(membar_enter)
	ALTENTRY(membar_sync)
	mov	r0, #0			@ barrier operand must be zero
	ARM_DMB_INSTR(r0)
	bx	lr
	SET_SIZE(membar_sync)
	SET_SIZE(membar_enter)
70 70
/*
 * membar_exit: full data memory barrier (see note at membar_enter
 * in this file about why these are separate identical routines).
 * Zero r0 first: the CP15 barrier op's register should be zero.
 */
	ENTRY(membar_exit)
	mov	r0, #0			@ barrier operand must be zero
	ARM_DMB_INSTR(r0)
	bx	lr
	SET_SIZE(membar_exit)
75 75
/*
 * membar_producer: full data memory barrier.
 * Zero r0 first: the CP15 barrier op's register should be zero.
 */
	ENTRY(membar_producer)
	mov	r0, #0			@ barrier operand must be zero
	ARM_DMB_INSTR(r0)
	bx	lr
	SET_SIZE(membar_producer)
80 80
/*
 * membar_consumer: full data memory barrier.
 * Zero r0 first: the CP15 barrier op's register should be zero.
 */
	ENTRY(membar_consumer)
	mov	r0, #0			@ barrier operand must be zero
	ARM_DMB_INSTR(r0)
	bx	lr
	SET_SIZE(membar_consumer)
85 85
/*
 * instr_sbarrier: instruction synchronization barrier (ISB).
 * Two fixes from the original:
 *   - it was closed with SET_SIZE(membar_consumer), a copy-paste
 *     error that mis-sizes both symbols;
 *   - r0 was passed to the barrier uninitialized; the CP15 ISB
 *     operation's register should be zero.
 */
	ENTRY(instr_sbarrier)
	mov	r0, #0			@ barrier operand must be zero
	ARM_ISB_INSTR(r0)
	bx	lr
	SET_SIZE(instr_sbarrier)
90 90
/*
 * data_sbarrier: data synchronization barrier.
 * The original issued ARM_ISB_INSTR (an *instruction* barrier),
 * a copy-paste from instr_sbarrier above -- the same copy that
 * left instr_sbarrier closed with the wrong SET_SIZE.  A data
 * synchronization barrier is the DSB op.  Also zero r0 first,
 * as the CP15 barrier register should be zero.
 */
	ENTRY(data_sbarrier)
	mov	r0, #0			@ barrier operand must be zero
	ARM_DSB_INSTR(r0)
	bx	lr
	SET_SIZE(data_sbarrier)
95 95
96 96 #endif /* __lint */
97 97
98 98 #if defined(lint) || defined(__lint)
99 99
100 100 /* The ARM architecture uses a modified Harvard Architecture which means that we
101 101 * get the joys of fixing up this mess. Primarily this means that when we update
102 102 * data, it gets written to the data cache. That needs to be flushed to main
103 103 * memory and then the instruction cache needs to be invalidated. This is
104 104 * particularly important for things like krtld and DTrace. While the data cache
105 105 * does write itself out over time, we cannot rely on it having written itself
106 106 * out to the state that we care about by the time that we'd like it to. As
107 107 * such, we need to ensure that it's been flushed out ourselves. This also means
108 108 * that we could accidentally flush a region of the icache that's already
109 109 * updated itself, but that's just what we have to do to keep Von Neumann's
110 110 * spirit and great gift alive.
111 111 *
112 112 * The controllers for the caches have a few different options for invalidation.
113 113 * One may:
114 114 *
115 115 * o Invalidate or flush the entire cache
116 116 * o Invalidate or flush a cache line
117 117 * o Invalidate or flush a cache range
118 118 *
119 119 * We opt to take the third option here for the general case of making sure that
120 120 * text has been synchronized. While the data cache allows us to both invalidate
121 121 * and flush the cache line, we don't currently have a need to do the
122 122 * invalidation.
123 123 *
124 124 * Note that all of these operations should be aligned on an 8-byte boundary.
125 125 * The instructions actually only end up using bits [31:5] of an address.
126 126 * Callers are required to ensure that this is the case.
127 127 */
128 128
129 129 void
130 130 armv6_icache_disable(void)
131 131 {}
132 132
133 133 void
134 134 armv6_icache_enable(void)
135 135 {}
136 136
137 137 void
138 138 armv6_dcache_disable(void)
139 139 {}
140 140
141 141 void
142 142 armv6_dcache_enable(void)
143 143 {}
144 144
145 145 void
146 146 armv6_icache_inval(void)
147 147 {}
148 148
149 149 void
150 150 armv6_dcache_inval(void)
151 151 {}
152 152
153 153 void
154 154 armv6_dcache_flush(void)
155 155 {}
156 156
157 157 void
158 158 armv6_text_flush_range(caddr_t start, size_t len)
159 159 {}
160 160
161 161 void
162 162 armv6_text_flush(void)
163 163 {}
164 164
165 165 #else /* __lint */
166 166
/*
 * armv6_icache_enable: set the I bit (bit 12) in the CP15 control
 * register (SCTLR) to enable the instruction cache.
 * Bug fix: the original had no return instruction, so execution
 * fell through into armv6_dcache_enable, enabling the d-cache as
 * an unintended side effect.
 */
	ENTRY(armv6_icache_enable)
	mrc	p15, 0, r0, c1, c0, 0	@ read control register
	orr	r0, #0x1000		@ set I bit (i-cache enable)
	mcr	p15, 0, r0, c1, c0, 0	@ write it back
	bx	lr			@ was missing: fell through
	SET_SIZE(armv6_icache_enable)
172 172
/*
 * armv6_dcache_enable: set the C bit (bit 2) in the CP15 control
 * register (SCTLR) to enable the data cache.
 * Bug fix: the original had no return instruction and fell
 * through into the following routine.
 */
	ENTRY(armv6_dcache_enable)
	mrc	p15, 0, r0, c1, c0, 0	@ read control register
	orr	r0, #0x4		@ set C bit (d-cache enable)
	mcr	p15, 0, r0, c1, c0, 0	@ write it back
	bx	lr			@ was missing: fell through
	SET_SIZE(armv6_dcache_enable)
178 178
/*
 * armv6_icache_disable: clear the I bit (bit 12) in the CP15
 * control register (SCTLR) to disable the instruction cache.
 * Bug fix: the original had no return instruction and fell
 * through into armv6_dcache_disable, disabling the d-cache as
 * an unintended side effect.
 */
	ENTRY(armv6_icache_disable)
	mrc	p15, 0, r0, c1, c0, 0	@ read control register
	bic	r0, #0x1000		@ clear I bit (i-cache disable)
	mcr	p15, 0, r0, c1, c0, 0	@ write it back
	bx	lr			@ was missing: fell through
	SET_SIZE(armv6_icache_disable)
184 184
/*
 * armv6_dcache_disable: clear the C bit (bit 2) in the CP15
 * control register (SCTLR) to disable the data cache.
 * Bug fix: the original had no return instruction and fell
 * through into armv6_icache_inval.
 */
	ENTRY(armv6_dcache_disable)
	mrc	p15, 0, r0, c1, c0, 0	@ read control register
	bic	r0, #0x4		@ clear C bit (d-cache disable)
	mcr	p15, 0, r0, c1, c0, 0	@ write it back
	bx	lr			@ was missing: fell through
	SET_SIZE(armv6_dcache_disable)
190 190
/*
 * armv6_icache_inval: invalidate the entire instruction cache.
 * The CP15 invalidate-entire-i-cache operation (c7, c5, 0)
 * requires the source register to be zero, so clear r0 first.
 */
	ENTRY(armv6_icache_inval)
	mov	r0, #0			@ operation register must be zero
	mcr	p15, 0, r0, c7, c5, 0	@ invalidate entire i-cache
	bx	lr
	SET_SIZE(armv6_icache_inval)
195 196
/*
 * armv6_dcache_inval: invalidate the entire data cache, then
 * synchronize with a DSB.
 * Bug fix: the trailing barrier was ARM_DSB_INSTR(r2), passing
 * an uninitialized r2, while the CP15 barrier register should be
 * zero.  Use r0, which this routine has just zeroed -- the same
 * convention armv6_tlb_sync in this file already follows.
 */
	ENTRY(armv6_dcache_inval)
	mov	r0, #0			@ operation register must be zero
	mcr	p15, 0, r0, c7, c6, 0	@ invalidate entire d-cache
	ARM_DSB_INSTR(r0)		@ r0 is still zero, as required
	bx	lr
	SET_SIZE(armv6_dcache_inval)
201 203
/*
 * armv6_dcache_flush: flush the data cache to memory, then
 * synchronize with a DSB.
 * NOTE(review): c7, c10, 4 is the ARMv6 drain-write-buffer/DSB
 * encoding; a full d-cache clean would be c7, c10, 0 -- confirm
 * the intent here matches the "Flush d-cache" comment.  Encoding
 * left unchanged; behavior-affecting fix below only.
 * Bug fix: the trailing barrier passed uninitialized r2; the
 * barrier register should be zero, and r0 already is.
 */
	ENTRY(armv6_dcache_flush)
	mov	r0, #0			@ operation register must be zero
	mcr	p15, 0, r0, c7, c10, 4	@ Flush d-cache
	ARM_DSB_INSTR(r0)		@ r0 is still zero, as required
	bx	lr
	SET_SIZE(armv6_dcache_flush)
207 210
/*
 * armv6_text_flush_range(caddr_t start, size_t len)
 *
 * Flush the d-cache and invalidate the i-cache over [start,
 * start + len) so newly written text is visible to instruction
 * fetch.  The mcrr range operations take the start address in
 * the second register and the (inclusive) end address in the
 * first; callers guarantee cache-line alignment (see block
 * comment above).
 *
 * Bug fix: the original computed "add r1, r1, r0" then
 * "sub r1, r1, r0", a no-op pair leaving r1 == len, so the
 * range ops were given a length where an end address is
 * required.  End = start + len - 1 (inclusive).
 * Also: the barriers were given uninitialized r2; zero it
 * first, since the CP15 barrier register should be zero.
 */
	ENTRY(armv6_text_flush_range)
	add	r1, r1, r0		@ r1 = start + len
	sub	r1, r1, #1		@ end address is inclusive
	mcrr	p15, 0, r1, r0, c5	@ Invalidate i-cache range
	mcrr	p15, 0, r1, r0, c12	@ Flush d-cache range
	mov	r2, #0			@ barrier operand must be zero
	ARM_DSB_INSTR(r2)
	ARM_ISB_INSTR(r2)
	bx	lr
	SET_SIZE(armv6_text_flush_range)
217 220
/*
 * armv6_text_flush: flush the entire d-cache and invalidate the
 * entire i-cache, then synchronize (DSB + ISB) so subsequent
 * instruction fetches see the new text.
 * Bug fix: the barriers were given uninitialized r2 while the
 * CP15 barrier register should be zero; r0 is already zeroed
 * for the cache ops, so use it throughout (as armv6_tlb_sync
 * in this file already does).
 */
	ENTRY(armv6_text_flush)
	mov	r0, #0			@ operation register must be zero
	mcr	p15, 0, r0, c7, c5, 0	@ Invalidate i-cache
	mcr	p15, 0, r0, c7, c10, 4	@ Flush d-cache
	ARM_DSB_INSTR(r0)		@ r0 is still zero, as required
	ARM_ISB_INSTR(r0)
	bx	lr
	SET_SIZE(armv6_text_flush)
225 229
226 230 #endif
227 231
228 232 #ifdef __lint
229 233
230 234 /*
231 235 * Perform all of the operations necessary for tlb maintenance after an update
232 236 * to the page tables.
233 237 */
234 238 void
235 239 armv6_tlb_sync(void)
236 240 {}
237 241
238 242 #else /* __lint */
239 243
/*
 * armv6_tlb_sync: maintenance required after a page table
 * update.  Flush the d-cache so the new table entries are
 * visible to the walker, invalidate the TLB and the i-cache
 * plus branch target cache, and synchronize before returning.
 * All CP15 operations here require a zero source register,
 * which r0 provides throughout.
 */
	ENTRY(armv6_tlb_sync)
	mov	r0, #0			@ operation register must be zero
	mcr	p15, 0, r0, c7, c10, 4	@ Flush d-cache
	ARM_DSB_INSTR(r0)
	mcr	p15, 0, r0, c8, c7, 0	@ invalidate tlb
	mcr	p15, 0, r0, c8, c5, 0	@ Invalidate I-cache + btc
	ARM_DSB_INSTR(r0)
	ARM_ISB_INSTR(r0)
	bx	lr
	SET_SIZE(armv6_tlb_sync)
250 254
251 255 #endif /* __lint */
↓ open down ↓ |
23 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX