const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
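
For orientation, a minimal sketch of the pattern this change applies (illustrative names, not code from the gate): a static table of function pointers that is only ever installed and read through a pointer can be declared const, so the compiler may place it in read-only data.

	/* Hypothetical ops table; names are for illustration only. */
	struct foo_ops {
		int	(*fo_open)(void *arg);
		void	(*fo_close)(void *arg);
	};

	/*ARGSUSED*/
	static int
	foo_open(void *arg)
	{
		return (0);
	}

	/*
	 * Designated initializers, as segkpm_ops uses below; members that
	 * are not named (fo_close here) are implicitly initialized to NULL.
	 */
	static const struct foo_ops foo_ops_table = {
		.fo_open = foo_open,
	};

	/* Consumers keep a pointer-to-const and only ever call through it. */
	static const struct foo_ops *foo_ops = &foo_ops_table;

The diff below applies the same const qualification to segkpm_ops.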
--- old/usr/src/uts/common/vm/seg_kpm.c
+++ new/usr/src/uts/common/vm/seg_kpm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Kernel Physical Mapping (kpm) segment driver (segkpm).
29 29 *
30 30 * This driver, along with the hat_kpm* interfaces, delivers an alternative
31 31 * mechanism for kernel mappings within the 64-bit Solaris operating system,
32 32 * which allows the mapping of all physical memory into the kernel address
33 33 * space at once. This is feasible in 64-bit kernels, e.g. for Ultrasparc II
34 34 * and later processors, since the available VA range is much larger than
35 35 * the possible physical memory. Currently all physical memory is supported,
36 36 * as represented by the list of memory segments (memsegs).
37 37 *
38 38 * Segkpm mappings also have very low overhead, and large pages are used
39 39 * (when possible) to minimize the TLB and TSB footprint. It is also
40 40 * extendable to architectures other than Sparc (e.g. AMD64). The main
41 41 * advantage is the avoidance of the TLB-shootdown X-calls, which are
42 42 * normally needed when a kernel (global) mapping has to be removed.
43 43 *
44 44 * The first example of a kernel facility that uses the segkpm mapping
45 45 * scheme is seg_map, where it is used as an alternative to hat_memload().
46 46 * See also the hat layer for more information about the hat_kpm* routines.
47 47 * The kpm facility can be turned off at boot time (e.g. via /etc/system).
48 48 */
49 49
50 50 #include <sys/types.h>
51 51 #include <sys/param.h>
52 52 #include <sys/sysmacros.h>
53 53 #include <sys/systm.h>
54 54 #include <sys/vnode.h>
55 55 #include <sys/cmn_err.h>
56 56 #include <sys/debug.h>
57 57 #include <sys/thread.h>
58 58 #include <sys/cpuvar.h>
59 59 #include <sys/bitmap.h>
60 60 #include <sys/atomic.h>
61 61 #include <sys/lgrp.h>
62 62
63 63 #include <vm/seg_kmem.h>
64 64 #include <vm/seg_kpm.h>
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/page.h>
69 69
70 70 /*
71 71 * Global kpm controls.
72 72 * See also platform and mmu specific controls.
73 73 *
74 74 * kpm_enable -- global on/off switch for segkpm.
75 75 * . Set by default on 64bit platforms that have kpm support.
76 76 * . Will be disabled from platform layer if not supported.
77 77 * . Can be disabled via /etc/system.
78 78 *
79 79 * kpm_smallpages -- use only regular/system pagesize for kpm mappings.
80 80 * . Can be useful for critical debugging of kpm clients.
81 81 * . Set to zero by default for platforms that support kpm large pages.
82 82 * The use of kpm large pages reduces the footprint of kpm meta data
83 83 * and has all the other advantages of using large pages (e.g. TLB
84 84 * miss reduction).
85 85 * . Set by default for platforms that don't support kpm large pages or
86 86 * where large pages cannot be used for other reasons (e.g. there are
87 87 * only a few fully associative TLB entries available for large pages).
88 88 *
89 89 * segmap_kpm -- separate on/off switch for segmap using segkpm:
90 90 * . Set by default.
91 91 * . Will be disabled when kpm_enable is zero.
92 92 * . Will be disabled when MAXBSIZE != PAGESIZE.
93 93 * . Can be disabled via /etc/system.
94 94 *
95 95 */
96 96 int kpm_enable = 1;
97 97 int kpm_smallpages = 0;
98 98 int segmap_kpm = 1;
99 99
100 100 /*
101 101 * Private seg op routines.
102 102 */
103 103 faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr,
104 104 size_t len, enum fault_type type, enum seg_rw rw);
105 105 static void segkpm_dump(struct seg *);
106 106 static int segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
107 107 struct page ***page, enum lock_type type,
108 108 enum seg_rw rw);
109 109
110 -static struct seg_ops segkpm_ops = {
110 +static const struct seg_ops segkpm_ops = {
111 111 .fault = segkpm_fault,
112 112 .dump = segkpm_dump,
113 113 .pagelock = segkpm_pagelock,
114 114 //#ifndef SEGKPM_SUPPORT
115 115 #if 0
116 116 #error FIXME: define nop
117 117 .dup = nop,
118 118 .unmap = nop,
119 119 .free = nop,
120 120 .faulta = nop,
121 121 .setprot = nop,
122 122 .checkprot = nop,
123 123 .kluster = nop,
124 124 .sync = nop,
125 125 .incore = nop,
126 126 .lockop = nop,
127 127 .getprot = nop,
128 128 .getoffset = nop,
129 129 .gettype = nop,
130 130 .getvp = nop,
131 131 .advise = nop,
132 132 .getpolicy = nop,
133 133 #endif
134 134 };
135 135
136 136 /*
137 137 * kpm_pgsz and kpm_pgshft are set by platform layer.
138 138 */
139 139 size_t kpm_pgsz; /* kpm page size */
140 140 uint_t kpm_pgshft; /* kpm page shift */
141 141 u_offset_t kpm_pgoff; /* kpm page offset mask */
142 142 uint_t kpmp2pshft; /* kpm page to page shift */
143 143 pgcnt_t kpmpnpgs; /* how many pages per kpm page */
144 144
145 145
146 146 #ifdef SEGKPM_SUPPORT
147 147
148 148 int
149 149 segkpm_create(struct seg *seg, void *argsp)
150 150 {
151 151 struct segkpm_data *skd;
152 152 struct segkpm_crargs *b = (struct segkpm_crargs *)argsp;
153 153 ushort_t *p;
154 154 int i, j;
155 155
156 156 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
157 157 ASSERT(btokpmp(seg->s_size) >= 1 &&
158 158 kpmpageoff((uintptr_t)seg->s_base) == 0 &&
159 159 kpmpageoff((uintptr_t)seg->s_base + seg->s_size) == 0);
160 160
161 161 skd = kmem_zalloc(sizeof (struct segkpm_data), KM_SLEEP);
162 162
163 163 seg->s_data = (void *)skd;
164 164 seg->s_ops = &segkpm_ops;
165 165 skd->skd_prot = b->prot;
166 166
167 167 /*
168 168 * (1) Segkpm virtual addresses are based on physical addresses.
169 169 * Because of this, and in contrast to other segment drivers, it is
170 170 * often necessary to allocate a page first in order to
171 171 * calculate the final segkpm virtual address.
172 172 * (2) Page allocation is done by calling page_create_va();
173 173 * one important input argument is a virtual address (also
174 174 * expressed by the "va" in the function name). This function
175 175 * is highly optimized to select the right page for optimal
176 176 * processor and platform support (e.g. virtually addressed
177 177 * caches (VAC), physically addressed caches, NUMA).
178 178 *
179 179 * Because of (1) the approach is to generate a fake virtual
180 180 * address for calling page_create_va(). In order to exploit
181 181 * the abilities of (2), especially to utilize the cache
182 182 * hierarchy (3) and to avoid VAC alias conflicts (4), the
183 183 * selection has to be done carefully. For each virtual color
184 184 * a separate counter is provided (4). The count values are
185 185 * used for the utilization of all cache lines (3) and
186 186 * correspond to the cache bins.
187 187 */
188 188 skd->skd_nvcolors = b->nvcolors;
189 189
190 190 p = skd->skd_va_select =
191 191 kmem_zalloc(NCPU * b->nvcolors * sizeof (ushort_t), KM_SLEEP);
192 192
193 193 for (i = 0; i < NCPU; i++)
194 194 for (j = 0; j < b->nvcolors; j++, p++)
195 195 *p = j;
196 196
197 197 return (0);
198 198 }
199 199
200 200 /*
201 201 * This routine is called via a machine specific fault handling
202 202 * routine.
203 203 */
204 204 /* ARGSUSED */
205 205 faultcode_t
206 206 segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
207 207 enum fault_type type, enum seg_rw rw)
208 208 {
209 209 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
210 210
211 211 switch (type) {
212 212 case F_INVAL:
213 213 return (hat_kpm_fault(hat, addr));
214 214 case F_SOFTLOCK:
215 215 case F_SOFTUNLOCK:
216 216 return (0);
217 217 default:
218 218 return (FC_NOSUPPORT);
219 219 }
220 220 /*NOTREACHED*/
221 221 }
222 222
223 223 #define addr_to_vcolor(addr, vcolors) \
224 224 ((int)(((uintptr_t)(addr) & ((vcolors << PAGESHIFT) - 1)) >> PAGESHIFT))
225 225
226 226 /*
227 227 * Create a virtual address that can be used for invocations of
228 228 * page_create_va. The goal is to utilize the cache hierarchy (round
229 229 * robin bins) and to select the right color for virtually indexed
230 230 * caches. It isn't exact since we also increment the bin counter
231 231 * when the caller uses VOP_GETPAGE and gets a hit in the page
232 232 * cache, but we keep the bins turning for cache distribution
233 233 * (see also segkpm_create block comment).
234 234 */
235 235 caddr_t
236 236 segkpm_create_va(u_offset_t off)
237 237 {
238 238 int vcolor;
239 239 ushort_t *p;
240 240 struct segkpm_data *skd = (struct segkpm_data *)segkpm->s_data;
241 241 int nvcolors = skd->skd_nvcolors;
242 242 caddr_t va;
243 243
244 244 vcolor = (nvcolors > 1) ? addr_to_vcolor(off, nvcolors) : 0;
245 245 p = &skd->skd_va_select[(CPU->cpu_id * nvcolors) + vcolor];
246 246 va = (caddr_t)ptob(*p);
247 247
248 248 atomic_add_16(p, nvcolors);
249 249
250 250 return (va);
251 251 }
252 252
253 253 /*
254 254 * Unload mapping if the instance has an active kpm mapping.
255 255 */
256 256 void
257 257 segkpm_mapout_validkpme(struct kpme *kpme)
258 258 {
259 259 caddr_t vaddr;
260 260 page_t *pp;
261 261
262 262 retry:
263 263 if ((pp = kpme->kpe_page) == NULL) {
264 264 return;
265 265 }
266 266
267 267 if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM) == 0)
268 268 goto retry;
269 269
270 270 /*
271 271 * Check that the segkpm mapping was not unloaded in the meantime.
272 272 */
273 273 if (kpme->kpe_page == NULL) {
274 274 page_unlock(pp);
275 275 return;
276 276 }
277 277
278 278 vaddr = hat_kpm_page2va(pp, 1);
279 279 hat_kpm_mapout(pp, kpme, vaddr);
280 280 page_unlock(pp);
281 281 }
282 282
283 283 #else /* SEGKPM_SUPPORT */
284 284
285 285 /* segkpm stubs */
286 286
287 287 /*ARGSUSED*/
288 288 int segkpm_create(struct seg *seg, void *argsp)
289 289 {
290 290 return (0);
291 291 }
292 292
293 293 /* ARGSUSED */
294 294 faultcode_t
295 295 segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
296 296 enum fault_type type, enum seg_rw rw)
297 297 {
298 298 return (0);
299 299 }
300 300
301 301 /* ARGSUSED */
302 302 caddr_t segkpm_create_va(u_offset_t off)
303 303 {
304 304 return (NULL);
305 305 }
306 306
307 307 /* ARGSUSED */
308 308 void segkpm_mapout_validkpme(struct kpme *kpme)
309 309 {
310 310 }
311 311
312 312 #endif /* SEGKPM_SUPPORT */
313 313
314 314 /* ARGSUSED */
315 315 static int
316 316 segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
317 317 struct page ***page, enum lock_type type, enum seg_rw rw)
318 318 {
319 319 return (ENOTSUP);
320 320 }
321 321
322 322 /*
323 323 * segkpm pages are not dumped, so we just return
324 324 */
325 325 /*ARGSUSED*/
326 326 static void
327 327 segkpm_dump(struct seg *seg)
328 328 {
329 329 }
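
A side note on what this relies on (an assumption about the companion headers, which are not part of this webrev): for the assignment seg->s_ops = &segkpm_ops in segkpm_create() to compile cleanly once the ops table is const, the s_ops member of struct seg must itself be a pointer to const, roughly:

	/*
	 * Hypothetical sketch of the matching declaration in vm/seg.h;
	 * only a few members are shown, the rest are elided.
	 */
	struct seg {
		caddr_t	s_base;			/* base virtual address */
		size_t	s_size;			/* size in bytes */
		const struct seg_ops *s_ops;	/* ops vector */
		void	*s_data;		/* private data for instance */
		/* ... remaining members elided ... */
	};

Without that companion change, assigning the address of a const-qualified segkpm_ops to a non-const s_ops pointer would draw a compiler warning (or an error under -Werror).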