use NULL getmemid segop as a shorthand for ENODEV
Instead of forcing every segment driver to implement a dummy function that
returns (hopefully) ENODEV, handle a NULL getmemid segop function pointer as
a "return ENODEV" shorthand.
--- old/usr/src/uts/common/vm/seg_kpm.c
+++ new/usr/src/uts/common/vm/seg_kpm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2006 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Kernel Physical Mapping (kpm) segment driver (segkpm).
29 29 *
30 30 * This driver, along with the hat_kpm* interfaces, delivers an alternative
31 31 * mechanism for kernel mappings within the 64-bit Solaris operating system,
32 32 * which allows the mapping of all physical memory into the kernel address
33 33 * space at once. This is feasible in 64-bit kernels, e.g. on UltraSPARC II
34 34 * and later processors, since the available VA range is much larger than
35 35 * possible physical memory. Currently all physical memory is supported,
36 36 * as represented by the list of memory segments (memsegs).
37 37 *
38 38 * Segkpm mappings also have very low overhead, and large pages are used
39 39 * (when possible) to minimize the TLB and TSB footprint. It is also
40 40 * extendable to architectures other than SPARC (e.g. AMD64). The main
41 41 * advantage is the avoidance of TLB-shootdown X-calls, which are
42 42 * normally needed when a kernel (global) mapping has to be removed.
43 43 *
44 44 * The first example of a kernel facility that uses the segkpm mapping
45 45 * scheme is seg_map, where it is used as an alternative to hat_memload().
46 46 * See also the hat layer for more information about the hat_kpm* routines.
47 47 * The kpm facility can be turned off at boot time (e.g. via /etc/system).
48 48 */
49 49
50 50 #include <sys/types.h>
51 51 #include <sys/param.h>
52 52 #include <sys/sysmacros.h>
53 53 #include <sys/systm.h>
54 54 #include <sys/vnode.h>
55 55 #include <sys/cmn_err.h>
56 56 #include <sys/debug.h>
57 57 #include <sys/thread.h>
58 58 #include <sys/cpuvar.h>
59 59 #include <sys/bitmap.h>
60 60 #include <sys/atomic.h>
61 61 #include <sys/lgrp.h>
62 62
63 63 #include <vm/seg_kmem.h>
64 64 #include <vm/seg_kpm.h>
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/page.h>
69 69
70 70 /*
71 71 * Global kpm controls.
72 72 * See also platform and mmu specific controls.
73 73 *
74 74 * kpm_enable -- global on/off switch for segkpm.
75 75 * . Set by default on 64-bit platforms that have kpm support.
76 76 * . Will be disabled from platform layer if not supported.
77 77 * . Can be disabled via /etc/system.
78 78 *
79 79 * kpm_smallpages -- use only regular/system pagesize for kpm mappings.
80 80 * . Can be useful for critical debugging of kpm clients.
81 81 * . Set to zero by default for platforms that support kpm large pages.
82 82 * The use of kpm large pages reduces the footprint of kpm metadata
83 83 * and has all the other advantages of using large pages (e.g. TLB
84 84 * miss reduction).
85 85 * . Set by default for platforms that don't support kpm large pages or
86 86 * where large pages cannot be used for other reasons (e.g. there are
87 87 * only a few fully associative TLB entries available for large pages).
88 88 *
89 89 * segmap_kpm -- separate on/off switch for segmap using segkpm:
90 90 * . Set by default.
91 91 * . Will be disabled when kpm_enable is zero.
92 92 * . Will be disabled when MAXBSIZE != PAGESIZE.
93 93 * . Can be disabled via /etc/system.
94 94 *
95 95 */
96 96 int kpm_enable = 1;
97 97 int kpm_smallpages = 0;
98 98 int segmap_kpm = 1;
99 99
100 100 /*
101 101 * Private seg op routines.
102 102 */
103 103 faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr,
104 104 size_t len, enum fault_type type, enum seg_rw rw);
105 105 static void segkpm_dump(struct seg *);
106 106 static int segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
107 107 struct page ***page, enum lock_type type,
108 108 enum seg_rw rw);
109 109
110 110 static struct seg_ops segkpm_ops = {
111 111 .fault = segkpm_fault,
112 112 .dump = segkpm_dump,
113 113 .pagelock = segkpm_pagelock,
114 114 //#ifndef SEGKPM_SUPPORT
115 115 #if 0
116 116 #error FIXME: define nop
117 117 .dup = nop,
118 118 .unmap = nop,
119 119 .free = nop,
120 120 .faulta = nop,
121 121 .setprot = nop,
122 122 .checkprot = nop,
123 123 .kluster = nop,
124 124 .sync = nop,
125 125 .incore = nop,
126 126 .lockop = nop,
127 127 .getprot = nop,
128 128 .getoffset = nop,
129 129 .gettype = nop,
130 130 .getvp = nop,
131 131 .advise = nop,
132 132 .setpagesize = nop,
133 - .getmemid = nop,
134 133 .getpolicy = nop,
135 134 #endif
136 135 };
137 136
138 137 /*
139 138 * kpm_pgsz and kpm_pgshft are set by platform layer.
140 139 */
141 140 size_t kpm_pgsz; /* kpm page size */
142 141 uint_t kpm_pgshft; /* kpm page shift */
143 142 u_offset_t kpm_pgoff; /* kpm page offset mask */
144 143 uint_t kpmp2pshft; /* kpm page to page shift */
145 144 pgcnt_t kpmpnpgs; /* how many pages per kpm page */
146 145
147 146
148 147 #ifdef SEGKPM_SUPPORT
149 148
150 149 int
151 150 segkpm_create(struct seg *seg, void *argsp)
152 151 {
153 152 struct segkpm_data *skd;
154 153 struct segkpm_crargs *b = (struct segkpm_crargs *)argsp;
155 154 ushort_t *p;
156 155 int i, j;
157 156
158 157 ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
159 158 ASSERT(btokpmp(seg->s_size) >= 1 &&
160 159 kpmpageoff((uintptr_t)seg->s_base) == 0 &&
161 160 kpmpageoff((uintptr_t)seg->s_base + seg->s_size) == 0);
162 161
163 162 skd = kmem_zalloc(sizeof (struct segkpm_data), KM_SLEEP);
164 163
165 164 seg->s_data = (void *)skd;
166 165 seg->s_ops = &segkpm_ops;
167 166 skd->skd_prot = b->prot;
168 167
169 168 /*
170 169 * (1) Segkpm virtual addresses are based on physical addresses.
171 170 * Because of this, and in contrast to other segment drivers, it
172 171 * is often necessary to allocate a page first in order to be
173 172 * able to calculate the final segkpm virtual address.
174 173 * (2) Page allocation is done by calling page_create_va(), whose
175 174 * important input argument is a virtual address (hence the
176 175 * "va" in the function name). This function is highly
177 176 * optimized to select the right page for optimal processor
178 177 * and platform support (e.g. virtually addressed caches
179 178 * (VAC), physically addressed caches, NUMA).
180 179 *
181 180 * Because of (1), the approach is to generate a fake virtual
182 181 * address for calling page_create_va(). In order to exploit
183 182 * the abilities of (2), especially to utilize the cache
184 183 * hierarchy (3) and to avoid VAC alias conflicts (4), the
185 184 * selection has to be done carefully. A separate counter is
186 185 * provided for each virtual color (4). The count values are
187 186 * used to utilize all cache lines (3) and correspond to the
188 187 * cache bins.
189 188 */
190 189 skd->skd_nvcolors = b->nvcolors;
191 190
192 191 p = skd->skd_va_select =
193 192 kmem_zalloc(NCPU * b->nvcolors * sizeof (ushort_t), KM_SLEEP);
194 193
195 194 for (i = 0; i < NCPU; i++)
196 195 for (j = 0; j < b->nvcolors; j++, p++)
197 196 *p = j;
198 197
199 198 return (0);
200 199 }
201 200
202 201 /*
203 202 * This routine is called via a machine specific fault handling
204 203 * routine.
205 204 */
206 205 /* ARGSUSED */
207 206 faultcode_t
208 207 segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
209 208 enum fault_type type, enum seg_rw rw)
210 209 {
211 210 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
212 211
213 212 switch (type) {
214 213 case F_INVAL:
215 214 return (hat_kpm_fault(hat, addr));
216 215 case F_SOFTLOCK:
217 216 case F_SOFTUNLOCK:
218 217 return (0);
219 218 default:
220 219 return (FC_NOSUPPORT);
221 220 }
222 221 /*NOTREACHED*/
223 222 }
224 223
225 224 #define addr_to_vcolor(addr, vcolors) \
226 225 ((int)(((uintptr_t)(addr) & ((vcolors << PAGESHIFT) - 1)) >> PAGESHIFT))
227 226
228 227 /*
229 228 * Create a virtual address that can be used for invocations of
230 229 * page_create_va. The goal is to utilize the cache hierarchy (round
231 230 * robin bins) and to select the right color for virtually indexed
232 231 * caches. It isn't exact since we also increment the bin counter
233 232 * when the caller uses VOP_GETPAGE and gets a hit in the page
234 233 * cache, but we keep the bins turning for cache distribution
235 234 * (see also segkpm_create block comment).
236 235 */
237 236 caddr_t
238 237 segkpm_create_va(u_offset_t off)
239 238 {
240 239 int vcolor;
241 240 ushort_t *p;
242 241 struct segkpm_data *skd = (struct segkpm_data *)segkpm->s_data;
243 242 int nvcolors = skd->skd_nvcolors;
244 243 caddr_t va;
245 244
246 245 vcolor = (nvcolors > 1) ? addr_to_vcolor(off, nvcolors) : 0;
247 246 p = &skd->skd_va_select[(CPU->cpu_id * nvcolors) + vcolor];
248 247 va = (caddr_t)ptob(*p);
249 248
250 249 atomic_add_16(p, nvcolors);
251 250
252 251 return (va);
253 252 }
254 253
255 254 /*
256 255 * Unload mapping if the instance has an active kpm mapping.
257 256 */
258 257 void
259 258 segkpm_mapout_validkpme(struct kpme *kpme)
260 259 {
261 260 caddr_t vaddr;
262 261 page_t *pp;
263 262
264 263 retry:
265 264 if ((pp = kpme->kpe_page) == NULL) {
266 265 return;
267 266 }
268 267
269 268 if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM) == 0)
270 269 goto retry;
271 270
272 271 /*
273 272 * Check that the segkpm mapping was not unloaded in the meantime
274 273 */
275 274 if (kpme->kpe_page == NULL) {
276 275 page_unlock(pp);
277 276 return;
278 277 }
279 278
280 279 vaddr = hat_kpm_page2va(pp, 1);
281 280 hat_kpm_mapout(pp, kpme, vaddr);
282 281 page_unlock(pp);
283 282 }
284 283
285 284 #else /* SEGKPM_SUPPORT */
286 285
287 286 /* segkpm stubs */
288 287
289 288 /*ARGSUSED*/
290 289 int segkpm_create(struct seg *seg, void *argsp)
291 290 {
292 291 return (0);
293 292 }
294 293
295 294 /* ARGSUSED */
296 295 faultcode_t
297 296 segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
298 297 enum fault_type type, enum seg_rw rw)
299 298 {
300 299 return (0);
301 300 }
302 301
303 302 /* ARGSUSED */
304 303 caddr_t segkpm_create_va(u_offset_t off)
305 304 {
306 305 return (NULL);
307 306 }
308 307
309 308 /* ARGSUSED */
310 309 void segkpm_mapout_validkpme(struct kpme *kpme)
311 310 {
312 311 }
313 312
314 313 #endif /* SEGKPM_SUPPORT */
315 314
316 315 /* ARGSUSED */
317 316 static int
318 317 segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
319 318 struct page ***page, enum lock_type type, enum seg_rw rw)
320 319 {
321 320 return (ENOTSUP);
322 321 }
323 322
324 323 /*
325 324 * segkpm pages are not dumped, so we just return
326 325 */
327 326 /*ARGSUSED*/
328 327 static void
329 328 segkpm_dump(struct seg *seg)
330 329 {
331 330 }
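
With the shorthand in place, a driver that has no meaningful memid
translation simply omits the field from its designated initializer, exactly
as the seg_kpm hunk above does. A brief sketch with a hypothetical segfoo
driver (all segfoo_* names are illustrative):

/*
 * Omitted members of a designated initializer are zero-filled, so
 * .getmemid is NULL and the common segop dispatch code returns
 * ENODEV for this segment type.
 */
static struct seg_ops segfoo_ops = {
	.fault		= segfoo_fault,
	.pagelock	= segfoo_pagelock,
};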