/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Kernel Physical Mapping (kpm) segment driver (segkpm).
 *
 * Together with the hat_kpm* interfaces, this driver provides an
 * alternative mechanism for kernel mappings within the 64-bit Solaris
 * operating system: it maps all of physical memory into the kernel
 * address space at once.  This is feasible in 64-bit kernels, e.g. on
 * UltraSPARC II and later processors, because the available VA range
 * is much larger than any possible amount of physical memory.
 * Currently all physical memory is supported, i.e. everything
 * represented by the list of memory segments (memsegs).
 *
 * Segkpm mappings also have very low overhead, and large pages are
 * used (when possible) to minimize the TLB and TSB footprint.  The
 * scheme is extensible to architectures other than SPARC (e.g. AMD64).
 * Its main advantage is the avoidance of the TLB-shootdown cross-calls
 * (x-calls) that are normally needed when a kernel (global) mapping
 * has to be removed.
 *
 * The first kernel facility to use the segkpm mapping scheme is
 * seg_map, where it serves as an alternative to hat_memload().
 * See the hat layer for more information about the hat_kpm* routines.
 * The kpm facility can be turned off at boot time (e.g. via
 * /etc/system).
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/bitmap.h>
#include <sys/atomic.h>
#include <sys/lgrp.h>

#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/page.h>

/*
 * Global kpm controls.
 * See also platform and mmu specific controls.
 *
 * kpm_enable -- global on/off switch for segkpm.
 * . Set by default on 64-bit platforms that have kpm support.
 * . Will be disabled by the platform layer if not supported.
 * . Can be disabled via /etc/system (set kpm_enable = 0).
 *
 * kpm_smallpages -- use only the regular/system pagesize for kpm mappings.
 * . Can be useful for critical debugging of kpm clients.
 * . Set to zero by default for platforms that support kpm large pages.
 *   The use of kpm large pages reduces the footprint of kpm metadata
 *   and has all the other advantages of using large pages (e.g. TLB
 *   miss reduction).
 * . Set by default for platforms that don't support kpm large pages or
 *   where large pages cannot be used for other reasons (e.g. only a
 *   few fully associative TLB entries are available for large pages).
 *
 * segmap_kpm -- separate on/off switch for segmap using segkpm:
 * . Set by default.
 * . Will be disabled when kpm_enable is zero.
 * . Will be disabled when MAXBSIZE != PAGESIZE.
 * . Can be disabled via /etc/system.
 */
int kpm_enable = 1;
int kpm_smallpages = 0;
int segmap_kpm = 1;
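
/*
 * Illustrative sketch (hypothetical client code, not part of this
 * driver, assuming a kpm-capable platform): with kpm enabled, a
 * client maps a locked page through the hat_kpm* interfaces instead
 * of hat_memload()/hat_unload(), so no TLB-shootdown x-call is
 * needed when the mapping goes away.  The function name below is
 * made up for illustration.
 */
static void
example_zero_page(page_t *pp)
{
	caddr_t vaddr;

	ASSERT(PAGE_LOCKED(pp));

	/* NULL kpme: the client keeps no cached kpm mapping state. */
	vaddr = hat_kpm_mapin(pp, NULL);
	bzero(vaddr, PAGESIZE);
	hat_kpm_mapout(pp, NULL, vaddr);
}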
/*
 * Private seg op routines.
 */
faultcode_t segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr,
	size_t len, enum fault_type type, enum seg_rw rw);
static void segkpm_dump(struct seg *);
static int segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
	struct page ***page, enum lock_type type, enum seg_rw rw);

static struct seg_ops segkpm_ops = {
	.fault = segkpm_fault,
	.dump = segkpm_dump,
	.pagelock = segkpm_pagelock,
/* #ifndef SEGKPM_SUPPORT */
#if 0
#error FIXME: define nop
	.dup = nop,
	.unmap = nop,
	.free = nop,
	.faulta = nop,
	.setprot = nop,
	.checkprot = nop,
	.kluster = nop,
	.sync = nop,
	.incore = nop,
	.lockop = nop,
	.getprot = nop,
	.getoffset = nop,
	.gettype = nop,
	.getvp = nop,
	.advise = nop,
	.setpagesize = nop,
	.getpolicy = nop,
#endif
};

/*
 * kpm_pgsz and kpm_pgshft are set by the platform layer.
 */
size_t		kpm_pgsz;	/* kpm page size */
uint_t		kpm_pgshft;	/* kpm page shift */
u_offset_t	kpm_pgoff;	/* kpm page offset mask */
uint_t		kpmp2pshft;	/* kpm page to page shift */
pgcnt_t		kpmpnpgs;	/* how many pages per kpm page */


#ifdef	SEGKPM_SUPPORT

int
segkpm_create(struct seg *seg, void *argsp)
{
	struct segkpm_data *skd;
	struct segkpm_crargs *b = (struct segkpm_crargs *)argsp;
	ushort_t *p;
	int i, j;

	ASSERT(seg->s_as && RW_WRITE_HELD(&seg->s_as->a_lock));
	ASSERT(btokpmp(seg->s_size) >= 1 &&
	    kpmpageoff((uintptr_t)seg->s_base) == 0 &&
	    kpmpageoff((uintptr_t)seg->s_base + seg->s_size) == 0);

	skd = kmem_zalloc(sizeof (struct segkpm_data), KM_SLEEP);

	seg->s_data = (void *)skd;
	seg->s_ops = &segkpm_ops;
	skd->skd_prot = b->prot;

	/*
	 * (1) Segkpm virtual addresses are based on physical addresses.
	 *     Because of this, and in contrast to other segment drivers,
	 *     it is often necessary to allocate a page first in order to
	 *     calculate the final segkpm virtual address.
	 * (2) Page allocation is done by calling page_create_va(); one
	 *     important input argument is a virtual address (hence the
	 *     "va" in the function name).  This function is highly
	 *     optimized to select the right page for optimal processor
	 *     and platform support (e.g. virtually addressed caches
	 *     (VAC), physically addressed caches, NUMA).
	 *
	 * Because of (1), the approach is to generate a faked virtual
	 * address for calling page_create_va().  In order to exploit
	 * the abilities of (2), especially to utilize the cache
	 * hierarchy (3) and to avoid VAC alias conflicts (4), the
	 * selection has to be done carefully.  For each virtual color
	 * a separate counter is provided (4).  The count values are
	 * used to cycle through all cache lines (3) and correspond to
	 * the cache bins.
	 */
	skd->skd_nvcolors = b->nvcolors;

	p = skd->skd_va_select =
	    kmem_zalloc(NCPU * b->nvcolors * sizeof (ushort_t), KM_SLEEP);

	for (i = 0; i < NCPU; i++)
		for (j = 0; j < b->nvcolors; j++, p++)
			*p = j;

	return (0);
}

/*
 * This routine is called via a machine-specific fault handling routine.
 */
/* ARGSUSED */
faultcode_t
segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
	enum fault_type type, enum seg_rw rw)
{
	ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));

	switch (type) {
	case F_INVAL:
		return (hat_kpm_fault(hat, addr));
	case F_SOFTLOCK:
	case F_SOFTUNLOCK:
		return (0);
	default:
		return (FC_NOSUPPORT);
	}
	/*NOTREACHED*/
}

#define	addr_to_vcolor(addr, vcolors) \
	((int)(((uintptr_t)(addr) & (((vcolors) << PAGESHIFT) - 1)) >> PAGESHIFT))

/*
 * Create a virtual address that can be used for invocations of
 * page_create_va.  The goal is to utilize the cache hierarchy (round
 * robin bins) and to select the right color for virtually indexed
 * caches.  It isn't exact, since we also increment the bin counter
 * when the caller uses VOP_GETPAGE and gets a hit in the page cache,
 * but we keep the bins turning for cache distribution (see also the
 * segkpm_create block comment).
 */
caddr_t
segkpm_create_va(u_offset_t off)
{
	int vcolor;
	ushort_t *p;
	struct segkpm_data *skd = (struct segkpm_data *)segkpm->s_data;
	int nvcolors = skd->skd_nvcolors;
	caddr_t	va;

	vcolor = (nvcolors > 1) ? addr_to_vcolor(off, nvcolors) : 0;
	p = &skd->skd_va_select[(CPU->cpu_id * nvcolors) + vcolor];
	va = (caddr_t)ptob(*p);

	atomic_add_16(p, nvcolors);

	return (va);
}
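
/*
 * Usage sketch (hypothetical client code, illustrative only): the
 * faked virtual address from segkpm_create_va() is meant to be fed
 * into page_create_va(), so page selection can honor the VAC color
 * and cache bin even though the final kpm address is dictated by the
 * physical address of the page actually allocated.  This is roughly
 * the pattern seg_map uses; the function name, the flag choice and
 * passing segkpm as the seg argument are assumptions made for the
 * example.
 */
static page_t *
example_create_kpage(struct vnode *vp, u_offset_t off)
{
	caddr_t va;
	page_t *pp;

	/* Generate a color/bin-aware virtual address hint. */
	va = segkpm_create_va(off);

	/* PG_WAIT sleeps for memory; PG_EXCL fails if the page exists. */
	pp = page_create_va(vp, off, PAGESIZE, PG_WAIT | PG_EXCL,
	    segkpm, va);

	/* The real kpm address is later derived from the page itself. */
	return (pp);
}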
/*
 * Unload the mapping if the kpme instance has an active kpm mapping.
 */
void
segkpm_mapout_validkpme(struct kpme *kpme)
{
	caddr_t vaddr;
	page_t *pp;

retry:
	if ((pp = kpme->kpe_page) == NULL) {
		return;
	}

	if (page_lock(pp, SE_SHARED, (kmutex_t *)NULL, P_RECLAIM) == 0)
		goto retry;

	/*
	 * Check that the segkpm mapping was not unloaded in the meantime.
	 */
	if (kpme->kpe_page == NULL) {
		page_unlock(pp);
		return;
	}

	vaddr = hat_kpm_page2va(pp, 1);
	hat_kpm_mapout(pp, kpme, vaddr);
	page_unlock(pp);
}

#else	/* SEGKPM_SUPPORT */

/* segkpm stubs */

/*ARGSUSED*/
int
segkpm_create(struct seg *seg, void *argsp)
{
	return (0);
}

/* ARGSUSED */
faultcode_t
segkpm_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
	enum fault_type type, enum seg_rw rw)
{
	return (0);
}

/* ARGSUSED */
caddr_t
segkpm_create_va(u_offset_t off)
{
	return (NULL);
}

/* ARGSUSED */
void
segkpm_mapout_validkpme(struct kpme *kpme)
{
}

#endif	/* SEGKPM_SUPPORT */

/* ARGSUSED */
static int
segkpm_pagelock(struct seg *seg, caddr_t addr, size_t len,
	struct page ***page, enum lock_type type, enum seg_rw rw)
{
	return (ENOTSUP);
}

/*
 * Segkpm pages are not dumped, so we just return.
 */
/*ARGSUSED*/
static void
segkpm_dump(struct seg *seg)
{
}
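
/*
 * Usage sketch (hypothetical client code, illustrative only, assuming
 * a kpm-capable platform): a client that caches a kpm mapping passes
 * its own struct kpme to hat_kpm_mapin(), which records the page in
 * kpe_page.  When the client later recycles the cached slot it calls
 * segkpm_mapout_validkpme(), which is a no-op if the mapping has
 * already been unloaded.  This mirrors how seg_map manages the kpm
 * entries of its smap slots; the function names are made up.
 */
static caddr_t
example_slot_mapin(page_t *pp, struct kpme *kpme)
{
	ASSERT(PAGE_LOCKED(pp));

	/* The hat layer sets kpme->kpe_page for us. */
	return (hat_kpm_mapin(pp, kpme));
}

static void
example_slot_reclaim(struct kpme *kpme)
{
	/* Tears down the mapping only if kpme still refers to a page. */
	segkpm_mapout_validkpme(kpme);
}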