1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved. 23 * Copyright 2015, Joyent, Inc. All rights reserved. 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved. 25 */ 26 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */ 28 /* All Rights Reserved */ 29 30 /* 31 * University Copyright- Copyright (c) 1982, 1986, 1988 32 * The Regents of the University of California 33 * All Rights Reserved 34 * 35 * University Acknowledgment- Portions of this document are derived from 36 * software developed by the University of California, Berkeley, and its 37 * contributors. 38 */ 39 40 /* 41 * VM - shared or copy-on-write from a vnode/anonymous memory. 42 */ 43 44 #include <sys/types.h> 45 #include <sys/param.h> 46 #include <sys/t_lock.h> 47 #include <sys/errno.h> 48 #include <sys/systm.h> 49 #include <sys/mman.h> 50 #include <sys/debug.h> 51 #include <sys/cred.h> 52 #include <sys/vmsystm.h> 53 #include <sys/tuneable.h> 54 #include <sys/bitmap.h> 55 #include <sys/swap.h> 56 #include <sys/kmem.h> 57 #include <sys/sysmacros.h> 58 #include <sys/vtrace.h> 59 #include <sys/cmn_err.h> 60 #include <sys/callb.h> 61 #include <sys/vm.h> 62 #include <sys/dumphdr.h> 63 #include <sys/lgrp.h> 64 65 #include <vm/hat.h> 66 #include <vm/as.h> 67 #include <vm/seg.h> 68 #include <vm/seg_vn.h> 69 #include <vm/pvn.h> 70 #include <vm/anon.h> 71 #include <vm/page.h> 72 #include <vm/vpage.h> 73 #include <sys/proc.h> 74 #include <sys/task.h> 75 #include <sys/project.h> 76 #include <sys/zone.h> 77 #include <sys/shm_impl.h> 78 79 /* 80 * segvn_fault needs a temporary page list array. To avoid calling kmem all 81 * the time, it creates a small (FAULT_TMP_PAGES_NUM entry) array and uses 82 * it if it can. In the rare case when this page list is not large enough, 83 * it goes and gets a large enough array from kmem. 84 */ 85 #define FAULT_TMP_PAGES_NUM 0x8 86 #define FAULT_TMP_PAGES_SZ ptob(FAULT_TMP_PAGES_NUM) 87 88 /* 89 * Private seg op routines. 
90 */ 91 static int segvn_dup(struct seg *seg, struct seg *newseg); 92 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len); 93 static void segvn_free(struct seg *seg); 94 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg, 95 caddr_t addr, size_t len, enum fault_type type, 96 enum seg_rw rw); 97 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr); 98 static int segvn_setprot(struct seg *seg, caddr_t addr, 99 size_t len, uint_t prot); 100 static int segvn_checkprot(struct seg *seg, caddr_t addr, 101 size_t len, uint_t prot); 102 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta); 103 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len, 104 int attr, uint_t flags); 105 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len, 106 char *vec); 107 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 108 int attr, int op, ulong_t *lockmap, size_t pos); 109 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len, 110 uint_t *protv); 111 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr); 112 static int segvn_gettype(struct seg *seg, caddr_t addr); 113 static int segvn_getvp(struct seg *seg, caddr_t addr, 114 struct vnode **vpp); 115 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len, 116 uint_t behav); 117 static void segvn_dump(struct seg *seg); 118 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, 119 struct page ***ppp, enum lock_type type, enum seg_rw rw); 120 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, 121 uint_t szc); 122 static int segvn_getmemid(struct seg *seg, caddr_t addr, 123 memid_t *memidp); 124 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t); 125 static int segvn_inherit(struct seg *, caddr_t, size_t, uint_t); 126 127 const struct seg_ops segvn_ops = { 128 .dup = segvn_dup, 129 .unmap = segvn_unmap, 130 .free = segvn_free, 131 .fault = segvn_fault, 132 .faulta = segvn_faulta, 133 .setprot = segvn_setprot, 134 .checkprot = segvn_checkprot, 135 .kluster = segvn_kluster, 136 .sync = segvn_sync, 137 .incore = segvn_incore, 138 .lockop = segvn_lockop, 139 .getprot = segvn_getprot, 140 .getoffset = segvn_getoffset, 141 .gettype = segvn_gettype, 142 .getvp = segvn_getvp, 143 .advise = segvn_advise, 144 .dump = segvn_dump, 145 .pagelock = segvn_pagelock, 146 .setpagesize = segvn_setpagesize, 147 .getmemid = segvn_getmemid, 148 .getpolicy = segvn_getpolicy, 149 .inherit = segvn_inherit, 150 }; 151 152 /* 153 * Common zfod structures, provided as a shorthand for others to use. 
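 *
 * As a usage sketch (illustrative; this call site is not part of this
 * file), callers elsewhere in the kernel typically hand one of these
 * argument blocks straight to as_map() to get an all-zero,
 * copy-on-write anonymous mapping:
 *
 *	(void) as_map(as, addr, len, segvn_create, zfod_argsp);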
154 */ 155 static segvn_crargs_t zfod_segvn_crargs = 156 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL); 157 static segvn_crargs_t kzfod_segvn_crargs = 158 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER, 159 PROT_ALL & ~PROT_USER); 160 static segvn_crargs_t stack_noexec_crargs = 161 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL); 162 163 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */ 164 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */ 165 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */ 166 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */ 167 168 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */ 169 170 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */ 171 172 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */ 173 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */ 174 uint_t segvn_pglock_comb_bshift; 175 size_t segvn_pglock_comb_palign; 176 177 static int segvn_concat(struct seg *, struct seg *, int); 178 static int segvn_extend_prev(struct seg *, struct seg *, 179 struct segvn_crargs *, size_t); 180 static int segvn_extend_next(struct seg *, struct seg *, 181 struct segvn_crargs *, size_t); 182 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw); 183 static void segvn_pagelist_rele(page_t **); 184 static void segvn_setvnode_mpss(vnode_t *); 185 static void segvn_relocate_pages(page_t **, page_t *); 186 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *); 187 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t, 188 uint_t, page_t **, page_t **, uint_t *, int *); 189 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t, 190 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 191 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t, 192 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int); 193 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t, 194 u_offset_t, struct vpage *, page_t **, uint_t, 195 enum fault_type, enum seg_rw, int); 196 static void segvn_vpage(struct seg *); 197 static size_t segvn_count_swap_by_vpages(struct seg *); 198 199 static void segvn_purge(struct seg *seg); 200 static int segvn_reclaim(void *, caddr_t, size_t, struct page **, 201 enum seg_rw, int); 202 static int shamp_reclaim(void *, caddr_t, size_t, struct page **, 203 enum seg_rw, int); 204 205 static int sameprot(struct seg *, caddr_t, size_t); 206 207 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t); 208 static int segvn_clrszc(struct seg *); 209 static struct seg *segvn_split_seg(struct seg *, caddr_t); 210 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t, 211 ulong_t, uint_t); 212 213 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t, 214 size_t, void *, u_offset_t); 215 216 static struct kmem_cache *segvn_cache; 217 static struct kmem_cache **segvn_szc_cache; 218 219 #ifdef VM_STATS 220 static struct segvnvmstats_str { 221 ulong_t fill_vp_pages[31]; 222 ulong_t fltvnpages[49]; 223 ulong_t fullszcpages[10]; 224 ulong_t relocatepages[3]; 225 ulong_t fltanpages[17]; 226 ulong_t pagelock[2]; 227 ulong_t demoterange[3]; 228 } segvnvmstats; 229 #endif /* VM_STATS */ 230 231 #define SDR_RANGE 1 /* demote entire range */ 232 #define SDR_END 2 /* demote non aligned ends only */ 233 234 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \ 235 if 
((len) != 0) { \ 236 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \ 237 ASSERT(lpgaddr >= (seg)->s_base); \ 238 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \ 239 (len)), pgsz); \ 240 ASSERT(lpgeaddr > lpgaddr); \ 241 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \ 242 } else { \ 243 lpgeaddr = lpgaddr = (addr); \ 244 } \ 245 } 246 247 /*ARGSUSED*/ 248 static int 249 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags) 250 { 251 struct segvn_data *svd = buf; 252 253 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL); 254 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL); 255 svd->svn_trnext = svd->svn_trprev = NULL; 256 return (0); 257 } 258 259 /*ARGSUSED1*/ 260 static void 261 segvn_cache_destructor(void *buf, void *cdrarg) 262 { 263 struct segvn_data *svd = buf; 264 265 rw_destroy(&svd->lock); 266 mutex_destroy(&svd->segfree_syncmtx); 267 } 268 269 /*ARGSUSED*/ 270 static int 271 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags) 272 { 273 bzero(buf, sizeof (svntr_t)); 274 return (0); 275 } 276 277 /* 278 * Patching this variable to non-zero allows the system to run with 279 * stacks marked as "not executable". It's a bit of a kludge, but is 280 * provided as a tweakable for platforms that export those ABIs 281 * (e.g. sparc V8) that have executable stacks enabled by default. 282 * There are also some restrictions for platforms that don't actually 283 * implement 'noexec' protections. 284 * 285 * Once enabled, the system is (therefore) unable to provide a fully 286 * ABI-compliant execution environment, though practically speaking, 287 * most everything works. The exceptions are generally some interpreters 288 * and debuggers that create executable code on the stack and jump 289 * into it (without explicitly mprotecting the address range to include 290 * PROT_EXEC). 291 * 292 * One important class of applications that are disabled are those 293 * that have been transformed into malicious agents using one of the 294 * numerous "buffer overflow" attacks. See 4007890. 295 */ 296 int noexec_user_stack = 0; 297 int noexec_user_stack_log = 1; 298 299 int segvn_lpg_disable = 0; 300 uint_t segvn_maxpgszc = 0; 301 302 ulong_t segvn_vmpss_clrszc_cnt; 303 ulong_t segvn_vmpss_clrszc_err; 304 ulong_t segvn_fltvnpages_clrszc_cnt; 305 ulong_t segvn_fltvnpages_clrszc_err; 306 ulong_t segvn_setpgsz_align_err; 307 ulong_t segvn_setpgsz_anon_align_err; 308 ulong_t segvn_setpgsz_getattr_err; 309 ulong_t segvn_setpgsz_eof_err; 310 ulong_t segvn_faultvnmpss_align_err1; 311 ulong_t segvn_faultvnmpss_align_err2; 312 ulong_t segvn_faultvnmpss_align_err3; 313 ulong_t segvn_faultvnmpss_align_err4; 314 ulong_t segvn_faultvnmpss_align_err5; 315 ulong_t segvn_vmpss_pageio_deadlk_err; 316 317 int segvn_use_regions = 1; 318 319 /* 320 * Segvn supports text replication optimization for NUMA platforms. Text 321 * replica's are represented by anon maps (amp). There's one amp per text file 322 * region per lgroup. A process chooses the amp for each of its text mappings 323 * based on the lgroup assignment of its main thread (t_tid = 1). All 324 * processes that want a replica on a particular lgroup for the same text file 325 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table 326 * with vp,off,size,szc used as a key. Text replication segments are read only 327 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by 328 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode 329 * pages. 
Replication amp is assigned to a segment when it gets its first 330 * pagefault. To handle main thread lgroup rehoming, segvn_trasync_thread 331 * periodically rechecks whether the process still maps an amp local to the main 332 * thread. If not, the async thread forces the process to remap to an amp in the new 333 * home lgroup of the main thread. The current text replication implementation 334 * only benefits workloads that do most of their work in the 335 * main thread of a process, or whose threads all run in the same 336 * lgroup. To extend the text replication benefit to other types of 337 * multithreaded workloads, further work would be needed in the hat layer to 338 * allow the same virtual address in the same hat to simultaneously map 339 * different physical addresses (i.e. page table replication would be needed 340 * for x86). 341 * 342 * amp pages are used instead of vnode pages as long as the segment has a very 343 * simple life cycle. It's created via segvn_create(), handles S_EXEC 344 * (S_READ) pagefaults and is fully unmapped. If anything more complicated 345 * happens, such as a protection change, a real COW fault, a pagesize 346 * change, an MC_LOCK request or a partial unmap, we turn off 347 * text replication by converting the segment back to a vnode-only segment 348 * (unmap the segment's address range and set svd->amp to NULL). 349 * 350 * The original file can be changed after an amp is inserted into 351 * svntr_hashtab. Processes that are launched after the file has already 352 * changed can't use the replicas created prior to the file change. To 353 * implement this, hash entries are timestamped. Replicas can 354 * only be used if the current file modification time is the same as the timestamp 355 * saved when the hash entry was created. However, timestamps alone are not 356 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We 357 * deal with file changes via MAP_SHARED mappings differently. When writable 358 * MAP_SHARED mappings are created to vnodes marked as executable we mark all 359 * existing replicas for this vnode as not usable for future text 360 * mappings, and we don't create new replicas for files that currently have 361 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is 362 * true.
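 *
 * As a rough sketch of the lookup this describes (names other than
 * svntr_hashtab are illustrative, not the real field or helper names),
 * a text fault on a replication candidate segment does approximately:
 *
 *	bucket = svntr_hashtab[hash(vp)];
 *	entry  = search bucket for a match on (vp, off, size, szc);
 *	if (entry exists && the file's modification time still matches
 *	    the timestamp recorded in the entry)
 *		use the entry's amp for the main thread's home lgroup,
 *		creating it on first use;
 *	else
 *		fall back to mapping vnode pages directly.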
363 */ 364 365 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20) 366 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR; 367 368 static ulong_t svntr_hashtab_sz = 512; 369 static svntr_bucket_t *svntr_hashtab = NULL; 370 static struct kmem_cache *svntr_cache; 371 static svntr_stats_t *segvn_textrepl_stats; 372 static ksema_t segvn_trasync_sem; 373 374 int segvn_disable_textrepl = 1; 375 size_t textrepl_size_thresh = (size_t)-1; 376 size_t segvn_textrepl_bytes = 0; 377 size_t segvn_textrepl_max_bytes = 0; 378 clock_t segvn_update_textrepl_interval = 0; 379 int segvn_update_tr_time = 10; 380 int segvn_disable_textrepl_update = 0; 381 382 static void segvn_textrepl(struct seg *); 383 static void segvn_textunrepl(struct seg *, int); 384 static void segvn_inval_trcache(vnode_t *); 385 static void segvn_trasync_thread(void); 386 static void segvn_trupdate_wakeup(void *); 387 static void segvn_trupdate(void); 388 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *, 389 ulong_t); 390 391 /* 392 * Initialize segvn data structures 393 */ 394 void 395 segvn_init(void) 396 { 397 uint_t maxszc; 398 uint_t szc; 399 size_t pgsz; 400 401 segvn_cache = kmem_cache_create("segvn_cache", 402 sizeof (struct segvn_data), 0, 403 segvn_cache_constructor, segvn_cache_destructor, NULL, 404 NULL, NULL, 0); 405 406 if (segvn_lpg_disable == 0) { 407 szc = maxszc = page_num_pagesizes() - 1; 408 if (szc == 0) { 409 segvn_lpg_disable = 1; 410 } 411 if (page_get_pagesize(0) != PAGESIZE) { 412 panic("segvn_init: bad szc 0"); 413 /*NOTREACHED*/ 414 } 415 while (szc != 0) { 416 pgsz = page_get_pagesize(szc); 417 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) { 418 panic("segvn_init: bad szc %d", szc); 419 /*NOTREACHED*/ 420 } 421 szc--; 422 } 423 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc) 424 segvn_maxpgszc = maxszc; 425 } 426 427 if (segvn_maxpgszc) { 428 segvn_szc_cache = (struct kmem_cache **)kmem_alloc( 429 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *), 430 KM_SLEEP); 431 } 432 433 for (szc = 1; szc <= segvn_maxpgszc; szc++) { 434 char str[32]; 435 436 (void) sprintf(str, "segvn_szc_cache%d", szc); 437 segvn_szc_cache[szc] = kmem_cache_create(str, 438 page_get_pagecnt(szc) * sizeof (page_t *), 0, 439 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG); 440 } 441 442 443 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL)) 444 segvn_use_regions = 0; 445 446 /* 447 * For now shared regions and text replication segvn support 448 * are mutually exclusive. This is acceptable because 449 * currently significant benefit from text replication was 450 * only observed on AMD64 NUMA platforms (due to relatively 451 * small L2$ size) and currently we don't support shared 452 * regions on x86. 
453 */ 454 if (segvn_use_regions && !segvn_disable_textrepl) { 455 segvn_disable_textrepl = 1; 456 } 457 458 #if defined(_LP64) 459 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 && 460 !segvn_disable_textrepl) { 461 ulong_t i; 462 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t); 463 464 svntr_cache = kmem_cache_create("svntr_cache", 465 sizeof (svntr_t), 0, svntr_cache_constructor, NULL, 466 NULL, NULL, NULL, 0); 467 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP); 468 for (i = 0; i < svntr_hashtab_sz; i++) { 469 mutex_init(&svntr_hashtab[i].tr_lock, NULL, 470 MUTEX_DEFAULT, NULL); 471 } 472 segvn_textrepl_max_bytes = ptob(physmem) / 473 segvn_textrepl_max_bytes_factor; 474 segvn_textrepl_stats = kmem_zalloc(NCPU * 475 sizeof (svntr_stats_t), KM_SLEEP); 476 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL); 477 (void) thread_create(NULL, 0, segvn_trasync_thread, 478 NULL, 0, &p0, TS_RUN, minclsyspri); 479 } 480 #endif 481 482 if (!ISP2(segvn_pglock_comb_balign) || 483 segvn_pglock_comb_balign < PAGESIZE) { 484 segvn_pglock_comb_balign = 1UL << 16; /* 64K */ 485 } 486 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1; 487 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign); 488 } 489 490 #define SEGVN_PAGEIO ((void *)0x1) 491 #define SEGVN_NOPAGEIO ((void *)0x2) 492 493 static void 494 segvn_setvnode_mpss(vnode_t *vp) 495 { 496 int err; 497 498 ASSERT(vp->v_mpssdata == NULL || 499 vp->v_mpssdata == SEGVN_PAGEIO || 500 vp->v_mpssdata == SEGVN_NOPAGEIO); 501 502 if (vp->v_mpssdata == NULL) { 503 if (vn_vmpss_usepageio(vp)) { 504 err = VOP_PAGEIO(vp, (page_t *)NULL, 505 (u_offset_t)0, 0, 0, CRED(), NULL); 506 } else { 507 err = ENOSYS; 508 } 509 /* 510 * set v_mpssdata just once per vnode life 511 * so that it never changes. 512 */ 513 mutex_enter(&vp->v_lock); 514 if (vp->v_mpssdata == NULL) { 515 if (err == EINVAL) { 516 vp->v_mpssdata = SEGVN_PAGEIO; 517 } else { 518 vp->v_mpssdata = SEGVN_NOPAGEIO; 519 } 520 } 521 mutex_exit(&vp->v_lock); 522 } 523 } 524 525 int 526 segvn_create(struct seg *seg, void *argsp) 527 { 528 struct segvn_crargs *a = (struct segvn_crargs *)argsp; 529 struct segvn_data *svd; 530 size_t swresv = 0; 531 struct cred *cred; 532 struct anon_map *amp; 533 int error = 0; 534 size_t pgsz; 535 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT; 536 int use_rgn = 0; 537 int trok = 0; 538 539 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 540 541 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) { 542 panic("segvn_create type"); 543 /*NOTREACHED*/ 544 } 545 546 /* 547 * Check arguments. If a shared anon structure is given then 548 * it is illegal to also specify a vp. 549 */ 550 if (a->amp != NULL && a->vp != NULL) { 551 panic("segvn_create anon_map"); 552 /*NOTREACHED*/ 553 } 554 555 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) && 556 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) && 557 segvn_use_regions) { 558 use_rgn = 1; 559 } 560 561 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. 
*/ 562 if (a->type == MAP_SHARED) 563 a->flags &= ~MAP_NORESERVE; 564 565 if (a->szc != 0) { 566 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) || 567 (a->amp != NULL && a->type == MAP_PRIVATE) || 568 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) { 569 a->szc = 0; 570 } else { 571 if (a->szc > segvn_maxpgszc) 572 a->szc = segvn_maxpgszc; 573 pgsz = page_get_pagesize(a->szc); 574 if (!IS_P2ALIGNED(seg->s_base, pgsz) || 575 !IS_P2ALIGNED(seg->s_size, pgsz)) { 576 a->szc = 0; 577 } else if (a->vp != NULL) { 578 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) { 579 /* 580 * paranoid check. 581 * hat_page_demote() is not supported 582 * on swapfs pages. 583 */ 584 a->szc = 0; 585 } else if (map_addr_vacalign_check(seg->s_base, 586 a->offset & PAGEMASK)) { 587 a->szc = 0; 588 } 589 } else if (a->amp != NULL) { 590 pgcnt_t anum = btopr(a->offset); 591 pgcnt_t pgcnt = page_get_pagecnt(a->szc); 592 if (!IS_P2ALIGNED(anum, pgcnt)) { 593 a->szc = 0; 594 } 595 } 596 } 597 } 598 599 /* 600 * If segment may need private pages, reserve them now. 601 */ 602 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) || 603 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) { 604 if (anon_resv_zone(seg->s_size, 605 seg->s_as->a_proc->p_zone) == 0) 606 return (EAGAIN); 607 swresv = seg->s_size; 608 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 609 seg, swresv, 1); 610 } 611 612 /* 613 * Reserve any mapping structures that may be required. 614 * 615 * Don't do it for segments that may use regions. It's currently a 616 * noop in the hat implementations anyway. 617 */ 618 if (!use_rgn) { 619 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP); 620 } 621 622 if (a->cred) { 623 cred = a->cred; 624 crhold(cred); 625 } else { 626 crhold(cred = CRED()); 627 } 628 629 /* Inform the vnode of the new mapping */ 630 if (a->vp != NULL) { 631 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK, 632 seg->s_as, seg->s_base, seg->s_size, a->prot, 633 a->maxprot, a->type, cred, NULL); 634 if (error) { 635 if (swresv != 0) { 636 anon_unresv_zone(swresv, 637 seg->s_as->a_proc->p_zone); 638 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 639 "anon proc:%p %lu %u", seg, swresv, 0); 640 } 641 crfree(cred); 642 if (!use_rgn) { 643 hat_unload(seg->s_as->a_hat, seg->s_base, 644 seg->s_size, HAT_UNLOAD_UNMAP); 645 } 646 return (error); 647 } 648 /* 649 * svntr_hashtab will be NULL if we support shared regions. 650 */ 651 trok = ((a->flags & MAP_TEXT) && 652 (seg->s_size > textrepl_size_thresh || 653 (a->flags & _MAP_TEXTREPL)) && 654 lgrp_optimizations() && svntr_hashtab != NULL && 655 a->type == MAP_PRIVATE && swresv == 0 && 656 !(a->flags & MAP_NORESERVE) && 657 seg->s_as != &kas && a->vp->v_type == VREG); 658 659 ASSERT(!trok || !use_rgn); 660 } 661 662 /* 663 * MAP_NORESERVE mappings don't count towards the VSZ of a process 664 * until we fault the pages in. 665 */ 666 if ((a->vp == NULL || a->vp->v_type != VREG) && 667 a->flags & MAP_NORESERVE) { 668 seg->s_as->a_resvsize -= seg->s_size; 669 } 670 671 /* 672 * If more than one segment in the address space, and they're adjacent 673 * virtually, try to concatenate them. Don't concatenate if an 674 * explicit anon_map structure was supplied (e.g., SystemV shared 675 * memory) or if we'll use text replication for this segment. 
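 *
 * For example (an illustrative user-level view, not code from this
 * file), two back-to-back private anonymous mappings such as
 *
 *	p1 = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON, -1, 0);
 *	p2 = mmap((char *)p1 + len, len, PROT_READ | PROT_WRITE,
 *	    MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0);
 *
 * will usually end up described by a single segvn segment once the
 * compatibility checks below succeed.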
676 */ 677 if (a->amp == NULL && !use_rgn && !trok) { 678 struct seg *pseg, *nseg; 679 struct segvn_data *psvd, *nsvd; 680 lgrp_mem_policy_t ppolicy, npolicy; 681 uint_t lgrp_mem_policy_flags = 0; 682 extern lgrp_mem_policy_t lgrp_mem_default_policy; 683 684 /* 685 * Memory policy flags (lgrp_mem_policy_flags) is valid when 686 * extending stack/heap segments. 687 */ 688 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) && 689 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) { 690 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags; 691 } else { 692 /* 693 * Get policy when not extending it from another segment 694 */ 695 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type); 696 } 697 698 /* 699 * First, try to concatenate the previous and new segments 700 */ 701 pseg = AS_SEGPREV(seg->s_as, seg); 702 if (pseg != NULL && 703 pseg->s_base + pseg->s_size == seg->s_base && 704 pseg->s_ops == &segvn_ops) { 705 /* 706 * Get memory allocation policy from previous segment. 707 * When extension is specified (e.g. for heap) apply 708 * this policy to the new segment regardless of the 709 * outcome of segment concatenation. Extension occurs 710 * for non-default policy otherwise default policy is 711 * used and is based on extended segment size. 712 */ 713 psvd = (struct segvn_data *)pseg->s_data; 714 ppolicy = psvd->policy_info.mem_policy; 715 if (lgrp_mem_policy_flags == 716 LGRP_MP_FLAG_EXTEND_UP) { 717 if (ppolicy != lgrp_mem_default_policy) { 718 mpolicy = ppolicy; 719 } else { 720 mpolicy = lgrp_mem_policy_default( 721 pseg->s_size + seg->s_size, 722 a->type); 723 } 724 } 725 726 if (mpolicy == ppolicy && 727 (pseg->s_size + seg->s_size <= 728 segvn_comb_thrshld || psvd->amp == NULL) && 729 segvn_extend_prev(pseg, seg, a, swresv) == 0) { 730 /* 731 * success! now try to concatenate 732 * with following seg 733 */ 734 crfree(cred); 735 nseg = AS_SEGNEXT(pseg->s_as, pseg); 736 if (nseg != NULL && 737 nseg != pseg && 738 nseg->s_ops == &segvn_ops && 739 pseg->s_base + pseg->s_size == 740 nseg->s_base) 741 (void) segvn_concat(pseg, nseg, 0); 742 ASSERT(pseg->s_szc == 0 || 743 (a->szc == pseg->s_szc && 744 IS_P2ALIGNED(pseg->s_base, pgsz) && 745 IS_P2ALIGNED(pseg->s_size, pgsz))); 746 return (0); 747 } 748 } 749 750 /* 751 * Failed, so try to concatenate with following seg 752 */ 753 nseg = AS_SEGNEXT(seg->s_as, seg); 754 if (nseg != NULL && 755 seg->s_base + seg->s_size == nseg->s_base && 756 nseg->s_ops == &segvn_ops) { 757 /* 758 * Get memory allocation policy from next segment. 759 * When extension is specified (e.g. for stack) apply 760 * this policy to the new segment regardless of the 761 * outcome of segment concatenation. Extension occurs 762 * for non-default policy otherwise default policy is 763 * used and is based on extended segment size. 
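 *
 * Concretely (an illustrative case): if the existing stack segment
 * below carries a non-default policy such as LGRP_MEM_POLICY_NEXT,
 * the new extension inherits that policy even if the two segments
 * cannot actually be concatenated; with the default policy, the
 * policy is instead recomputed from the combined size
 * (nseg->s_size + seg->s_size).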
764 */ 765 nsvd = (struct segvn_data *)nseg->s_data; 766 npolicy = nsvd->policy_info.mem_policy; 767 if (lgrp_mem_policy_flags == 768 LGRP_MP_FLAG_EXTEND_DOWN) { 769 if (npolicy != lgrp_mem_default_policy) { 770 mpolicy = npolicy; 771 } else { 772 mpolicy = lgrp_mem_policy_default( 773 nseg->s_size + seg->s_size, 774 a->type); 775 } 776 } 777 778 if (mpolicy == npolicy && 779 segvn_extend_next(seg, nseg, a, swresv) == 0) { 780 crfree(cred); 781 ASSERT(nseg->s_szc == 0 || 782 (a->szc == nseg->s_szc && 783 IS_P2ALIGNED(nseg->s_base, pgsz) && 784 IS_P2ALIGNED(nseg->s_size, pgsz))); 785 return (0); 786 } 787 } 788 } 789 790 if (a->vp != NULL) { 791 VN_HOLD(a->vp); 792 if (a->type == MAP_SHARED) 793 lgrp_shm_policy_init(NULL, a->vp); 794 } 795 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 796 797 seg->s_ops = &segvn_ops; 798 seg->s_data = (void *)svd; 799 seg->s_szc = a->szc; 800 801 svd->seg = seg; 802 svd->vp = a->vp; 803 /* 804 * Anonymous mappings have no backing file so the offset is meaningless. 805 */ 806 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0; 807 svd->prot = a->prot; 808 svd->maxprot = a->maxprot; 809 svd->pageprot = 0; 810 svd->type = a->type; 811 svd->vpage = NULL; 812 svd->cred = cred; 813 svd->advice = MADV_NORMAL; 814 svd->pageadvice = 0; 815 svd->flags = (ushort_t)a->flags; 816 svd->softlockcnt = 0; 817 svd->softlockcnt_sbase = 0; 818 svd->softlockcnt_send = 0; 819 svd->svn_inz = 0; 820 svd->rcookie = HAT_INVALID_REGION_COOKIE; 821 svd->pageswap = 0; 822 823 if (a->szc != 0 && a->vp != NULL) { 824 segvn_setvnode_mpss(a->vp); 825 } 826 if (svd->type == MAP_SHARED && svd->vp != NULL && 827 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) { 828 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 829 segvn_inval_trcache(svd->vp); 830 } 831 832 amp = a->amp; 833 if ((svd->amp = amp) == NULL) { 834 svd->anon_index = 0; 835 if (svd->type == MAP_SHARED) { 836 svd->swresv = 0; 837 /* 838 * Shared mappings to a vp need no other setup. 839 * If we have a shared mapping to an anon_map object 840 * which hasn't been allocated yet, allocate the 841 * struct now so that it will be properly shared 842 * by remembering the swap reservation there. 843 */ 844 if (a->vp == NULL) { 845 svd->amp = anonmap_alloc(seg->s_size, swresv, 846 ANON_SLEEP); 847 svd->amp->a_szc = seg->s_szc; 848 } 849 } else { 850 /* 851 * Private mapping (with or without a vp). 852 * Allocate anon_map when needed. 853 */ 854 svd->swresv = swresv; 855 } 856 } else { 857 pgcnt_t anon_num; 858 859 /* 860 * Mapping to an existing anon_map structure without a vp. 861 * For now we will ensure that the segment size isn't larger 862 * than the size - offset gives us. Later on we may wish to 863 * have the anon array dynamically allocated itself so that 864 * we don't always have to allocate all the anon pointer slots. 865 * This of course involves adding extra code to check that we 866 * aren't trying to use an anon pointer slot beyond the end 867 * of the currently allocated anon array. 868 */ 869 if ((amp->size - a->offset) < seg->s_size) { 870 panic("segvn_create anon_map size"); 871 /*NOTREACHED*/ 872 } 873 874 anon_num = btopr(a->offset); 875 876 if (a->type == MAP_SHARED) { 877 /* 878 * SHARED mapping to a given anon_map. 879 */ 880 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 881 amp->refcnt++; 882 if (a->szc > amp->a_szc) { 883 amp->a_szc = a->szc; 884 } 885 ANON_LOCK_EXIT(&amp->a_rwlock); 886 svd->anon_index = anon_num; 887 svd->swresv = 0; 888 } else { 889 /* 890 * PRIVATE mapping to a given anon_map.
891 * Make sure that all the needed anon 892 * structures are created (so that we will 893 * share the underlying pages if nothing 894 * is written by this mapping) and then 895 * duplicate the anon array as is done 896 * when a privately mapped segment is dup'ed. 897 */ 898 struct anon *ap; 899 caddr_t addr; 900 caddr_t eaddr; 901 ulong_t anon_idx; 902 int hat_flag = HAT_LOAD; 903 904 if (svd->flags & MAP_TEXT) { 905 hat_flag |= HAT_LOAD_TEXT; 906 } 907 908 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 909 svd->amp->a_szc = seg->s_szc; 910 svd->anon_index = 0; 911 svd->swresv = swresv; 912 913 /* 914 * Prevent 2 threads from allocating anon 915 * slots simultaneously. 916 */ 917 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 918 eaddr = seg->s_base + seg->s_size; 919 920 for (anon_idx = anon_num, addr = seg->s_base; 921 addr < eaddr; addr += PAGESIZE, anon_idx++) { 922 page_t *pp; 923 924 if ((ap = anon_get_ptr(amp->ahp, 925 anon_idx)) != NULL) 926 continue; 927 928 /* 929 * Allocate the anon struct now. 930 * Might as well load up translation 931 * to the page while we're at it... 932 */ 933 pp = anon_zero(seg, addr, &ap, cred); 934 if (ap == NULL || pp == NULL) { 935 panic("segvn_create anon_zero"); 936 /*NOTREACHED*/ 937 } 938 939 /* 940 * Re-acquire the anon_map lock and 941 * initialize the anon array entry. 942 */ 943 ASSERT(anon_get_ptr(amp->ahp, 944 anon_idx) == NULL); 945 (void) anon_set_ptr(amp->ahp, anon_idx, ap, 946 ANON_SLEEP); 947 948 ASSERT(seg->s_szc == 0); 949 ASSERT(!IS_VMODSORT(pp->p_vnode)); 950 951 ASSERT(use_rgn == 0); 952 hat_memload(seg->s_as->a_hat, addr, pp, 953 svd->prot & ~PROT_WRITE, hat_flag); 954 955 page_unlock(pp); 956 } 957 ASSERT(seg->s_szc == 0); 958 anon_dup(amp->ahp, anon_num, svd->amp->ahp, 959 0, seg->s_size); 960 ANON_LOCK_EXIT(&amp->a_rwlock); 961 } 962 } 963 964 /* 965 * Set default memory allocation policy for segment 966 * 967 * Always set policy for private memory at least for initialization 968 * even if this is a shared memory segment 969 */ 970 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size); 971 972 if (svd->type == MAP_SHARED) 973 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index, 974 svd->vp, svd->offset, seg->s_size); 975 976 if (use_rgn) { 977 ASSERT(!trok); 978 ASSERT(svd->amp == NULL); 979 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base, 980 seg->s_size, (void *)svd->vp, svd->offset, svd->prot, 981 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback, 982 HAT_REGION_TEXT); 983 } 984 985 ASSERT(!trok || !(svd->prot & PROT_WRITE)); 986 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF; 987 988 return (0); 989 } 990 991 /* 992 * Concatenate two existing segments, if possible. 993 * Return 0 on success, -1 if two segments are not compatible 994 * or -2 on memory allocation failure.
995 * If amp_cat == 1 then try and concat segments with anon maps 996 */ 997 static int 998 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat) 999 { 1000 struct segvn_data *svd1 = seg1->s_data; 1001 struct segvn_data *svd2 = seg2->s_data; 1002 struct anon_map *amp1 = svd1->amp; 1003 struct anon_map *amp2 = svd2->amp; 1004 struct vpage *vpage1 = svd1->vpage; 1005 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL; 1006 size_t size, nvpsize; 1007 pgcnt_t npages1, npages2; 1008 1009 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as); 1010 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1011 ASSERT(seg1->s_ops == seg2->s_ops); 1012 1013 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) || 1014 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1015 return (-1); 1016 } 1017 1018 /* both segments exist, try to merge them */ 1019 #define incompat(x) (svd1->x != svd2->x) 1020 if (incompat(vp) || incompat(maxprot) || 1021 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) || 1022 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) || 1023 incompat(type) || incompat(cred) || incompat(flags) || 1024 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) || 1025 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0) 1026 return (-1); 1027 #undef incompat 1028 1029 /* 1030 * vp == NULL implies zfod, offset doesn't matter 1031 */ 1032 if (svd1->vp != NULL && 1033 svd1->offset + seg1->s_size != svd2->offset) { 1034 return (-1); 1035 } 1036 1037 /* 1038 * Don't concatenate if either segment uses text replication. 1039 */ 1040 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) { 1041 return (-1); 1042 } 1043 1044 /* 1045 * Fail early if we're not supposed to concatenate 1046 * segments with non NULL amp. 1047 */ 1048 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) { 1049 return (-1); 1050 } 1051 1052 if (svd1->vp == NULL && svd1->type == MAP_SHARED) { 1053 if (amp1 != amp2) { 1054 return (-1); 1055 } 1056 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) != 1057 svd2->anon_index) { 1058 return (-1); 1059 } 1060 ASSERT(amp1 == NULL || amp1->refcnt >= 2); 1061 } 1062 1063 /* 1064 * If either seg has vpages, create a new merged vpage array. 
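 *
 * The merged array is laid out as (a sketch, not code):
 *
 *	nvpage: [ npages1 entries for seg1 | npages2 entries for seg2 ]
 *
 * A side that had no per-page array gets its entries synthesized from
 * that segment's segment-wide prot and advice, so no per-page state
 * from the other side is lost.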
1065 */ 1066 if (vpage1 != NULL || vpage2 != NULL) { 1067 struct vpage *vp, *evp; 1068 1069 npages1 = seg_pages(seg1); 1070 npages2 = seg_pages(seg2); 1071 nvpsize = vpgtob(npages1 + npages2); 1072 1073 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) { 1074 return (-2); 1075 } 1076 1077 if (vpage1 != NULL) { 1078 bcopy(vpage1, nvpage, vpgtob(npages1)); 1079 } else { 1080 evp = nvpage + npages1; 1081 for (vp = nvpage; vp < evp; vp++) { 1082 VPP_SETPROT(vp, svd1->prot); 1083 VPP_SETADVICE(vp, svd1->advice); 1084 } 1085 } 1086 1087 if (vpage2 != NULL) { 1088 bcopy(vpage2, nvpage + npages1, vpgtob(npages2)); 1089 } else { 1090 evp = nvpage + npages1 + npages2; 1091 for (vp = nvpage + npages1; vp < evp; vp++) { 1092 VPP_SETPROT(vp, svd2->prot); 1093 VPP_SETADVICE(vp, svd2->advice); 1094 } 1095 } 1096 1097 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) { 1098 ASSERT(svd1->swresv == seg1->s_size); 1099 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1100 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1101 evp = nvpage + npages1; 1102 for (vp = nvpage; vp < evp; vp++) { 1103 VPP_SETSWAPRES(vp); 1104 } 1105 } 1106 1107 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) { 1108 ASSERT(svd2->swresv == seg2->s_size); 1109 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1110 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1111 vp = nvpage + npages1; 1112 evp = vp + npages2; 1113 for (; vp < evp; vp++) { 1114 VPP_SETSWAPRES(vp); 1115 } 1116 } 1117 } 1118 ASSERT((vpage1 != NULL || vpage2 != NULL) || 1119 (svd1->pageswap == 0 && svd2->pageswap == 0)); 1120 1121 /* 1122 * If either segment has private pages, create a new merged anon 1123 * array. If merging shared anon segments, just decrement the anon map's 1124 * refcnt. 1125 */ 1126 if (amp1 != NULL && svd1->type == MAP_SHARED) { 1127 ASSERT(amp1 == amp2 && svd1->vp == NULL); 1128 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER); 1129 ASSERT(amp1->refcnt >= 2); 1130 amp1->refcnt--; 1131 ANON_LOCK_EXIT(&amp1->a_rwlock); 1132 svd2->amp = NULL; 1133 } else if (amp1 != NULL || amp2 != NULL) { 1134 struct anon_hdr *nahp; 1135 struct anon_map *namp = NULL; 1136 size_t asize; 1137 1138 ASSERT(svd1->type == MAP_PRIVATE); 1139 1140 asize = seg1->s_size + seg2->s_size; 1141 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) { 1142 if (nvpage != NULL) { 1143 kmem_free(nvpage, nvpsize); 1144 } 1145 return (-2); 1146 } 1147 if (amp1 != NULL) { 1148 /* 1149 * XXX anon rwlock is not really needed because 1150 * this is a private segment and we are writers.
1151 */ 1152 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1153 ASSERT(amp1->refcnt == 1); 1154 if (anon_copy_ptr(amp1->ahp, svd1->anon_index, 1155 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) { 1156 anon_release(nahp, btop(asize)); 1157 ANON_LOCK_EXIT(&1->a_rwlock); 1158 if (nvpage != NULL) { 1159 kmem_free(nvpage, nvpsize); 1160 } 1161 return (-2); 1162 } 1163 } 1164 if (amp2 != NULL) { 1165 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1166 ASSERT(amp2->refcnt == 1); 1167 if (anon_copy_ptr(amp2->ahp, svd2->anon_index, 1168 nahp, btop(seg1->s_size), btop(seg2->s_size), 1169 ANON_NOSLEEP)) { 1170 anon_release(nahp, btop(asize)); 1171 ANON_LOCK_EXIT(&2->a_rwlock); 1172 if (amp1 != NULL) { 1173 ANON_LOCK_EXIT(&1->a_rwlock); 1174 } 1175 if (nvpage != NULL) { 1176 kmem_free(nvpage, nvpsize); 1177 } 1178 return (-2); 1179 } 1180 } 1181 if (amp1 != NULL) { 1182 namp = amp1; 1183 anon_release(amp1->ahp, btop(amp1->size)); 1184 } 1185 if (amp2 != NULL) { 1186 if (namp == NULL) { 1187 ASSERT(amp1 == NULL); 1188 namp = amp2; 1189 anon_release(amp2->ahp, btop(amp2->size)); 1190 } else { 1191 amp2->refcnt--; 1192 ANON_LOCK_EXIT(&2->a_rwlock); 1193 anonmap_free(amp2); 1194 } 1195 svd2->amp = NULL; /* needed for seg_free */ 1196 } 1197 namp->ahp = nahp; 1198 namp->size = asize; 1199 svd1->amp = namp; 1200 svd1->anon_index = 0; 1201 ANON_LOCK_EXIT(&namp->a_rwlock); 1202 } 1203 /* 1204 * Now free the old vpage structures. 1205 */ 1206 if (nvpage != NULL) { 1207 if (vpage1 != NULL) { 1208 kmem_free(vpage1, vpgtob(npages1)); 1209 } 1210 if (vpage2 != NULL) { 1211 svd2->vpage = NULL; 1212 kmem_free(vpage2, vpgtob(npages2)); 1213 } 1214 if (svd2->pageprot) { 1215 svd1->pageprot = 1; 1216 } 1217 if (svd2->pageadvice) { 1218 svd1->pageadvice = 1; 1219 } 1220 if (svd2->pageswap) { 1221 svd1->pageswap = 1; 1222 } 1223 svd1->vpage = nvpage; 1224 } 1225 1226 /* all looks ok, merge segments */ 1227 svd1->swresv += svd2->swresv; 1228 svd2->swresv = 0; /* so seg_free doesn't release swap space */ 1229 size = seg2->s_size; 1230 seg_free(seg2); 1231 seg1->s_size += size; 1232 return (0); 1233 } 1234 1235 /* 1236 * Extend the previous segment (seg1) to include the 1237 * new segment (seg2 + a), if possible. 1238 * Return 0 on success. 1239 */ 1240 static int 1241 segvn_extend_prev(seg1, seg2, a, swresv) 1242 struct seg *seg1, *seg2; 1243 struct segvn_crargs *a; 1244 size_t swresv; 1245 { 1246 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data; 1247 size_t size; 1248 struct anon_map *amp1; 1249 struct vpage *new_vpage; 1250 1251 /* 1252 * We don't need any segment level locks for "segvn" data 1253 * since the address space is "write" locked. 
1254 */ 1255 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock)); 1256 1257 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) { 1258 return (-1); 1259 } 1260 1261 /* second segment is new, try to extend first */ 1262 /* XXX - should also check cred */ 1263 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot || 1264 (!svd1->pageprot && (svd1->prot != a->prot)) || 1265 svd1->type != a->type || svd1->flags != a->flags || 1266 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0) 1267 return (-1); 1268 1269 /* vp == NULL implies zfod, offset doesn't matter */ 1270 if (svd1->vp != NULL && 1271 svd1->offset + seg1->s_size != (a->offset & PAGEMASK)) 1272 return (-1); 1273 1274 if (svd1->tr_state != SEGVN_TR_OFF) { 1275 return (-1); 1276 } 1277 1278 amp1 = svd1->amp; 1279 if (amp1) { 1280 pgcnt_t newpgs; 1281 1282 /* 1283 * Segment has private pages, can data structures 1284 * be expanded? 1285 * 1286 * Acquire the anon_map lock to prevent it from changing, 1287 * if it is shared. This ensures that the anon_map 1288 * will not change while a thread which has a read/write 1289 * lock on an address space references it. 1290 * XXX - Don't need the anon_map lock at all if "refcnt" 1291 * is 1. 1292 * 1293 * Can't grow a MAP_SHARED segment with an anonmap because 1294 * there may be existing anon slots where we want to extend 1295 * the segment and we wouldn't know what to do with them 1296 * (e.g., for tmpfs right thing is to just leave them there, 1297 * for /dev/zero they should be cleared out). 1298 */ 1299 if (svd1->type == MAP_SHARED) 1300 return (-1); 1301 1302 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER); 1303 if (amp1->refcnt > 1) { 1304 ANON_LOCK_EXIT(&1->a_rwlock); 1305 return (-1); 1306 } 1307 newpgs = anon_grow(amp1->ahp, &svd1->anon_index, 1308 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP); 1309 1310 if (newpgs == 0) { 1311 ANON_LOCK_EXIT(&1->a_rwlock); 1312 return (-1); 1313 } 1314 amp1->size = ptob(newpgs); 1315 ANON_LOCK_EXIT(&1->a_rwlock); 1316 } 1317 if (svd1->vpage != NULL) { 1318 struct vpage *vp, *evp; 1319 new_vpage = 1320 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1321 KM_NOSLEEP); 1322 if (new_vpage == NULL) 1323 return (-1); 1324 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1))); 1325 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1))); 1326 svd1->vpage = new_vpage; 1327 1328 vp = new_vpage + seg_pages(seg1); 1329 evp = vp + seg_pages(seg2); 1330 for (; vp < evp; vp++) 1331 VPP_SETPROT(vp, a->prot); 1332 if (svd1->pageswap && swresv) { 1333 ASSERT(!(svd1->flags & MAP_NORESERVE)); 1334 ASSERT(swresv == seg2->s_size); 1335 vp = new_vpage + seg_pages(seg1); 1336 for (; vp < evp; vp++) { 1337 VPP_SETSWAPRES(vp); 1338 } 1339 } 1340 } 1341 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0); 1342 size = seg2->s_size; 1343 seg_free(seg2); 1344 seg1->s_size += size; 1345 svd1->swresv += swresv; 1346 if (svd1->pageprot && (a->prot & PROT_WRITE) && 1347 svd1->type == MAP_SHARED && svd1->vp != NULL && 1348 (svd1->vp->v_flag & VVMEXEC)) { 1349 ASSERT(vn_is_mapped(svd1->vp, V_WRITE)); 1350 segvn_inval_trcache(svd1->vp); 1351 } 1352 return (0); 1353 } 1354 1355 /* 1356 * Extend the next segment (seg2) to include the 1357 * new segment (seg1 + a), if possible. 1358 * Return 0 on success. 
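 *
 * This is the downward-growth (e.g. stack) counterpart of
 * segvn_extend_prev(): seg1 sits immediately below seg2, so on success
 * seg2 absorbs it from below. The adjustments made at the end of this
 * function amount to (a sketch of the existing code, not an addition):
 *
 *	seg2->s_base -= seg1->s_size;
 *	seg2->s_size += seg1->s_size;
 *	svd2->offset -= seg1->s_size;
 *
 * and any private anon slots are grown with ANON_GROWDOWN so that the
 * existing entries stay associated with their original addresses.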
1359 */ 1360 static int 1361 segvn_extend_next( 1362 struct seg *seg1, 1363 struct seg *seg2, 1364 struct segvn_crargs *a, 1365 size_t swresv) 1366 { 1367 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data; 1368 size_t size; 1369 struct anon_map *amp2; 1370 struct vpage *new_vpage; 1371 1372 /* 1373 * We don't need any segment level locks for "segvn" data 1374 * since the address space is "write" locked. 1375 */ 1376 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock)); 1377 1378 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) { 1379 return (-1); 1380 } 1381 1382 /* first segment is new, try to extend second */ 1383 /* XXX - should also check cred */ 1384 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot || 1385 (!svd2->pageprot && (svd2->prot != a->prot)) || 1386 svd2->type != a->type || svd2->flags != a->flags || 1387 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0) 1388 return (-1); 1389 /* vp == NULL implies zfod, offset doesn't matter */ 1390 if (svd2->vp != NULL && 1391 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset) 1392 return (-1); 1393 1394 if (svd2->tr_state != SEGVN_TR_OFF) { 1395 return (-1); 1396 } 1397 1398 amp2 = svd2->amp; 1399 if (amp2) { 1400 pgcnt_t newpgs; 1401 1402 /* 1403 * Segment has private pages, can data structures 1404 * be expanded? 1405 * 1406 * Acquire the anon_map lock to prevent it from changing, 1407 * if it is shared. This ensures that the anon_map 1408 * will not change while a thread which has a read/write 1409 * lock on an address space references it. 1410 * 1411 * XXX - Don't need the anon_map lock at all if "refcnt" 1412 * is 1. 1413 */ 1414 if (svd2->type == MAP_SHARED) 1415 return (-1); 1416 1417 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER); 1418 if (amp2->refcnt > 1) { 1419 ANON_LOCK_EXIT(&2->a_rwlock); 1420 return (-1); 1421 } 1422 newpgs = anon_grow(amp2->ahp, &svd2->anon_index, 1423 btop(seg2->s_size), btop(seg1->s_size), 1424 ANON_NOSLEEP | ANON_GROWDOWN); 1425 1426 if (newpgs == 0) { 1427 ANON_LOCK_EXIT(&2->a_rwlock); 1428 return (-1); 1429 } 1430 amp2->size = ptob(newpgs); 1431 ANON_LOCK_EXIT(&2->a_rwlock); 1432 } 1433 if (svd2->vpage != NULL) { 1434 struct vpage *vp, *evp; 1435 new_vpage = 1436 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)), 1437 KM_NOSLEEP); 1438 if (new_vpage == NULL) { 1439 /* Not merging segments so adjust anon_index back */ 1440 if (amp2) 1441 svd2->anon_index += seg_pages(seg1); 1442 return (-1); 1443 } 1444 bcopy(svd2->vpage, new_vpage + seg_pages(seg1), 1445 vpgtob(seg_pages(seg2))); 1446 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2))); 1447 svd2->vpage = new_vpage; 1448 1449 vp = new_vpage; 1450 evp = vp + seg_pages(seg1); 1451 for (; vp < evp; vp++) 1452 VPP_SETPROT(vp, a->prot); 1453 if (svd2->pageswap && swresv) { 1454 ASSERT(!(svd2->flags & MAP_NORESERVE)); 1455 ASSERT(swresv == seg1->s_size); 1456 vp = new_vpage; 1457 for (; vp < evp; vp++) { 1458 VPP_SETSWAPRES(vp); 1459 } 1460 } 1461 } 1462 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0); 1463 size = seg1->s_size; 1464 seg_free(seg1); 1465 seg2->s_size += size; 1466 seg2->s_base -= size; 1467 svd2->offset -= size; 1468 svd2->swresv += swresv; 1469 if (svd2->pageprot && (a->prot & PROT_WRITE) && 1470 svd2->type == MAP_SHARED && svd2->vp != NULL && 1471 (svd2->vp->v_flag & VVMEXEC)) { 1472 ASSERT(vn_is_mapped(svd2->vp, V_WRITE)); 1473 segvn_inval_trcache(svd2->vp); 1474 } 1475 return (0); 1476 } 1477 1478 /* 1479 * Duplicate all the pages in the segment. This may break COW sharing for a 1480 * given page. 
If the page is marked with inherit zero set, then instead of 1481 * duplicating the page, we zero the page. 1482 */ 1483 static int 1484 segvn_dup_pages(struct seg *seg, struct seg *newseg) 1485 { 1486 int error; 1487 uint_t prot; 1488 page_t *pp; 1489 struct anon *ap, *newap; 1490 size_t i; 1491 caddr_t addr; 1492 1493 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1494 struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data; 1495 ulong_t old_idx = svd->anon_index; 1496 ulong_t new_idx = 0; 1497 1498 i = btopr(seg->s_size); 1499 addr = seg->s_base; 1500 1501 /* 1502 * XXX break cow sharing using PAGESIZE 1503 * pages. They will be relocated into larger 1504 * pages at fault time. 1505 */ 1506 while (i-- > 0) { 1507 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) { 1508 struct vpage *vpp; 1509 1510 vpp = &svd->vpage[seg_page(seg, addr)]; 1511 1512 /* 1513 * prot need not be computed below 'cause anon_private 1514 * is going to ignore it anyway as child doesn't inherit 1515 * pagelock from parent. 1516 */ 1517 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot; 1518 1519 /* 1520 * Check whether we should zero this or dup it. 1521 */ 1522 if (svd->svn_inz == SEGVN_INZ_ALL || 1523 (svd->svn_inz == SEGVN_INZ_VPP && 1524 VPP_ISINHZERO(vpp))) { 1525 pp = anon_zero(newseg, addr, &newap, 1526 newsvd->cred); 1527 } else { 1528 page_t *anon_pl[1+1]; 1529 uint_t vpprot; 1530 error = anon_getpage(&ap, &vpprot, anon_pl, 1531 PAGESIZE, seg, addr, S_READ, svd->cred); 1532 if (error != 0) 1533 return (error); 1534 1535 pp = anon_private(&newap, newseg, addr, prot, 1536 anon_pl[0], 0, newsvd->cred); 1537 } 1538 if (pp == NULL) { 1539 return (ENOMEM); 1540 } 1541 (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap, 1542 ANON_SLEEP); 1543 page_unlock(pp); 1544 } 1545 addr += PAGESIZE; 1546 old_idx++; 1547 new_idx++; 1548 } 1549 1550 return (0); 1551 } 1552 1553 static int 1554 segvn_dup(struct seg *seg, struct seg *newseg) 1555 { 1556 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1557 struct segvn_data *newsvd; 1558 pgcnt_t npages = seg_pages(seg); 1559 int error = 0; 1560 size_t len; 1561 struct anon_map *amp; 1562 1563 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1564 ASSERT(newseg->s_as->a_proc->p_parent == curproc); 1565 1566 /* 1567 * If segment has anon reserved, reserve more for the new seg. 1568 * For a MAP_NORESERVE segment swresv will be a count of all the 1569 * allocated anon slots; thus we reserve for the child as many slots 1570 * as the parent has allocated. This semantic prevents the child or 1571 * parent from dieing during a copy-on-write fault caused by trying 1572 * to write a shared pre-existing anon page. 
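 *
 * For example (illustrative numbers): if a 16MB MAP_NORESERVE parent
 * mapping has so far touched only 1MB worth of pages, svd->swresv is
 * that 1MB, and the fork reserves just 1MB for the child. That is
 * enough to guarantee that either process can later break COW on the
 * already-allocated pages without an out-of-swap failure, while the
 * untouched 15MB remains unreserved.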
1573 */ 1574 if ((len = svd->swresv) != 0) { 1575 if (anon_resv(svd->swresv) == 0) 1576 return (ENOMEM); 1577 1578 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 1579 seg, len, 0); 1580 } 1581 1582 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 1583 1584 newseg->s_ops = &segvn_ops; 1585 newseg->s_data = (void *)newsvd; 1586 newseg->s_szc = seg->s_szc; 1587 1588 newsvd->seg = newseg; 1589 if ((newsvd->vp = svd->vp) != NULL) { 1590 VN_HOLD(svd->vp); 1591 if (svd->type == MAP_SHARED) 1592 lgrp_shm_policy_init(NULL, svd->vp); 1593 } 1594 newsvd->offset = svd->offset; 1595 newsvd->prot = svd->prot; 1596 newsvd->maxprot = svd->maxprot; 1597 newsvd->pageprot = svd->pageprot; 1598 newsvd->type = svd->type; 1599 newsvd->cred = svd->cred; 1600 crhold(newsvd->cred); 1601 newsvd->advice = svd->advice; 1602 newsvd->pageadvice = svd->pageadvice; 1603 newsvd->svn_inz = svd->svn_inz; 1604 newsvd->swresv = svd->swresv; 1605 newsvd->pageswap = svd->pageswap; 1606 newsvd->flags = svd->flags; 1607 newsvd->softlockcnt = 0; 1608 newsvd->softlockcnt_sbase = 0; 1609 newsvd->softlockcnt_send = 0; 1610 newsvd->policy_info = svd->policy_info; 1611 newsvd->rcookie = HAT_INVALID_REGION_COOKIE; 1612 1613 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) { 1614 /* 1615 * Not attaching to a shared anon object. 1616 */ 1617 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) || 1618 svd->tr_state == SEGVN_TR_OFF); 1619 if (svd->tr_state == SEGVN_TR_ON) { 1620 ASSERT(newsvd->vp != NULL && amp != NULL); 1621 newsvd->tr_state = SEGVN_TR_INIT; 1622 } else { 1623 newsvd->tr_state = svd->tr_state; 1624 } 1625 newsvd->amp = NULL; 1626 newsvd->anon_index = 0; 1627 } else { 1628 /* regions for now are only used on pure vnode segments */ 1629 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 1630 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1631 newsvd->tr_state = SEGVN_TR_OFF; 1632 if (svd->type == MAP_SHARED) { 1633 ASSERT(svd->svn_inz == SEGVN_INZ_NONE); 1634 newsvd->amp = amp; 1635 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 1636 amp->refcnt++; 1637 ANON_LOCK_EXIT(&->a_rwlock); 1638 newsvd->anon_index = svd->anon_index; 1639 } else { 1640 int reclaim = 1; 1641 1642 /* 1643 * Allocate and initialize new anon_map structure. 1644 */ 1645 newsvd->amp = anonmap_alloc(newseg->s_size, 0, 1646 ANON_SLEEP); 1647 newsvd->amp->a_szc = newseg->s_szc; 1648 newsvd->anon_index = 0; 1649 ASSERT(svd->svn_inz == SEGVN_INZ_NONE || 1650 svd->svn_inz == SEGVN_INZ_ALL || 1651 svd->svn_inz == SEGVN_INZ_VPP); 1652 1653 /* 1654 * We don't have to acquire the anon_map lock 1655 * for the new segment (since it belongs to an 1656 * address space that is still not associated 1657 * with any process), or the segment in the old 1658 * address space (since all threads in it 1659 * are stopped while duplicating the address space). 1660 */ 1661 1662 /* 1663 * The goal of the following code is to make sure that 1664 * softlocked pages do not end up as copy on write 1665 * pages. This would cause problems where one 1666 * thread writes to a page that is COW and a different 1667 * thread in the same process has softlocked it. The 1668 * softlock lock would move away from this process 1669 * because the write would cause this process to get 1670 * a copy (without the softlock). 1671 * 1672 * The strategy here is to just break the 1673 * sharing on pages that could possibly be 1674 * softlocked. 
1675 * 1676 * In addition, if any pages have been marked that they 1677 * should be inherited as zero, then we immediately go 1678 * ahead and break COW and zero them. In the case of a 1679 * softlocked page that should be inherited zero, we 1680 * break COW and just get a zero page. 1681 */ 1682 retry: 1683 if (svd->softlockcnt || 1684 svd->svn_inz != SEGVN_INZ_NONE) { 1685 /* 1686 * The softlock count might be non zero 1687 * because some pages are still stuck in the 1688 * cache for lazy reclaim. Flush the cache 1689 * now. This should drop the count to zero. 1690 * [or there is really I/O going on to these 1691 * pages]. Note, we have the writers lock so 1692 * nothing gets inserted during the flush. 1693 */ 1694 if (svd->softlockcnt && reclaim == 1) { 1695 segvn_purge(seg); 1696 reclaim = 0; 1697 goto retry; 1698 } 1699 1700 error = segvn_dup_pages(seg, newseg); 1701 if (error != 0) { 1702 newsvd->vpage = NULL; 1703 goto out; 1704 } 1705 } else { /* common case */ 1706 if (seg->s_szc != 0) { 1707 /* 1708 * If at least one of anon slots of a 1709 * large page exists then make sure 1710 * all anon slots of a large page 1711 * exist to avoid partial cow sharing 1712 * of a large page in the future. 1713 */ 1714 anon_dup_fill_holes(amp->ahp, 1715 svd->anon_index, newsvd->amp->ahp, 1716 0, seg->s_size, seg->s_szc, 1717 svd->vp != NULL); 1718 } else { 1719 anon_dup(amp->ahp, svd->anon_index, 1720 newsvd->amp->ahp, 0, seg->s_size); 1721 } 1722 1723 hat_clrattr(seg->s_as->a_hat, seg->s_base, 1724 seg->s_size, PROT_WRITE); 1725 } 1726 } 1727 } 1728 /* 1729 * If necessary, create a vpage structure for the new segment. 1730 * Do not copy any page lock indications. 1731 */ 1732 if (svd->vpage != NULL) { 1733 uint_t i; 1734 struct vpage *ovp = svd->vpage; 1735 struct vpage *nvp; 1736 1737 nvp = newsvd->vpage = 1738 kmem_alloc(vpgtob(npages), KM_SLEEP); 1739 for (i = 0; i < npages; i++) { 1740 *nvp = *ovp++; 1741 VPP_CLRPPLOCK(nvp++); 1742 } 1743 } else 1744 newsvd->vpage = NULL; 1745 1746 /* Inform the vnode of the new mapping */ 1747 if (newsvd->vp != NULL) { 1748 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset, 1749 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot, 1750 newsvd->maxprot, newsvd->type, newsvd->cred, NULL); 1751 } 1752 out: 1753 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1754 ASSERT(newsvd->amp == NULL); 1755 ASSERT(newsvd->tr_state == SEGVN_TR_OFF); 1756 newsvd->rcookie = svd->rcookie; 1757 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie); 1758 } 1759 return (error); 1760 } 1761 1762 1763 /* 1764 * callback function to invoke free_vp_pages() for only those pages actually 1765 * processed by the HAT when a shared region is destroyed. 
1766 */ 1767 extern int free_pages; 1768 1769 static void 1770 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr, 1771 size_t r_size, void *r_obj, u_offset_t r_objoff) 1772 { 1773 u_offset_t off; 1774 size_t len; 1775 vnode_t *vp = (vnode_t *)r_obj; 1776 1777 ASSERT(eaddr > saddr); 1778 ASSERT(saddr >= r_saddr); 1779 ASSERT(saddr < r_saddr + r_size); 1780 ASSERT(eaddr > r_saddr); 1781 ASSERT(eaddr <= r_saddr + r_size); 1782 ASSERT(vp != NULL); 1783 1784 if (!free_pages) { 1785 return; 1786 } 1787 1788 len = eaddr - saddr; 1789 off = (saddr - r_saddr) + r_objoff; 1790 free_vp_pages(vp, off, len); 1791 } 1792 1793 /* 1794 * callback function used by segvn_unmap to invoke free_vp_pages() for only 1795 * those pages actually processed by the HAT 1796 */ 1797 static void 1798 segvn_hat_unload_callback(hat_callback_t *cb) 1799 { 1800 struct seg *seg = cb->hcb_data; 1801 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1802 size_t len; 1803 u_offset_t off; 1804 1805 ASSERT(svd->vp != NULL); 1806 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr); 1807 ASSERT(cb->hcb_start_addr >= seg->s_base); 1808 1809 len = cb->hcb_end_addr - cb->hcb_start_addr; 1810 off = cb->hcb_start_addr - seg->s_base; 1811 free_vp_pages(svd->vp, svd->offset + off, len); 1812 } 1813 1814 /* 1815 * This function determines the number of bytes of swap reserved by 1816 * a segment for which per-page accounting is present. It is used to 1817 * calculate the correct value of a segvn_data's swresv. 1818 */ 1819 static size_t 1820 segvn_count_swap_by_vpages(struct seg *seg) 1821 { 1822 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1823 struct vpage *vp, *evp; 1824 size_t nswappages = 0; 1825 1826 ASSERT(svd->pageswap); 1827 ASSERT(svd->vpage != NULL); 1828 1829 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 1830 1831 for (vp = svd->vpage; vp < evp; vp++) { 1832 if (VPP_ISSWAPRES(vp)) 1833 nswappages++; 1834 } 1835 1836 return (nswappages << PAGESHIFT); 1837 } 1838 1839 static int 1840 segvn_unmap(struct seg *seg, caddr_t addr, size_t len) 1841 { 1842 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 1843 struct segvn_data *nsvd; 1844 struct seg *nseg; 1845 struct anon_map *amp; 1846 pgcnt_t opages; /* old segment size in pages */ 1847 pgcnt_t npages; /* new segment size in pages */ 1848 pgcnt_t dpages; /* pages being deleted (unmapped) */ 1849 hat_callback_t callback; /* used for free_vp_pages() */ 1850 hat_callback_t *cbp = NULL; 1851 caddr_t nbase; 1852 size_t nsize; 1853 size_t oswresv; 1854 int reclaim = 1; 1855 1856 /* 1857 * We don't need any segment level locks for "segvn" data 1858 * since the address space is "write" locked. 1859 */ 1860 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 1861 1862 /* 1863 * Fail the unmap if pages are SOFTLOCKed through this mapping. 1864 * softlockcnt is protected from change by the as write lock. 1865 */ 1866 retry: 1867 if (svd->softlockcnt > 0) { 1868 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1869 1870 /* 1871 * If this is shared segment non 0 softlockcnt 1872 * means locked pages are still in use. 1873 */ 1874 if (svd->type == MAP_SHARED) { 1875 return (EAGAIN); 1876 } 1877 1878 /* 1879 * since we do have the writers lock nobody can fill 1880 * the cache during the purge. The flush either succeeds 1881 * or we still have pending I/Os. 
1882 */ 1883 if (reclaim == 1) { 1884 segvn_purge(seg); 1885 reclaim = 0; 1886 goto retry; 1887 } 1888 return (EAGAIN); 1889 } 1890 1891 /* 1892 * Check for bad sizes 1893 */ 1894 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size || 1895 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) { 1896 panic("segvn_unmap"); 1897 /*NOTREACHED*/ 1898 } 1899 1900 if (seg->s_szc != 0) { 1901 size_t pgsz = page_get_pagesize(seg->s_szc); 1902 int err; 1903 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 1904 ASSERT(seg->s_base != addr || seg->s_size != len); 1905 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1906 ASSERT(svd->amp == NULL); 1907 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1908 hat_leave_region(seg->s_as->a_hat, 1909 svd->rcookie, HAT_REGION_TEXT); 1910 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1911 /* 1912 * could pass a flag to segvn_demote_range() 1913 * below to tell it not to do any unloads but 1914 * this case is rare enough to not bother for 1915 * now. 1916 */ 1917 } else if (svd->tr_state == SEGVN_TR_INIT) { 1918 svd->tr_state = SEGVN_TR_OFF; 1919 } else if (svd->tr_state == SEGVN_TR_ON) { 1920 ASSERT(svd->amp != NULL); 1921 segvn_textunrepl(seg, 1); 1922 ASSERT(svd->amp == NULL); 1923 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1924 } 1925 VM_STAT_ADD(segvnvmstats.demoterange[0]); 1926 err = segvn_demote_range(seg, addr, len, SDR_END, 0); 1927 if (err == 0) { 1928 return (IE_RETRY); 1929 } 1930 return (err); 1931 } 1932 } 1933 1934 /* Inform the vnode of the unmapping. */ 1935 if (svd->vp) { 1936 int error; 1937 1938 error = VOP_DELMAP(svd->vp, 1939 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base), 1940 seg->s_as, addr, len, svd->prot, svd->maxprot, 1941 svd->type, svd->cred, NULL); 1942 1943 if (error == EAGAIN) 1944 return (error); 1945 } 1946 1947 /* 1948 * Remove any page locks set through this mapping. 1949 * If text replication is not off no page locks could have been 1950 * established via this mapping. 1951 */ 1952 if (svd->tr_state == SEGVN_TR_OFF) { 1953 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0); 1954 } 1955 1956 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 1957 ASSERT(svd->amp == NULL); 1958 ASSERT(svd->tr_state == SEGVN_TR_OFF); 1959 ASSERT(svd->type == MAP_PRIVATE); 1960 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 1961 HAT_REGION_TEXT); 1962 svd->rcookie = HAT_INVALID_REGION_COOKIE; 1963 } else if (svd->tr_state == SEGVN_TR_ON) { 1964 ASSERT(svd->amp != NULL); 1965 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE)); 1966 segvn_textunrepl(seg, 1); 1967 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 1968 } else { 1969 if (svd->tr_state != SEGVN_TR_OFF) { 1970 ASSERT(svd->tr_state == SEGVN_TR_INIT); 1971 svd->tr_state = SEGVN_TR_OFF; 1972 } 1973 /* 1974 * Unload any hardware translations in the range to be taken 1975 * out. Use a callback to invoke free_vp_pages() effectively. 
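 *
 * cbp remains NULL when the segment has no vnode (or free_pages is
 * disabled), in which case the HAT simply unloads the range without
 * invoking any callback.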
1976 */ 1977 if (svd->vp != NULL && free_pages != 0) { 1978 callback.hcb_data = seg; 1979 callback.hcb_function = segvn_hat_unload_callback; 1980 cbp = &callback; 1981 } 1982 hat_unload_callback(seg->s_as->a_hat, addr, len, 1983 HAT_UNLOAD_UNMAP, cbp); 1984 1985 if (svd->type == MAP_SHARED && svd->vp != NULL && 1986 (svd->vp->v_flag & VVMEXEC) && 1987 ((svd->prot & PROT_WRITE) || svd->pageprot)) { 1988 segvn_inval_trcache(svd->vp); 1989 } 1990 } 1991 1992 /* 1993 * Check for entire segment 1994 */ 1995 if (addr == seg->s_base && len == seg->s_size) { 1996 seg_free(seg); 1997 return (0); 1998 } 1999 2000 opages = seg_pages(seg); 2001 dpages = btop(len); 2002 npages = opages - dpages; 2003 amp = svd->amp; 2004 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc); 2005 2006 /* 2007 * Check for beginning of segment 2008 */ 2009 if (addr == seg->s_base) { 2010 if (svd->vpage != NULL) { 2011 size_t nbytes; 2012 struct vpage *ovpage; 2013 2014 ovpage = svd->vpage; /* keep pointer to vpage */ 2015 2016 nbytes = vpgtob(npages); 2017 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2018 bcopy(&ovpage[dpages], svd->vpage, nbytes); 2019 2020 /* free up old vpage */ 2021 kmem_free(ovpage, vpgtob(opages)); 2022 } 2023 if (amp != NULL) { 2024 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2025 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2026 /* 2027 * Shared anon map is no longer in use. Before 2028 * freeing its pages purge all entries from 2029 * pcache that belong to this amp. 2030 */ 2031 if (svd->type == MAP_SHARED) { 2032 ASSERT(amp->refcnt == 1); 2033 ASSERT(svd->softlockcnt == 0); 2034 anonmap_purge(amp); 2035 } 2036 /* 2037 * Free up now unused parts of anon_map array. 2038 */ 2039 if (amp->a_szc == seg->s_szc) { 2040 if (seg->s_szc != 0) { 2041 anon_free_pages(amp->ahp, 2042 svd->anon_index, len, 2043 seg->s_szc); 2044 } else { 2045 anon_free(amp->ahp, 2046 svd->anon_index, 2047 len); 2048 } 2049 } else { 2050 ASSERT(svd->type == MAP_SHARED); 2051 ASSERT(amp->a_szc > seg->s_szc); 2052 anon_shmap_free_pages(amp, 2053 svd->anon_index, len); 2054 } 2055 2056 /* 2057 * Unreserve swap space for the 2058 * unmapped chunk of this segment in 2059 * case it's MAP_SHARED 2060 */ 2061 if (svd->type == MAP_SHARED) { 2062 anon_unresv_zone(len, 2063 seg->s_as->a_proc->p_zone); 2064 amp->swresv -= len; 2065 } 2066 } 2067 ANON_LOCK_EXIT(&->a_rwlock); 2068 svd->anon_index += dpages; 2069 } 2070 if (svd->vp != NULL) 2071 svd->offset += len; 2072 2073 seg->s_base += len; 2074 seg->s_size -= len; 2075 2076 if (svd->swresv) { 2077 if (svd->flags & MAP_NORESERVE) { 2078 ASSERT(amp); 2079 oswresv = svd->swresv; 2080 2081 svd->swresv = ptob(anon_pages(amp->ahp, 2082 svd->anon_index, npages)); 2083 anon_unresv_zone(oswresv - svd->swresv, 2084 seg->s_as->a_proc->p_zone); 2085 if (SEG_IS_PARTIAL_RESV(seg)) 2086 seg->s_as->a_resvsize -= oswresv - 2087 svd->swresv; 2088 } else { 2089 size_t unlen; 2090 2091 if (svd->pageswap) { 2092 oswresv = svd->swresv; 2093 svd->swresv = 2094 segvn_count_swap_by_vpages(seg); 2095 ASSERT(oswresv >= svd->swresv); 2096 unlen = oswresv - svd->swresv; 2097 } else { 2098 svd->swresv -= len; 2099 ASSERT(svd->swresv == seg->s_size); 2100 unlen = len; 2101 } 2102 anon_unresv_zone(unlen, 2103 seg->s_as->a_proc->p_zone); 2104 } 2105 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2106 seg, len, 0); 2107 } 2108 2109 return (0); 2110 } 2111 2112 /* 2113 * Check for end of segment 2114 */ 2115 if (addr + len == seg->s_base + seg->s_size) { 2116 if (svd->vpage != NULL) { 2117 size_t nbytes; 2118 struct 
vpage *ovpage;
2119
2120 ovpage = svd->vpage;	/* keep pointer to vpage */
2121
2122 nbytes = vpgtob(npages);
2123 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2124 bcopy(ovpage, svd->vpage, nbytes);
2125
2126 /* free up old vpage */
2127 kmem_free(ovpage, vpgtob(opages));
2128
2129 }
2130 if (amp != NULL) {
2131 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2132 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2133 /*
2134 * Free up now unused parts of anon_map array.
2135 */
2136 ulong_t an_idx = svd->anon_index + npages;
2137
2138 /*
2139 * Shared anon map is no longer in use. Before
2140 * freeing its pages purge all entries from
2141 * pcache that belong to this amp.
2142 */
2143 if (svd->type == MAP_SHARED) {
2144 ASSERT(amp->refcnt == 1);
2145 ASSERT(svd->softlockcnt == 0);
2146 anonmap_purge(amp);
2147 }
2148
2149 if (amp->a_szc == seg->s_szc) {
2150 if (seg->s_szc != 0) {
2151 anon_free_pages(amp->ahp,
2152 an_idx, len,
2153 seg->s_szc);
2154 } else {
2155 anon_free(amp->ahp, an_idx,
2156 len);
2157 }
2158 } else {
2159 ASSERT(svd->type == MAP_SHARED);
2160 ASSERT(amp->a_szc > seg->s_szc);
2161 anon_shmap_free_pages(amp,
2162 an_idx, len);
2163 }
2164
2165 /*
2166 * Unreserve swap space for the
2167 * unmapped chunk of this segment in
2168 * case it's MAP_SHARED
2169 */
2170 if (svd->type == MAP_SHARED) {
2171 anon_unresv_zone(len,
2172 seg->s_as->a_proc->p_zone);
2173 amp->swresv -= len;
2174 }
2175 }
2176 ANON_LOCK_EXIT(&amp->a_rwlock);
2177 }
2178
2179 seg->s_size -= len;
2180
2181 if (svd->swresv) {
2182 if (svd->flags & MAP_NORESERVE) {
2183 ASSERT(amp);
2184 oswresv = svd->swresv;
2185 svd->swresv = ptob(anon_pages(amp->ahp,
2186 svd->anon_index, npages));
2187 anon_unresv_zone(oswresv - svd->swresv,
2188 seg->s_as->a_proc->p_zone);
2189 if (SEG_IS_PARTIAL_RESV(seg))
2190 seg->s_as->a_resvsize -= oswresv -
2191 svd->swresv;
2192 } else {
2193 size_t unlen;
2194
2195 if (svd->pageswap) {
2196 oswresv = svd->swresv;
2197 svd->swresv =
2198 segvn_count_swap_by_vpages(seg);
2199 ASSERT(oswresv >= svd->swresv);
2200 unlen = oswresv - svd->swresv;
2201 } else {
2202 svd->swresv -= len;
2203 ASSERT(svd->swresv == seg->s_size);
2204 unlen = len;
2205 }
2206 anon_unresv_zone(unlen,
2207 seg->s_as->a_proc->p_zone);
2208 }
2209 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2210 "anon proc:%p %lu %u", seg, len, 0);
2211 }
2212
2213 return (0);
2214 }
2215
2216 /*
2217 * The section to go is in the middle of the segment,
2218 * have to make it into two segments. nseg is made for
2219 * the high end while seg is cut down at the low end.
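 *
 * Illustrative layout (addresses are examples only):
 *
 *	before:	seg  [s_base ........................... s_base + s_size)
 *	unmap:	             [addr ...... addr + len)
 *	after:	seg  [s_base .. addr)    nseg [addr + len .. old end)
 *
 * which is what the assignments below compute:
 *
 *	nbase = addr + len;
 *	nsize = (seg->s_base + seg->s_size) - nbase;
 *	seg->s_size = addr - seg->s_base;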
2220 */ 2221 nbase = addr + len; /* new seg base */ 2222 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */ 2223 seg->s_size = addr - seg->s_base; /* shrink old seg */ 2224 nseg = seg_alloc(seg->s_as, nbase, nsize); 2225 if (nseg == NULL) { 2226 panic("segvn_unmap seg_alloc"); 2227 /*NOTREACHED*/ 2228 } 2229 nseg->s_ops = seg->s_ops; 2230 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 2231 nseg->s_data = (void *)nsvd; 2232 nseg->s_szc = seg->s_szc; 2233 *nsvd = *svd; 2234 nsvd->seg = nseg; 2235 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base); 2236 nsvd->swresv = 0; 2237 nsvd->softlockcnt = 0; 2238 nsvd->softlockcnt_sbase = 0; 2239 nsvd->softlockcnt_send = 0; 2240 nsvd->svn_inz = svd->svn_inz; 2241 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 2242 2243 if (svd->vp != NULL) { 2244 VN_HOLD(nsvd->vp); 2245 if (nsvd->type == MAP_SHARED) 2246 lgrp_shm_policy_init(NULL, nsvd->vp); 2247 } 2248 crhold(svd->cred); 2249 2250 if (svd->vpage == NULL) { 2251 nsvd->vpage = NULL; 2252 } else { 2253 /* need to split vpage into two arrays */ 2254 size_t nbytes; 2255 struct vpage *ovpage; 2256 2257 ovpage = svd->vpage; /* keep pointer to vpage */ 2258 2259 npages = seg_pages(seg); /* seg has shrunk */ 2260 nbytes = vpgtob(npages); 2261 svd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2262 2263 bcopy(ovpage, svd->vpage, nbytes); 2264 2265 npages = seg_pages(nseg); 2266 nbytes = vpgtob(npages); 2267 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 2268 2269 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes); 2270 2271 /* free up old vpage */ 2272 kmem_free(ovpage, vpgtob(opages)); 2273 } 2274 2275 if (amp == NULL) { 2276 nsvd->amp = NULL; 2277 nsvd->anon_index = 0; 2278 } else { 2279 /* 2280 * Need to create a new anon map for the new segment. 2281 * We'll also allocate a new smaller array for the old 2282 * smaller segment to save space. 2283 */ 2284 opages = btop((uintptr_t)(addr - seg->s_base)); 2285 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2286 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) { 2287 /* 2288 * Free up now unused parts of anon_map array. 2289 */ 2290 ulong_t an_idx = svd->anon_index + opages; 2291 2292 /* 2293 * Shared anon map is no longer in use. Before 2294 * freeing its pages purge all entries from 2295 * pcache that belong to this amp. 
2296 */ 2297 if (svd->type == MAP_SHARED) { 2298 ASSERT(amp->refcnt == 1); 2299 ASSERT(svd->softlockcnt == 0); 2300 anonmap_purge(amp); 2301 } 2302 2303 if (amp->a_szc == seg->s_szc) { 2304 if (seg->s_szc != 0) { 2305 anon_free_pages(amp->ahp, an_idx, len, 2306 seg->s_szc); 2307 } else { 2308 anon_free(amp->ahp, an_idx, 2309 len); 2310 } 2311 } else { 2312 ASSERT(svd->type == MAP_SHARED); 2313 ASSERT(amp->a_szc > seg->s_szc); 2314 anon_shmap_free_pages(amp, an_idx, len); 2315 } 2316 2317 /* 2318 * Unreserve swap space for the 2319 * unmapped chunk of this segment in 2320 * case it's MAP_SHARED 2321 */ 2322 if (svd->type == MAP_SHARED) { 2323 anon_unresv_zone(len, 2324 seg->s_as->a_proc->p_zone); 2325 amp->swresv -= len; 2326 } 2327 } 2328 nsvd->anon_index = svd->anon_index + 2329 btop((uintptr_t)(nseg->s_base - seg->s_base)); 2330 if (svd->type == MAP_SHARED) { 2331 amp->refcnt++; 2332 nsvd->amp = amp; 2333 } else { 2334 struct anon_map *namp; 2335 struct anon_hdr *nahp; 2336 2337 ASSERT(svd->type == MAP_PRIVATE); 2338 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 2339 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 2340 namp->a_szc = seg->s_szc; 2341 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp, 2342 0, btop(seg->s_size), ANON_SLEEP); 2343 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index, 2344 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 2345 anon_release(amp->ahp, btop(amp->size)); 2346 svd->anon_index = 0; 2347 nsvd->anon_index = 0; 2348 amp->ahp = nahp; 2349 amp->size = seg->s_size; 2350 nsvd->amp = namp; 2351 } 2352 ANON_LOCK_EXIT(&->a_rwlock); 2353 } 2354 if (svd->swresv) { 2355 if (svd->flags & MAP_NORESERVE) { 2356 ASSERT(amp); 2357 oswresv = svd->swresv; 2358 svd->swresv = ptob(anon_pages(amp->ahp, 2359 svd->anon_index, btop(seg->s_size))); 2360 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 2361 nsvd->anon_index, btop(nseg->s_size))); 2362 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2363 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv), 2364 seg->s_as->a_proc->p_zone); 2365 if (SEG_IS_PARTIAL_RESV(seg)) 2366 seg->s_as->a_resvsize -= oswresv - 2367 (svd->swresv + nsvd->swresv); 2368 } else { 2369 size_t unlen; 2370 2371 if (svd->pageswap) { 2372 oswresv = svd->swresv; 2373 svd->swresv = segvn_count_swap_by_vpages(seg); 2374 nsvd->swresv = segvn_count_swap_by_vpages(nseg); 2375 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 2376 unlen = oswresv - (svd->swresv + nsvd->swresv); 2377 } else { 2378 if (seg->s_size + nseg->s_size + len != 2379 svd->swresv) { 2380 panic("segvn_unmap: cannot split " 2381 "swap reservation"); 2382 /*NOTREACHED*/ 2383 } 2384 svd->swresv = seg->s_size; 2385 nsvd->swresv = nseg->s_size; 2386 unlen = len; 2387 } 2388 anon_unresv_zone(unlen, 2389 seg->s_as->a_proc->p_zone); 2390 } 2391 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2392 seg, len, 0); 2393 } 2394 2395 return (0); /* I'm glad that's all over with! */ 2396 } 2397 2398 static void 2399 segvn_free(struct seg *seg) 2400 { 2401 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2402 pgcnt_t npages = seg_pages(seg); 2403 struct anon_map *amp; 2404 size_t len; 2405 2406 /* 2407 * We don't need any segment level locks for "segvn" data 2408 * since the address space is "write" locked. 2409 */ 2410 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 2411 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2412 2413 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2414 2415 /* 2416 * Be sure to unlock pages. 
XXX Why do things get free'ed instead 2417 * of unmapped? XXX 2418 */ 2419 (void) segvn_lockop(seg, seg->s_base, seg->s_size, 2420 0, MC_UNLOCK, NULL, 0); 2421 2422 /* 2423 * Deallocate the vpage and anon pointers if necessary and possible. 2424 */ 2425 if (svd->vpage != NULL) { 2426 kmem_free(svd->vpage, vpgtob(npages)); 2427 svd->vpage = NULL; 2428 } 2429 if ((amp = svd->amp) != NULL) { 2430 /* 2431 * If there are no more references to this anon_map 2432 * structure, then deallocate the structure after freeing 2433 * up all the anon slot pointers that we can. 2434 */ 2435 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 2436 ASSERT(amp->a_szc >= seg->s_szc); 2437 if (--amp->refcnt == 0) { 2438 if (svd->type == MAP_PRIVATE) { 2439 /* 2440 * Private - we only need to anon_free 2441 * the part that this segment refers to. 2442 */ 2443 if (seg->s_szc != 0) { 2444 anon_free_pages(amp->ahp, 2445 svd->anon_index, seg->s_size, 2446 seg->s_szc); 2447 } else { 2448 anon_free(amp->ahp, svd->anon_index, 2449 seg->s_size); 2450 } 2451 } else { 2452 2453 /* 2454 * Shared anon map is no longer in use. Before 2455 * freeing its pages purge all entries from 2456 * pcache that belong to this amp. 2457 */ 2458 ASSERT(svd->softlockcnt == 0); 2459 anonmap_purge(amp); 2460 2461 /* 2462 * Shared - anon_free the entire 2463 * anon_map's worth of stuff and 2464 * release any swap reservation. 2465 */ 2466 if (amp->a_szc != 0) { 2467 anon_shmap_free_pages(amp, 0, 2468 amp->size); 2469 } else { 2470 anon_free(amp->ahp, 0, amp->size); 2471 } 2472 if ((len = amp->swresv) != 0) { 2473 anon_unresv_zone(len, 2474 seg->s_as->a_proc->p_zone); 2475 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 2476 "anon proc:%p %lu %u", seg, len, 0); 2477 } 2478 } 2479 svd->amp = NULL; 2480 ANON_LOCK_EXIT(&->a_rwlock); 2481 anonmap_free(amp); 2482 } else if (svd->type == MAP_PRIVATE) { 2483 /* 2484 * We had a private mapping which still has 2485 * a held anon_map so just free up all the 2486 * anon slot pointers that we were using. 2487 */ 2488 if (seg->s_szc != 0) { 2489 anon_free_pages(amp->ahp, svd->anon_index, 2490 seg->s_size, seg->s_szc); 2491 } else { 2492 anon_free(amp->ahp, svd->anon_index, 2493 seg->s_size); 2494 } 2495 ANON_LOCK_EXIT(&->a_rwlock); 2496 } else { 2497 ANON_LOCK_EXIT(&->a_rwlock); 2498 } 2499 } 2500 2501 /* 2502 * Release swap reservation. 2503 */ 2504 if ((len = svd->swresv) != 0) { 2505 anon_unresv_zone(svd->swresv, 2506 seg->s_as->a_proc->p_zone); 2507 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u", 2508 seg, len, 0); 2509 if (SEG_IS_PARTIAL_RESV(seg)) 2510 seg->s_as->a_resvsize -= svd->swresv; 2511 svd->swresv = 0; 2512 } 2513 /* 2514 * Release claim on vnode, credentials, and finally free the 2515 * private data. 2516 */ 2517 if (svd->vp != NULL) { 2518 if (svd->type == MAP_SHARED) 2519 lgrp_shm_policy_fini(NULL, svd->vp); 2520 VN_RELE(svd->vp); 2521 svd->vp = NULL; 2522 } 2523 crfree(svd->cred); 2524 svd->pageprot = 0; 2525 svd->pageadvice = 0; 2526 svd->pageswap = 0; 2527 svd->cred = NULL; 2528 2529 /* 2530 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's 2531 * still working with this segment without holding as lock (in case 2532 * it's called by pcache async thread). 2533 */ 2534 ASSERT(svd->softlockcnt == 0); 2535 mutex_enter(&svd->segfree_syncmtx); 2536 mutex_exit(&svd->segfree_syncmtx); 2537 2538 seg->s_data = NULL; 2539 kmem_cache_free(segvn_cache, svd); 2540 } 2541 2542 /* 2543 * Do a F_SOFTUNLOCK call over the range requested. The range must have 2544 * already been F_SOFTLOCK'ed. 
2545 * Caller must always match addr and len of a softunlock with a previous 2546 * softlock with exactly the same addr and len. 2547 */ 2548 static void 2549 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw) 2550 { 2551 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2552 page_t *pp; 2553 caddr_t adr; 2554 struct vnode *vp; 2555 u_offset_t offset; 2556 ulong_t anon_index; 2557 struct anon_map *amp; 2558 struct anon *ap = NULL; 2559 2560 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 2561 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 2562 2563 if ((amp = svd->amp) != NULL) 2564 anon_index = svd->anon_index + seg_page(seg, addr); 2565 2566 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 2567 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2568 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie); 2569 } else { 2570 hat_unlock(seg->s_as->a_hat, addr, len); 2571 } 2572 for (adr = addr; adr < addr + len; adr += PAGESIZE) { 2573 if (amp != NULL) { 2574 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 2575 if ((ap = anon_get_ptr(amp->ahp, anon_index++)) 2576 != NULL) { 2577 swap_xlate(ap, &vp, &offset); 2578 } else { 2579 vp = svd->vp; 2580 offset = svd->offset + 2581 (uintptr_t)(adr - seg->s_base); 2582 } 2583 ANON_LOCK_EXIT(&->a_rwlock); 2584 } else { 2585 vp = svd->vp; 2586 offset = svd->offset + 2587 (uintptr_t)(adr - seg->s_base); 2588 } 2589 2590 /* 2591 * Use page_find() instead of page_lookup() to 2592 * find the page since we know that it is locked. 2593 */ 2594 pp = page_find(vp, offset); 2595 if (pp == NULL) { 2596 panic( 2597 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx", 2598 (void *)adr, (void *)ap, (void *)vp, offset); 2599 /*NOTREACHED*/ 2600 } 2601 2602 if (rw == S_WRITE) { 2603 hat_setrefmod(pp); 2604 if (seg->s_as->a_vbits) 2605 hat_setstat(seg->s_as, adr, PAGESIZE, 2606 P_REF | P_MOD); 2607 } else if (rw != S_OTHER) { 2608 hat_setref(pp); 2609 if (seg->s_as->a_vbits) 2610 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF); 2611 } 2612 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2613 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset); 2614 page_unlock(pp); 2615 } 2616 ASSERT(svd->softlockcnt >= btop(len)); 2617 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) { 2618 /* 2619 * All SOFTLOCKS are gone. Wakeup any waiting 2620 * unmappers so they can try again to unmap. 2621 * Check for waiters first without the mutex 2622 * held so we don't always grab the mutex on 2623 * softunlocks. 2624 */ 2625 if (AS_ISUNMAPWAIT(seg->s_as)) { 2626 mutex_enter(&seg->s_as->a_contents); 2627 if (AS_ISUNMAPWAIT(seg->s_as)) { 2628 AS_CLRUNMAPWAIT(seg->s_as); 2629 cv_broadcast(&seg->s_as->a_cv); 2630 } 2631 mutex_exit(&seg->s_as->a_contents); 2632 } 2633 } 2634 } 2635 2636 #define PAGE_HANDLED ((page_t *)-1) 2637 2638 /* 2639 * Release all the pages in the NULL terminated ppp list 2640 * which haven't already been converted to PAGE_HANDLED. 2641 */ 2642 static void 2643 segvn_pagelist_rele(page_t **ppp) 2644 { 2645 for (; *ppp != NULL; ppp++) { 2646 if (*ppp != PAGE_HANDLED) 2647 page_unlock(*ppp); 2648 } 2649 } 2650 2651 static int stealcow = 1; 2652 2653 /* 2654 * Workaround for viking chip bug. See bug id 1220902. 2655 * To fix this down in pagefault() would require importing so 2656 * much as and segvn code as to be unmaintainable. 2657 */ 2658 int enable_mbit_wa = 0; 2659 2660 /* 2661 * Handles all the dirty work of getting the right 2662 * anonymous pages and loading up the translations. 
2663 * This routine is called only from segvn_fault() 2664 * when looping over the range of addresses requested. 2665 * 2666 * The basic algorithm here is: 2667 * If this is an anon_zero case 2668 * Call anon_zero to allocate page 2669 * Load up translation 2670 * Return 2671 * endif 2672 * If this is an anon page 2673 * Use anon_getpage to get the page 2674 * else 2675 * Find page in pl[] list passed in 2676 * endif 2677 * If not a cow 2678 * Load up the translation to the page 2679 * return 2680 * endif 2681 * Call anon_private to handle cow 2682 * Load up (writable) translation to new page 2683 */ 2684 static faultcode_t 2685 segvn_faultpage( 2686 struct hat *hat, /* the hat to use for mapping */ 2687 struct seg *seg, /* seg_vn of interest */ 2688 caddr_t addr, /* address in as */ 2689 u_offset_t off, /* offset in vp */ 2690 struct vpage *vpage, /* pointer to vpage for vp, off */ 2691 page_t *pl[], /* object source page pointer */ 2692 uint_t vpprot, /* access allowed to object pages */ 2693 enum fault_type type, /* type of fault */ 2694 enum seg_rw rw, /* type of access at fault */ 2695 int brkcow) /* we may need to break cow */ 2696 { 2697 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 2698 page_t *pp, **ppp; 2699 uint_t pageflags = 0; 2700 page_t *anon_pl[1 + 1]; 2701 page_t *opp = NULL; /* original page */ 2702 uint_t prot; 2703 int err; 2704 int cow; 2705 int claim; 2706 int steal = 0; 2707 ulong_t anon_index; 2708 struct anon *ap, *oldap; 2709 struct anon_map *amp; 2710 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 2711 int anon_lock = 0; 2712 anon_sync_obj_t cookie; 2713 2714 if (svd->flags & MAP_TEXT) { 2715 hat_flag |= HAT_LOAD_TEXT; 2716 } 2717 2718 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 2719 ASSERT(seg->s_szc == 0); 2720 ASSERT(svd->tr_state != SEGVN_TR_INIT); 2721 2722 /* 2723 * Initialize protection value for this page. 2724 * If we have per page protection values check it now. 2725 */ 2726 if (svd->pageprot) { 2727 uint_t protchk; 2728 2729 switch (rw) { 2730 case S_READ: 2731 protchk = PROT_READ; 2732 break; 2733 case S_WRITE: 2734 protchk = PROT_WRITE; 2735 break; 2736 case S_EXEC: 2737 protchk = PROT_EXEC; 2738 break; 2739 case S_OTHER: 2740 default: 2741 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 2742 break; 2743 } 2744 2745 prot = VPP_PROT(vpage); 2746 if ((prot & protchk) == 0) 2747 return (FC_PROT); /* illegal access type */ 2748 } else { 2749 prot = svd->prot; 2750 } 2751 2752 if (type == F_SOFTLOCK) { 2753 atomic_inc_ulong((ulong_t *)&svd->softlockcnt); 2754 } 2755 2756 /* 2757 * Always acquire the anon array lock to prevent 2 threads from 2758 * allocating separate anon slots for the same "addr". 2759 */ 2760 2761 if ((amp = svd->amp) != NULL) { 2762 ASSERT(RW_READ_HELD(&->a_rwlock)); 2763 anon_index = svd->anon_index + seg_page(seg, addr); 2764 anon_array_enter(amp, anon_index, &cookie); 2765 anon_lock = 1; 2766 } 2767 2768 if (svd->vp == NULL && amp != NULL) { 2769 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) { 2770 /* 2771 * Allocate a (normally) writable anonymous page of 2772 * zeroes. If no advance reservations, reserve now. 
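 *
 * For a MAP_NORESERVE segment, swap is reserved lazily here, one page
 * (ptob(1) bytes) at a time as each zero-fill page is first touched;
 * svd->swresv and the address space's a_resvsize are bumped on success
 * and the fault fails with ENOMEM if the reservation cannot be made.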
2773 */ 2774 if (svd->flags & MAP_NORESERVE) { 2775 if (anon_resv_zone(ptob(1), 2776 seg->s_as->a_proc->p_zone)) { 2777 atomic_add_long(&svd->swresv, ptob(1)); 2778 atomic_add_long(&seg->s_as->a_resvsize, 2779 ptob(1)); 2780 } else { 2781 err = ENOMEM; 2782 goto out; 2783 } 2784 } 2785 if ((pp = anon_zero(seg, addr, &ap, 2786 svd->cred)) == NULL) { 2787 err = ENOMEM; 2788 goto out; /* out of swap space */ 2789 } 2790 /* 2791 * Re-acquire the anon_map lock and 2792 * initialize the anon array entry. 2793 */ 2794 (void) anon_set_ptr(amp->ahp, anon_index, ap, 2795 ANON_SLEEP); 2796 2797 ASSERT(pp->p_szc == 0); 2798 2799 /* 2800 * Handle pages that have been marked for migration 2801 */ 2802 if (lgrp_optimizations()) 2803 page_migrate(seg, addr, &pp, 1); 2804 2805 if (enable_mbit_wa) { 2806 if (rw == S_WRITE) 2807 hat_setmod(pp); 2808 else if (!hat_ismod(pp)) 2809 prot &= ~PROT_WRITE; 2810 } 2811 /* 2812 * If AS_PAGLCK is set in a_flags (via memcntl(2) 2813 * with MC_LOCKAS, MCL_FUTURE) and this is a 2814 * MAP_NORESERVE segment, we may need to 2815 * permanently lock the page as it is being faulted 2816 * for the first time. The following text applies 2817 * only to MAP_NORESERVE segments: 2818 * 2819 * As per memcntl(2), if this segment was created 2820 * after MCL_FUTURE was applied (a "future" 2821 * segment), its pages must be locked. If this 2822 * segment existed at MCL_FUTURE application (a 2823 * "past" segment), the interface is unclear. 2824 * 2825 * We decide to lock only if vpage is present: 2826 * 2827 * - "future" segments will have a vpage array (see 2828 * as_map), and so will be locked as required 2829 * 2830 * - "past" segments may not have a vpage array, 2831 * depending on whether events (such as 2832 * mprotect) have occurred. Locking if vpage 2833 * exists will preserve legacy behavior. Not 2834 * locking if vpage is absent, will not break 2835 * the interface or legacy behavior. Note that 2836 * allocating vpage here if it's absent requires 2837 * upgrading the segvn reader lock, the cost of 2838 * which does not seem worthwhile. 2839 * 2840 * Usually testing and setting VPP_ISPPLOCK and 2841 * VPP_SETPPLOCK requires holding the segvn lock as 2842 * writer, but in this case all readers are 2843 * serializing on the anon array lock. 2844 */ 2845 if (AS_ISPGLCK(seg->s_as) && vpage != NULL && 2846 (svd->flags & MAP_NORESERVE) && 2847 !VPP_ISPPLOCK(vpage)) { 2848 proc_t *p = seg->s_as->a_proc; 2849 ASSERT(svd->type == MAP_PRIVATE); 2850 mutex_enter(&p->p_lock); 2851 if (rctl_incr_locked_mem(p, NULL, PAGESIZE, 2852 1) == 0) { 2853 claim = VPP_PROT(vpage) & PROT_WRITE; 2854 if (page_pp_lock(pp, claim, 0)) { 2855 VPP_SETPPLOCK(vpage); 2856 } else { 2857 rctl_decr_locked_mem(p, NULL, 2858 PAGESIZE, 1); 2859 } 2860 } 2861 mutex_exit(&p->p_lock); 2862 } 2863 2864 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2865 hat_memload(hat, addr, pp, prot, hat_flag); 2866 2867 if (!(hat_flag & HAT_LOAD_LOCK)) 2868 page_unlock(pp); 2869 2870 anon_array_exit(&cookie); 2871 return (0); 2872 } 2873 } 2874 2875 /* 2876 * Obtain the page structure via anon_getpage() if it is 2877 * a private copy of an object (the result of a previous 2878 * copy-on-write). 
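 *
 * If no anon slot exists at this index, opp stays NULL here and the
 * original page is instead located in the pl[] list filled in by the
 * caller (see the search loop below).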
2879 */ 2880 if (amp != NULL) { 2881 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) { 2882 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, 2883 seg, addr, rw, svd->cred); 2884 if (err) 2885 goto out; 2886 2887 if (svd->type == MAP_SHARED) { 2888 /* 2889 * If this is a shared mapping to an 2890 * anon_map, then ignore the write 2891 * permissions returned by anon_getpage(). 2892 * They apply to the private mappings 2893 * of this anon_map. 2894 */ 2895 vpprot |= PROT_WRITE; 2896 } 2897 opp = anon_pl[0]; 2898 } 2899 } 2900 2901 /* 2902 * Search the pl[] list passed in if it is from the 2903 * original object (i.e., not a private copy). 2904 */ 2905 if (opp == NULL) { 2906 /* 2907 * Find original page. We must be bringing it in 2908 * from the list in pl[]. 2909 */ 2910 for (ppp = pl; (opp = *ppp) != NULL; ppp++) { 2911 if (opp == PAGE_HANDLED) 2912 continue; 2913 ASSERT(opp->p_vnode == svd->vp); /* XXX */ 2914 if (opp->p_offset == off) 2915 break; 2916 } 2917 if (opp == NULL) { 2918 panic("segvn_faultpage not found"); 2919 /*NOTREACHED*/ 2920 } 2921 *ppp = PAGE_HANDLED; 2922 2923 } 2924 2925 ASSERT(PAGE_LOCKED(opp)); 2926 2927 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT, 2928 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0); 2929 2930 /* 2931 * The fault is treated as a copy-on-write fault if a 2932 * write occurs on a private segment and the object 2933 * page (i.e., mapping) is write protected. We assume 2934 * that fatal protection checks have already been made. 2935 */ 2936 2937 if (brkcow) { 2938 ASSERT(svd->tr_state == SEGVN_TR_OFF); 2939 cow = !(vpprot & PROT_WRITE); 2940 } else if (svd->tr_state == SEGVN_TR_ON) { 2941 /* 2942 * If we are doing text replication COW on first touch. 2943 */ 2944 ASSERT(amp != NULL); 2945 ASSERT(svd->vp != NULL); 2946 ASSERT(rw != S_WRITE); 2947 cow = (ap == NULL); 2948 } else { 2949 cow = 0; 2950 } 2951 2952 /* 2953 * If not a copy-on-write case load the translation 2954 * and return. 2955 */ 2956 if (cow == 0) { 2957 2958 /* 2959 * Handle pages that have been marked for migration 2960 */ 2961 if (lgrp_optimizations()) 2962 page_migrate(seg, addr, &opp, 1); 2963 2964 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) { 2965 if (rw == S_WRITE) 2966 hat_setmod(opp); 2967 else if (rw != S_OTHER && !hat_ismod(opp)) 2968 prot &= ~PROT_WRITE; 2969 } 2970 2971 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 2972 (!svd->pageprot && svd->prot == (prot & vpprot))); 2973 ASSERT(amp == NULL || 2974 svd->rcookie == HAT_INVALID_REGION_COOKIE); 2975 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag, 2976 svd->rcookie); 2977 2978 if (!(hat_flag & HAT_LOAD_LOCK)) 2979 page_unlock(opp); 2980 2981 if (anon_lock) { 2982 anon_array_exit(&cookie); 2983 } 2984 return (0); 2985 } 2986 2987 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 2988 2989 hat_setref(opp); 2990 2991 ASSERT(amp != NULL && anon_lock); 2992 2993 /* 2994 * Steal the page only if it isn't a private page 2995 * since stealing a private page is not worth the effort. 2996 */ 2997 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) 2998 steal = 1; 2999 3000 /* 3001 * Steal the original page if the following conditions are true: 3002 * 3003 * We are low on memory, the page is not private, page is not large, 3004 * not shared, not modified, not `locked' or if we have it `locked' 3005 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies 3006 * that the page is not shared) and if it doesn't have any 3007 * translations. 
page_struct_lock isn't needed to look at p_cowcnt 3008 * and p_lckcnt because we first get exclusive lock on page. 3009 */ 3010 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD); 3011 3012 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 && 3013 page_tryupgrade(opp) && !hat_ismod(opp) && 3014 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) || 3015 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 && 3016 vpage != NULL && VPP_ISPPLOCK(vpage)))) { 3017 /* 3018 * Check if this page has other translations 3019 * after unloading our translation. 3020 */ 3021 if (hat_page_is_mapped(opp)) { 3022 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3023 hat_unload(seg->s_as->a_hat, addr, PAGESIZE, 3024 HAT_UNLOAD); 3025 } 3026 3027 /* 3028 * hat_unload() might sync back someone else's recent 3029 * modification, so check again. 3030 */ 3031 if (!hat_ismod(opp) && !hat_page_is_mapped(opp)) 3032 pageflags |= STEAL_PAGE; 3033 } 3034 3035 /* 3036 * If we have a vpage pointer, see if it indicates that we have 3037 * ``locked'' the page we map -- if so, tell anon_private to 3038 * transfer the locking resource to the new page. 3039 * 3040 * See Statement at the beginning of segvn_lockop regarding 3041 * the way lockcnts/cowcnts are handled during COW. 3042 * 3043 */ 3044 if (vpage != NULL && VPP_ISPPLOCK(vpage)) 3045 pageflags |= LOCK_PAGE; 3046 3047 /* 3048 * Allocate a private page and perform the copy. 3049 * For MAP_NORESERVE reserve swap space now, unless this 3050 * is a cow fault on an existing anon page in which case 3051 * MAP_NORESERVE will have made advance reservations. 3052 */ 3053 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) { 3054 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) { 3055 atomic_add_long(&svd->swresv, ptob(1)); 3056 atomic_add_long(&seg->s_as->a_resvsize, ptob(1)); 3057 } else { 3058 page_unlock(opp); 3059 err = ENOMEM; 3060 goto out; 3061 } 3062 } 3063 oldap = ap; 3064 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred); 3065 if (pp == NULL) { 3066 err = ENOMEM; /* out of swap space */ 3067 goto out; 3068 } 3069 3070 /* 3071 * If we copied away from an anonymous page, then 3072 * we are one step closer to freeing up an anon slot. 3073 * 3074 * NOTE: The original anon slot must be released while 3075 * holding the "anon_map" lock. This is necessary to prevent 3076 * other threads from obtaining a pointer to the anon slot 3077 * which may be freed if its "refcnt" is 1. 3078 */ 3079 if (oldap != NULL) 3080 anon_decref(oldap); 3081 3082 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP); 3083 3084 /* 3085 * Handle pages that have been marked for migration 3086 */ 3087 if (lgrp_optimizations()) 3088 page_migrate(seg, addr, &pp, 1); 3089 3090 ASSERT(pp->p_szc == 0); 3091 3092 ASSERT(!IS_VMODSORT(pp->p_vnode)); 3093 if (enable_mbit_wa) { 3094 if (rw == S_WRITE) 3095 hat_setmod(pp); 3096 else if (!hat_ismod(pp)) 3097 prot &= ~PROT_WRITE; 3098 } 3099 3100 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 3101 hat_memload(hat, addr, pp, prot, hat_flag); 3102 3103 if (!(hat_flag & HAT_LOAD_LOCK)) 3104 page_unlock(pp); 3105 3106 ASSERT(anon_lock); 3107 anon_array_exit(&cookie); 3108 return (0); 3109 out: 3110 if (anon_lock) 3111 anon_array_exit(&cookie); 3112 3113 if (type == F_SOFTLOCK) { 3114 atomic_dec_ulong((ulong_t *)&svd->softlockcnt); 3115 } 3116 return (FC_MAKE_ERR(err)); 3117 } 3118 3119 /* 3120 * relocate a bunch of smaller targ pages into one large repl page. 
all targ 3121 * pages must be complete pages smaller than replacement pages. 3122 * it's assumed that no page's szc can change since they are all PAGESIZE or 3123 * complete large pages locked SHARED. 3124 */ 3125 static void 3126 segvn_relocate_pages(page_t **targ, page_t *replacement) 3127 { 3128 page_t *pp; 3129 pgcnt_t repl_npgs, curnpgs; 3130 pgcnt_t i; 3131 uint_t repl_szc = replacement->p_szc; 3132 page_t *first_repl = replacement; 3133 page_t *repl; 3134 spgcnt_t npgs; 3135 3136 VM_STAT_ADD(segvnvmstats.relocatepages[0]); 3137 3138 ASSERT(repl_szc != 0); 3139 npgs = repl_npgs = page_get_pagecnt(repl_szc); 3140 3141 i = 0; 3142 while (repl_npgs) { 3143 spgcnt_t nreloc; 3144 int err; 3145 ASSERT(replacement != NULL); 3146 pp = targ[i]; 3147 ASSERT(pp->p_szc < repl_szc); 3148 ASSERT(PAGE_EXCL(pp)); 3149 ASSERT(!PP_ISFREE(pp)); 3150 curnpgs = page_get_pagecnt(pp->p_szc); 3151 if (curnpgs == 1) { 3152 VM_STAT_ADD(segvnvmstats.relocatepages[1]); 3153 repl = replacement; 3154 page_sub(&replacement, repl); 3155 ASSERT(PAGE_EXCL(repl)); 3156 ASSERT(!PP_ISFREE(repl)); 3157 ASSERT(repl->p_szc == repl_szc); 3158 } else { 3159 page_t *repl_savepp; 3160 int j; 3161 VM_STAT_ADD(segvnvmstats.relocatepages[2]); 3162 repl_savepp = replacement; 3163 for (j = 0; j < curnpgs; j++) { 3164 repl = replacement; 3165 page_sub(&replacement, repl); 3166 ASSERT(PAGE_EXCL(repl)); 3167 ASSERT(!PP_ISFREE(repl)); 3168 ASSERT(repl->p_szc == repl_szc); 3169 ASSERT(page_pptonum(targ[i + j]) == 3170 page_pptonum(targ[i]) + j); 3171 } 3172 repl = repl_savepp; 3173 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs)); 3174 } 3175 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL); 3176 if (err || nreloc != curnpgs) { 3177 panic("segvn_relocate_pages: " 3178 "page_relocate failed err=%d curnpgs=%ld " 3179 "nreloc=%ld", err, curnpgs, nreloc); 3180 } 3181 ASSERT(curnpgs <= repl_npgs); 3182 repl_npgs -= curnpgs; 3183 i += curnpgs; 3184 } 3185 ASSERT(replacement == NULL); 3186 3187 repl = first_repl; 3188 repl_npgs = npgs; 3189 for (i = 0; i < repl_npgs; i++) { 3190 ASSERT(PAGE_EXCL(repl)); 3191 ASSERT(!PP_ISFREE(repl)); 3192 targ[i] = repl; 3193 page_downgrade(targ[i]); 3194 repl++; 3195 } 3196 } 3197 3198 /* 3199 * Check if all pages in ppa array are complete smaller than szc pages and 3200 * their roots will still be aligned relative to their current size if the 3201 * entire ppa array is relocated into one szc page. If these conditions are 3202 * not met return 0. 3203 * 3204 * If all pages are properly aligned attempt to upgrade their locks 3205 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0. 3206 * upgrdfail was set to 0 by caller. 3207 * 3208 * Return 1 if all pages are aligned and locked exclusively. 3209 * 3210 * If all pages in ppa array happen to be physically contiguous to make one 3211 * szc page and all exclusive locks are successfully obtained promote the page 3212 * size to szc and set *pszc to szc. Return 1 with pages locked shared. 
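 *
 * A caller-side sketch (illustrative only, error handling elided):
 *
 *	int upgrdfail = 0;
 *	uint_t pszc = 0;
 *
 *	if (segvn_full_szcpages(ppa, szc, &upgrdfail, &pszc)) {
 *		pages were either promoted in place (pszc == szc, locks
 *		shared) or are all locked EXCL and may be relocated into
 *		one szc page;
 *	} else if (upgrdfail) {
 *		a lock upgrade failed; retry the fault, possibly with the
 *		smaller size reported in pszc;
 *	} else {
 *		the pages cannot form an aligned szc page; fall back to
 *		small mappings;
 *	}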
3213 */ 3214 static int 3215 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc) 3216 { 3217 page_t *pp; 3218 pfn_t pfn; 3219 pgcnt_t totnpgs = page_get_pagecnt(szc); 3220 pfn_t first_pfn; 3221 int contig = 1; 3222 pgcnt_t i; 3223 pgcnt_t j; 3224 uint_t curszc; 3225 pgcnt_t curnpgs; 3226 int root = 0; 3227 3228 ASSERT(szc > 0); 3229 3230 VM_STAT_ADD(segvnvmstats.fullszcpages[0]); 3231 3232 for (i = 0; i < totnpgs; i++) { 3233 pp = ppa[i]; 3234 ASSERT(PAGE_SHARED(pp)); 3235 ASSERT(!PP_ISFREE(pp)); 3236 pfn = page_pptonum(pp); 3237 if (i == 0) { 3238 if (!IS_P2ALIGNED(pfn, totnpgs)) { 3239 contig = 0; 3240 } else { 3241 first_pfn = pfn; 3242 } 3243 } else if (contig && pfn != first_pfn + i) { 3244 contig = 0; 3245 } 3246 if (pp->p_szc == 0) { 3247 if (root) { 3248 VM_STAT_ADD(segvnvmstats.fullszcpages[1]); 3249 return (0); 3250 } 3251 } else if (!root) { 3252 if ((curszc = pp->p_szc) >= szc) { 3253 VM_STAT_ADD(segvnvmstats.fullszcpages[2]); 3254 return (0); 3255 } 3256 if (curszc == 0) { 3257 /* 3258 * p_szc changed means we don't have all pages 3259 * locked. return failure. 3260 */ 3261 VM_STAT_ADD(segvnvmstats.fullszcpages[3]); 3262 return (0); 3263 } 3264 curnpgs = page_get_pagecnt(curszc); 3265 if (!IS_P2ALIGNED(pfn, curnpgs) || 3266 !IS_P2ALIGNED(i, curnpgs)) { 3267 VM_STAT_ADD(segvnvmstats.fullszcpages[4]); 3268 return (0); 3269 } 3270 root = 1; 3271 } else { 3272 ASSERT(i > 0); 3273 VM_STAT_ADD(segvnvmstats.fullszcpages[5]); 3274 if (pp->p_szc != curszc) { 3275 VM_STAT_ADD(segvnvmstats.fullszcpages[6]); 3276 return (0); 3277 } 3278 if (pfn - 1 != page_pptonum(ppa[i - 1])) { 3279 panic("segvn_full_szcpages: " 3280 "large page not physically contiguous"); 3281 } 3282 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) { 3283 root = 0; 3284 } 3285 } 3286 } 3287 3288 for (i = 0; i < totnpgs; i++) { 3289 ASSERT(ppa[i]->p_szc < szc); 3290 if (!page_tryupgrade(ppa[i])) { 3291 for (j = 0; j < i; j++) { 3292 page_downgrade(ppa[j]); 3293 } 3294 *pszc = ppa[i]->p_szc; 3295 *upgrdfail = 1; 3296 VM_STAT_ADD(segvnvmstats.fullszcpages[7]); 3297 return (0); 3298 } 3299 } 3300 3301 /* 3302 * When a page is put a free cachelist its szc is set to 0. if file 3303 * system reclaimed pages from cachelist targ pages will be physically 3304 * contiguous with 0 p_szc. in this case just upgrade szc of targ 3305 * pages without any relocations. 3306 * To avoid any hat issues with previous small mappings 3307 * hat_pageunload() the target pages first. 3308 */ 3309 if (contig) { 3310 VM_STAT_ADD(segvnvmstats.fullszcpages[8]); 3311 for (i = 0; i < totnpgs; i++) { 3312 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD); 3313 } 3314 for (i = 0; i < totnpgs; i++) { 3315 ppa[i]->p_szc = szc; 3316 } 3317 for (i = 0; i < totnpgs; i++) { 3318 ASSERT(PAGE_EXCL(ppa[i])); 3319 page_downgrade(ppa[i]); 3320 } 3321 if (pszc != NULL) { 3322 *pszc = szc; 3323 } 3324 } 3325 VM_STAT_ADD(segvnvmstats.fullszcpages[9]); 3326 return (1); 3327 } 3328 3329 /* 3330 * Create physically contiguous pages for [vp, off] - [vp, off + 3331 * page_size(szc)) range and for private segment return them in ppa array. 3332 * Pages are created either via IO or relocations. 3333 * 3334 * Return 1 on success and 0 on failure. 3335 * 3336 * If physically contiguous pages already exist for this range return 1 without 3337 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa 3338 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE(). 
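 *
 * Sketch of the caller-side protocol (compare the use in
 * segvn_fault_vnodepages() below):
 *
 *	ppa[0] = NULL;
 *	physcontig = segvn_fill_vp_pages(svd, vp, off, szc, ppa,
 *	    &pplist, &pszc, &downsize);
 *	if (physcontig && ppa[0] == NULL) {
 *		contiguous pages already existed, so treat the range as
 *		not filled and let VOP_GETPAGE() bring the pages into ppa[].
 *	}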
3339 */ 3340 3341 static int 3342 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off, 3343 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc, 3344 int *downsize) 3345 3346 { 3347 page_t *pplist = *ppplist; 3348 size_t pgsz = page_get_pagesize(szc); 3349 pgcnt_t pages = btop(pgsz); 3350 ulong_t start_off = off; 3351 u_offset_t eoff = off + pgsz; 3352 spgcnt_t nreloc; 3353 u_offset_t io_off = off; 3354 size_t io_len; 3355 page_t *io_pplist = NULL; 3356 page_t *done_pplist = NULL; 3357 pgcnt_t pgidx = 0; 3358 page_t *pp; 3359 page_t *newpp; 3360 page_t *targpp; 3361 int io_err = 0; 3362 int i; 3363 pfn_t pfn; 3364 ulong_t ppages; 3365 page_t *targ_pplist = NULL; 3366 page_t *repl_pplist = NULL; 3367 page_t *tmp_pplist; 3368 int nios = 0; 3369 uint_t pszc; 3370 struct vattr va; 3371 3372 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]); 3373 3374 ASSERT(szc != 0); 3375 ASSERT(pplist->p_szc == szc); 3376 3377 /* 3378 * downsize will be set to 1 only if we fail to lock pages. this will 3379 * allow subsequent faults to try to relocate the page again. If we 3380 * fail due to misalignment don't downsize and let the caller map the 3381 * whole region with small mappings to avoid more faults into the area 3382 * where we can't get large pages anyway. 3383 */ 3384 *downsize = 0; 3385 3386 while (off < eoff) { 3387 newpp = pplist; 3388 ASSERT(newpp != NULL); 3389 ASSERT(PAGE_EXCL(newpp)); 3390 ASSERT(!PP_ISFREE(newpp)); 3391 /* 3392 * we pass NULL for nrelocp to page_lookup_create() 3393 * so that it doesn't relocate. We relocate here 3394 * later only after we make sure we can lock all 3395 * pages in the range we handle and they are all 3396 * aligned. 3397 */ 3398 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0); 3399 ASSERT(pp != NULL); 3400 ASSERT(!PP_ISFREE(pp)); 3401 ASSERT(pp->p_vnode == vp); 3402 ASSERT(pp->p_offset == off); 3403 if (pp == newpp) { 3404 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]); 3405 page_sub(&pplist, pp); 3406 ASSERT(PAGE_EXCL(pp)); 3407 ASSERT(page_iolock_assert(pp)); 3408 page_list_concat(&io_pplist, &pp); 3409 off += PAGESIZE; 3410 continue; 3411 } 3412 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]); 3413 pfn = page_pptonum(pp); 3414 pszc = pp->p_szc; 3415 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL && 3416 IS_P2ALIGNED(pfn, pages)) { 3417 ASSERT(repl_pplist == NULL); 3418 ASSERT(done_pplist == NULL); 3419 ASSERT(pplist == *ppplist); 3420 page_unlock(pp); 3421 page_free_replacement_page(pplist); 3422 page_create_putback(pages); 3423 *ppplist = NULL; 3424 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]); 3425 return (1); 3426 } 3427 if (pszc >= szc) { 3428 page_unlock(pp); 3429 segvn_faultvnmpss_align_err1++; 3430 goto out; 3431 } 3432 ppages = page_get_pagecnt(pszc); 3433 if (!IS_P2ALIGNED(pfn, ppages)) { 3434 ASSERT(pszc > 0); 3435 /* 3436 * sizing down to pszc won't help. 3437 */ 3438 page_unlock(pp); 3439 segvn_faultvnmpss_align_err2++; 3440 goto out; 3441 } 3442 pfn = page_pptonum(newpp); 3443 if (!IS_P2ALIGNED(pfn, ppages)) { 3444 ASSERT(pszc > 0); 3445 /* 3446 * sizing down to pszc won't help. 
3447 */ 3448 page_unlock(pp); 3449 segvn_faultvnmpss_align_err3++; 3450 goto out; 3451 } 3452 if (!PAGE_EXCL(pp)) { 3453 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]); 3454 page_unlock(pp); 3455 *downsize = 1; 3456 *ret_pszc = pp->p_szc; 3457 goto out; 3458 } 3459 targpp = pp; 3460 if (io_pplist != NULL) { 3461 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]); 3462 io_len = off - io_off; 3463 /* 3464 * Some file systems like NFS don't check EOF 3465 * conditions in VOP_PAGEIO(). Check it here 3466 * now that pages are locked SE_EXCL. Any file 3467 * truncation will wait until the pages are 3468 * unlocked so no need to worry that file will 3469 * be truncated after we check its size here. 3470 * XXX fix NFS to remove this check. 3471 */ 3472 va.va_mask = AT_SIZE; 3473 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) { 3474 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]); 3475 page_unlock(targpp); 3476 goto out; 3477 } 3478 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3479 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]); 3480 *downsize = 1; 3481 *ret_pszc = 0; 3482 page_unlock(targpp); 3483 goto out; 3484 } 3485 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3486 B_READ, svd->cred, NULL); 3487 if (io_err) { 3488 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]); 3489 page_unlock(targpp); 3490 if (io_err == EDEADLK) { 3491 segvn_vmpss_pageio_deadlk_err++; 3492 } 3493 goto out; 3494 } 3495 nios++; 3496 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]); 3497 while (io_pplist != NULL) { 3498 pp = io_pplist; 3499 page_sub(&io_pplist, pp); 3500 ASSERT(page_iolock_assert(pp)); 3501 page_io_unlock(pp); 3502 pgidx = (pp->p_offset - start_off) >> 3503 PAGESHIFT; 3504 ASSERT(pgidx < pages); 3505 ppa[pgidx] = pp; 3506 page_list_concat(&done_pplist, &pp); 3507 } 3508 } 3509 pp = targpp; 3510 ASSERT(PAGE_EXCL(pp)); 3511 ASSERT(pp->p_szc <= pszc); 3512 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) { 3513 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]); 3514 page_unlock(pp); 3515 *downsize = 1; 3516 *ret_pszc = pp->p_szc; 3517 goto out; 3518 } 3519 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]); 3520 /* 3521 * page szc chould have changed before the entire group was 3522 * locked. reread page szc. 
3523 */ 3524 pszc = pp->p_szc; 3525 ppages = page_get_pagecnt(pszc); 3526 3527 /* link just the roots */ 3528 page_list_concat(&targ_pplist, &pp); 3529 page_sub(&pplist, newpp); 3530 page_list_concat(&repl_pplist, &newpp); 3531 off += PAGESIZE; 3532 while (--ppages != 0) { 3533 newpp = pplist; 3534 page_sub(&pplist, newpp); 3535 off += PAGESIZE; 3536 } 3537 io_off = off; 3538 } 3539 if (io_pplist != NULL) { 3540 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]); 3541 io_len = eoff - io_off; 3542 va.va_mask = AT_SIZE; 3543 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) { 3544 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]); 3545 goto out; 3546 } 3547 if (btopr(va.va_size) < btopr(io_off + io_len)) { 3548 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]); 3549 *downsize = 1; 3550 *ret_pszc = 0; 3551 goto out; 3552 } 3553 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len, 3554 B_READ, svd->cred, NULL); 3555 if (io_err) { 3556 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]); 3557 if (io_err == EDEADLK) { 3558 segvn_vmpss_pageio_deadlk_err++; 3559 } 3560 goto out; 3561 } 3562 nios++; 3563 while (io_pplist != NULL) { 3564 pp = io_pplist; 3565 page_sub(&io_pplist, pp); 3566 ASSERT(page_iolock_assert(pp)); 3567 page_io_unlock(pp); 3568 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3569 ASSERT(pgidx < pages); 3570 ppa[pgidx] = pp; 3571 } 3572 } 3573 /* 3574 * we're now bound to succeed or panic. 3575 * remove pages from done_pplist. it's not needed anymore. 3576 */ 3577 while (done_pplist != NULL) { 3578 pp = done_pplist; 3579 page_sub(&done_pplist, pp); 3580 } 3581 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]); 3582 ASSERT(pplist == NULL); 3583 *ppplist = NULL; 3584 while (targ_pplist != NULL) { 3585 int ret; 3586 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]); 3587 ASSERT(repl_pplist); 3588 pp = targ_pplist; 3589 page_sub(&targ_pplist, pp); 3590 pgidx = (pp->p_offset - start_off) >> PAGESHIFT; 3591 newpp = repl_pplist; 3592 page_sub(&repl_pplist, newpp); 3593 #ifdef DEBUG 3594 pfn = page_pptonum(pp); 3595 pszc = pp->p_szc; 3596 ppages = page_get_pagecnt(pszc); 3597 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3598 pfn = page_pptonum(newpp); 3599 ASSERT(IS_P2ALIGNED(pfn, ppages)); 3600 ASSERT(P2PHASE(pfn, pages) == pgidx); 3601 #endif 3602 nreloc = 0; 3603 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL); 3604 if (ret != 0 || nreloc == 0) { 3605 panic("segvn_fill_vp_pages: " 3606 "page_relocate failed"); 3607 } 3608 pp = newpp; 3609 while (nreloc-- != 0) { 3610 ASSERT(PAGE_EXCL(pp)); 3611 ASSERT(pp->p_vnode == vp); 3612 ASSERT(pgidx == 3613 ((pp->p_offset - start_off) >> PAGESHIFT)); 3614 ppa[pgidx++] = pp; 3615 pp++; 3616 } 3617 } 3618 3619 if (svd->type == MAP_PRIVATE) { 3620 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]); 3621 for (i = 0; i < pages; i++) { 3622 ASSERT(ppa[i] != NULL); 3623 ASSERT(PAGE_EXCL(ppa[i])); 3624 ASSERT(ppa[i]->p_vnode == vp); 3625 ASSERT(ppa[i]->p_offset == 3626 start_off + (i << PAGESHIFT)); 3627 page_downgrade(ppa[i]); 3628 } 3629 ppa[pages] = NULL; 3630 } else { 3631 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]); 3632 /* 3633 * the caller will still call VOP_GETPAGE() for shared segments 3634 * to check FS write permissions. For private segments we map 3635 * file read only anyway. so no VOP_GETPAGE is needed. 
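 *
 * Hence the loop below drops every page lock and resets ppa[0] to NULL
 * so that the caller falls back to VOP_GETPAGE() (which performs that
 * permission check), whereas the MAP_PRIVATE case above only downgrades
 * the exclusive locks and returns the pages in ppa[].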
3636 */ 3637 for (i = 0; i < pages; i++) { 3638 ASSERT(ppa[i] != NULL); 3639 ASSERT(PAGE_EXCL(ppa[i])); 3640 ASSERT(ppa[i]->p_vnode == vp); 3641 ASSERT(ppa[i]->p_offset == 3642 start_off + (i << PAGESHIFT)); 3643 page_unlock(ppa[i]); 3644 } 3645 ppa[0] = NULL; 3646 } 3647 3648 return (1); 3649 out: 3650 /* 3651 * Do the cleanup. Unlock target pages we didn't relocate. They are 3652 * linked on targ_pplist by root pages. reassemble unused replacement 3653 * and io pages back to pplist. 3654 */ 3655 if (io_pplist != NULL) { 3656 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]); 3657 pp = io_pplist; 3658 do { 3659 ASSERT(pp->p_vnode == vp); 3660 ASSERT(pp->p_offset == io_off); 3661 ASSERT(page_iolock_assert(pp)); 3662 page_io_unlock(pp); 3663 page_hashout(pp, NULL); 3664 io_off += PAGESIZE; 3665 } while ((pp = pp->p_next) != io_pplist); 3666 page_list_concat(&io_pplist, &pplist); 3667 pplist = io_pplist; 3668 } 3669 tmp_pplist = NULL; 3670 while (targ_pplist != NULL) { 3671 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]); 3672 pp = targ_pplist; 3673 ASSERT(PAGE_EXCL(pp)); 3674 page_sub(&targ_pplist, pp); 3675 3676 pszc = pp->p_szc; 3677 ppages = page_get_pagecnt(pszc); 3678 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3679 3680 if (pszc != 0) { 3681 group_page_unlock(pp); 3682 } 3683 page_unlock(pp); 3684 3685 pp = repl_pplist; 3686 ASSERT(pp != NULL); 3687 ASSERT(PAGE_EXCL(pp)); 3688 ASSERT(pp->p_szc == szc); 3689 page_sub(&repl_pplist, pp); 3690 3691 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages)); 3692 3693 /* relink replacement page */ 3694 page_list_concat(&tmp_pplist, &pp); 3695 while (--ppages != 0) { 3696 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]); 3697 pp++; 3698 ASSERT(PAGE_EXCL(pp)); 3699 ASSERT(pp->p_szc == szc); 3700 page_list_concat(&tmp_pplist, &pp); 3701 } 3702 } 3703 if (tmp_pplist != NULL) { 3704 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]); 3705 page_list_concat(&tmp_pplist, &pplist); 3706 pplist = tmp_pplist; 3707 } 3708 /* 3709 * at this point all pages are either on done_pplist or 3710 * pplist. They can't be all on done_pplist otherwise 3711 * we'd've been done. 3712 */ 3713 ASSERT(pplist != NULL); 3714 if (nios != 0) { 3715 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]); 3716 pp = pplist; 3717 do { 3718 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]); 3719 ASSERT(pp->p_szc == szc); 3720 ASSERT(PAGE_EXCL(pp)); 3721 ASSERT(pp->p_vnode != vp); 3722 pp->p_szc = 0; 3723 } while ((pp = pp->p_next) != pplist); 3724 3725 pp = done_pplist; 3726 do { 3727 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]); 3728 ASSERT(pp->p_szc == szc); 3729 ASSERT(PAGE_EXCL(pp)); 3730 ASSERT(pp->p_vnode == vp); 3731 pp->p_szc = 0; 3732 } while ((pp = pp->p_next) != done_pplist); 3733 3734 while (pplist != NULL) { 3735 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]); 3736 pp = pplist; 3737 page_sub(&pplist, pp); 3738 page_free(pp, 0); 3739 } 3740 3741 while (done_pplist != NULL) { 3742 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]); 3743 pp = done_pplist; 3744 page_sub(&done_pplist, pp); 3745 page_unlock(pp); 3746 } 3747 *ppplist = NULL; 3748 return (0); 3749 } 3750 ASSERT(pplist == *ppplist); 3751 if (io_err) { 3752 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]); 3753 /* 3754 * don't downsize on io error. 3755 * see if vop_getpage succeeds. 3756 * pplist may still be used in this case 3757 * for relocations. 
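 * (*ppplist is left pointing at the replacement pages for that reason.)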
3758 */ 3759 return (0); 3760 } 3761 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]); 3762 page_free_replacement_page(pplist); 3763 page_create_putback(pages); 3764 *ppplist = NULL; 3765 return (0); 3766 } 3767 3768 int segvn_anypgsz = 0; 3769 3770 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \ 3771 if ((type) == F_SOFTLOCK) { \ 3772 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \ 3773 -(pages)); \ 3774 } 3775 3776 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \ 3777 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \ 3778 if ((rw) == S_WRITE) { \ 3779 for (i = 0; i < (pages); i++) { \ 3780 ASSERT((ppa)[i]->p_vnode == \ 3781 (ppa)[0]->p_vnode); \ 3782 hat_setmod((ppa)[i]); \ 3783 } \ 3784 } else if ((rw) != S_OTHER && \ 3785 ((prot) & (vpprot) & PROT_WRITE)) { \ 3786 for (i = 0; i < (pages); i++) { \ 3787 ASSERT((ppa)[i]->p_vnode == \ 3788 (ppa)[0]->p_vnode); \ 3789 if (!hat_ismod((ppa)[i])) { \ 3790 prot &= ~PROT_WRITE; \ 3791 break; \ 3792 } \ 3793 } \ 3794 } \ 3795 } 3796 3797 #ifdef VM_STATS 3798 3799 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \ 3800 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]); 3801 3802 #else /* VM_STATS */ 3803 3804 #define SEGVN_VMSTAT_FLTVNPAGES(idx) 3805 3806 #endif 3807 3808 static faultcode_t 3809 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 3810 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 3811 caddr_t eaddr, int brkcow) 3812 { 3813 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 3814 struct anon_map *amp = svd->amp; 3815 uchar_t segtype = svd->type; 3816 uint_t szc = seg->s_szc; 3817 size_t pgsz = page_get_pagesize(szc); 3818 size_t maxpgsz = pgsz; 3819 pgcnt_t pages = btop(pgsz); 3820 pgcnt_t maxpages = pages; 3821 size_t ppasize = (pages + 1) * sizeof (page_t *); 3822 caddr_t a = lpgaddr; 3823 caddr_t maxlpgeaddr = lpgeaddr; 3824 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base); 3825 ulong_t aindx = svd->anon_index + seg_page(seg, a); 3826 struct vpage *vpage = (svd->vpage != NULL) ? 3827 &svd->vpage[seg_page(seg, a)] : NULL; 3828 vnode_t *vp = svd->vp; 3829 page_t **ppa; 3830 uint_t pszc; 3831 size_t ppgsz; 3832 pgcnt_t ppages; 3833 faultcode_t err = 0; 3834 int ierr; 3835 int vop_size_err = 0; 3836 uint_t protchk, prot, vpprot; 3837 ulong_t i; 3838 int hat_flag = (type == F_SOFTLOCK) ? 
HAT_LOAD_LOCK : HAT_LOAD; 3839 anon_sync_obj_t an_cookie; 3840 enum seg_rw arw; 3841 int alloc_failed = 0; 3842 int adjszc_chk; 3843 struct vattr va; 3844 page_t *pplist; 3845 pfn_t pfn; 3846 int physcontig; 3847 int upgrdfail; 3848 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */ 3849 int tron = (svd->tr_state == SEGVN_TR_ON); 3850 3851 ASSERT(szc != 0); 3852 ASSERT(vp != NULL); 3853 ASSERT(brkcow == 0 || amp != NULL); 3854 ASSERT(tron == 0 || amp != NULL); 3855 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 3856 ASSERT(!(svd->flags & MAP_NORESERVE)); 3857 ASSERT(type != F_SOFTUNLOCK); 3858 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 3859 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages)); 3860 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 3861 ASSERT(seg->s_szc < NBBY * sizeof (int)); 3862 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz); 3863 ASSERT(svd->tr_state != SEGVN_TR_INIT); 3864 3865 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]); 3866 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]); 3867 3868 if (svd->flags & MAP_TEXT) { 3869 hat_flag |= HAT_LOAD_TEXT; 3870 } 3871 3872 if (svd->pageprot) { 3873 switch (rw) { 3874 case S_READ: 3875 protchk = PROT_READ; 3876 break; 3877 case S_WRITE: 3878 protchk = PROT_WRITE; 3879 break; 3880 case S_EXEC: 3881 protchk = PROT_EXEC; 3882 break; 3883 case S_OTHER: 3884 default: 3885 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 3886 break; 3887 } 3888 } else { 3889 prot = svd->prot; 3890 /* caller has already done segment level protection check. */ 3891 } 3892 3893 if (rw == S_WRITE && segtype == MAP_PRIVATE) { 3894 SEGVN_VMSTAT_FLTVNPAGES(2); 3895 arw = S_READ; 3896 } else { 3897 arw = rw; 3898 } 3899 3900 ppa = kmem_alloc(ppasize, KM_SLEEP); 3901 3902 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]); 3903 3904 for (;;) { 3905 adjszc_chk = 0; 3906 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) { 3907 if (adjszc_chk) { 3908 while (szc < seg->s_szc) { 3909 uintptr_t e; 3910 uint_t tszc; 3911 tszc = segvn_anypgsz_vnode ? 
szc + 1 : 3912 seg->s_szc; 3913 ppgsz = page_get_pagesize(tszc); 3914 if (!IS_P2ALIGNED(a, ppgsz) || 3915 ((alloc_failed >> tszc) & 0x1)) { 3916 break; 3917 } 3918 SEGVN_VMSTAT_FLTVNPAGES(4); 3919 szc = tszc; 3920 pgsz = ppgsz; 3921 pages = btop(pgsz); 3922 e = P2ROUNDUP((uintptr_t)eaddr, pgsz); 3923 lpgeaddr = (caddr_t)e; 3924 } 3925 } 3926 3927 again: 3928 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) { 3929 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 3930 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 3931 anon_array_enter(amp, aindx, &an_cookie); 3932 if (anon_get_ptr(amp->ahp, aindx) != NULL) { 3933 SEGVN_VMSTAT_FLTVNPAGES(5); 3934 ASSERT(anon_pages(amp->ahp, aindx, 3935 maxpages) == maxpages); 3936 anon_array_exit(&an_cookie); 3937 ANON_LOCK_EXIT(&->a_rwlock); 3938 err = segvn_fault_anonpages(hat, seg, 3939 a, a + maxpgsz, type, rw, 3940 MAX(a, addr), 3941 MIN(a + maxpgsz, eaddr), brkcow); 3942 if (err != 0) { 3943 SEGVN_VMSTAT_FLTVNPAGES(6); 3944 goto out; 3945 } 3946 if (szc < seg->s_szc) { 3947 szc = seg->s_szc; 3948 pgsz = maxpgsz; 3949 pages = maxpages; 3950 lpgeaddr = maxlpgeaddr; 3951 } 3952 goto next; 3953 } else { 3954 ASSERT(anon_pages(amp->ahp, aindx, 3955 maxpages) == 0); 3956 SEGVN_VMSTAT_FLTVNPAGES(7); 3957 anon_array_exit(&an_cookie); 3958 ANON_LOCK_EXIT(&->a_rwlock); 3959 } 3960 } 3961 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz)); 3962 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz)); 3963 3964 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 3965 ASSERT(vpage != NULL); 3966 prot = VPP_PROT(vpage); 3967 ASSERT(sameprot(seg, a, maxpgsz)); 3968 if ((prot & protchk) == 0) { 3969 SEGVN_VMSTAT_FLTVNPAGES(8); 3970 err = FC_PROT; 3971 goto out; 3972 } 3973 } 3974 if (type == F_SOFTLOCK) { 3975 atomic_add_long((ulong_t *)&svd->softlockcnt, 3976 pages); 3977 } 3978 3979 pplist = NULL; 3980 physcontig = 0; 3981 ppa[0] = NULL; 3982 if (!brkcow && !tron && szc && 3983 !page_exists_physcontig(vp, off, szc, 3984 segtype == MAP_PRIVATE ? 
ppa : NULL)) { 3985 SEGVN_VMSTAT_FLTVNPAGES(9); 3986 if (page_alloc_pages(vp, seg, a, &pplist, NULL, 3987 szc, 0, 0) && type != F_SOFTLOCK) { 3988 SEGVN_VMSTAT_FLTVNPAGES(10); 3989 pszc = 0; 3990 ierr = -1; 3991 alloc_failed |= (1 << szc); 3992 break; 3993 } 3994 if (pplist != NULL && 3995 vp->v_mpssdata == SEGVN_PAGEIO) { 3996 int downsize; 3997 SEGVN_VMSTAT_FLTVNPAGES(11); 3998 physcontig = segvn_fill_vp_pages(svd, 3999 vp, off, szc, ppa, &pplist, 4000 &pszc, &downsize); 4001 ASSERT(!physcontig || pplist == NULL); 4002 if (!physcontig && downsize && 4003 type != F_SOFTLOCK) { 4004 ASSERT(pplist == NULL); 4005 SEGVN_VMSTAT_FLTVNPAGES(12); 4006 ierr = -1; 4007 break; 4008 } 4009 ASSERT(!physcontig || 4010 segtype == MAP_PRIVATE || 4011 ppa[0] == NULL); 4012 if (physcontig && ppa[0] == NULL) { 4013 physcontig = 0; 4014 } 4015 } 4016 } else if (!brkcow && !tron && szc && ppa[0] != NULL) { 4017 SEGVN_VMSTAT_FLTVNPAGES(13); 4018 ASSERT(segtype == MAP_PRIVATE); 4019 physcontig = 1; 4020 } 4021 4022 if (!physcontig) { 4023 SEGVN_VMSTAT_FLTVNPAGES(14); 4024 ppa[0] = NULL; 4025 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz, 4026 &vpprot, ppa, pgsz, seg, a, arw, 4027 svd->cred, NULL); 4028 #ifdef DEBUG 4029 if (ierr == 0) { 4030 for (i = 0; i < pages; i++) { 4031 ASSERT(PAGE_LOCKED(ppa[i])); 4032 ASSERT(!PP_ISFREE(ppa[i])); 4033 ASSERT(ppa[i]->p_vnode == vp); 4034 ASSERT(ppa[i]->p_offset == 4035 off + (i << PAGESHIFT)); 4036 } 4037 } 4038 #endif /* DEBUG */ 4039 if (segtype == MAP_PRIVATE) { 4040 SEGVN_VMSTAT_FLTVNPAGES(15); 4041 vpprot &= ~PROT_WRITE; 4042 } 4043 } else { 4044 ASSERT(segtype == MAP_PRIVATE); 4045 SEGVN_VMSTAT_FLTVNPAGES(16); 4046 vpprot = PROT_ALL & ~PROT_WRITE; 4047 ierr = 0; 4048 } 4049 4050 if (ierr != 0) { 4051 SEGVN_VMSTAT_FLTVNPAGES(17); 4052 if (pplist != NULL) { 4053 SEGVN_VMSTAT_FLTVNPAGES(18); 4054 page_free_replacement_page(pplist); 4055 page_create_putback(pages); 4056 } 4057 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4058 if (a + pgsz <= eaddr) { 4059 SEGVN_VMSTAT_FLTVNPAGES(19); 4060 err = FC_MAKE_ERR(ierr); 4061 goto out; 4062 } 4063 va.va_mask = AT_SIZE; 4064 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) { 4065 SEGVN_VMSTAT_FLTVNPAGES(20); 4066 err = FC_MAKE_ERR(EIO); 4067 goto out; 4068 } 4069 if (btopr(va.va_size) >= btopr(off + pgsz)) { 4070 SEGVN_VMSTAT_FLTVNPAGES(21); 4071 err = FC_MAKE_ERR(ierr); 4072 goto out; 4073 } 4074 if (btopr(va.va_size) < 4075 btopr(off + (eaddr - a))) { 4076 SEGVN_VMSTAT_FLTVNPAGES(22); 4077 err = FC_MAKE_ERR(ierr); 4078 goto out; 4079 } 4080 if (brkcow || tron || type == F_SOFTLOCK) { 4081 /* can't reduce map area */ 4082 SEGVN_VMSTAT_FLTVNPAGES(23); 4083 vop_size_err = 1; 4084 goto out; 4085 } 4086 SEGVN_VMSTAT_FLTVNPAGES(24); 4087 ASSERT(szc != 0); 4088 pszc = 0; 4089 ierr = -1; 4090 break; 4091 } 4092 4093 if (amp != NULL) { 4094 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4095 anon_array_enter(amp, aindx, &an_cookie); 4096 } 4097 if (amp != NULL && 4098 anon_get_ptr(amp->ahp, aindx) != NULL) { 4099 ulong_t taindx = P2ALIGN(aindx, maxpages); 4100 4101 SEGVN_VMSTAT_FLTVNPAGES(25); 4102 ASSERT(anon_pages(amp->ahp, taindx, 4103 maxpages) == maxpages); 4104 for (i = 0; i < pages; i++) { 4105 page_unlock(ppa[i]); 4106 } 4107 anon_array_exit(&an_cookie); 4108 ANON_LOCK_EXIT(&->a_rwlock); 4109 if (pplist != NULL) { 4110 page_free_replacement_page(pplist); 4111 page_create_putback(pages); 4112 } 4113 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4114 if (szc < seg->s_szc) { 4115 SEGVN_VMSTAT_FLTVNPAGES(26); 4116 /* 4117 * For private segments SOFTLOCK 
4118 * either always breaks cow (any rw 4119 * type except S_READ_NOCOW) or 4120 * address space is locked as writer 4121 * (S_READ_NOCOW case) and anon slots 4122 * can't show up on second check. 4123 * Therefore if we are here for 4124 * SOFTLOCK case it must be a cow 4125 * break but cow break never reduces 4126 * szc. text replication (tron) in 4127 * this case works as cow break. 4128 * Thus the assert below. 4129 */ 4130 ASSERT(!brkcow && !tron && 4131 type != F_SOFTLOCK); 4132 pszc = seg->s_szc; 4133 ierr = -2; 4134 break; 4135 } 4136 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4137 goto again; 4138 } 4139 #ifdef DEBUG 4140 if (amp != NULL) { 4141 ulong_t taindx = P2ALIGN(aindx, maxpages); 4142 ASSERT(!anon_pages(amp->ahp, taindx, maxpages)); 4143 } 4144 #endif /* DEBUG */ 4145 4146 if (brkcow || tron) { 4147 ASSERT(amp != NULL); 4148 ASSERT(pplist == NULL); 4149 ASSERT(szc == seg->s_szc); 4150 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4151 ASSERT(IS_P2ALIGNED(aindx, maxpages)); 4152 SEGVN_VMSTAT_FLTVNPAGES(27); 4153 ierr = anon_map_privatepages(amp, aindx, szc, 4154 seg, a, prot, ppa, vpage, segvn_anypgsz, 4155 tron ? PG_LOCAL : 0, svd->cred); 4156 if (ierr != 0) { 4157 SEGVN_VMSTAT_FLTVNPAGES(28); 4158 anon_array_exit(&an_cookie); 4159 ANON_LOCK_EXIT(&amp->a_rwlock); 4160 SEGVN_RESTORE_SOFTLOCK_VP(type, pages); 4161 err = FC_MAKE_ERR(ierr); 4162 goto out; 4163 } 4164 4165 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4166 /* 4167 * p_szc can't be changed for locked 4168 * swapfs pages. 4169 */ 4170 ASSERT(svd->rcookie == 4171 HAT_INVALID_REGION_COOKIE); 4172 hat_memload_array(hat, a, pgsz, ppa, prot, 4173 hat_flag); 4174 4175 if (!(hat_flag & HAT_LOAD_LOCK)) { 4176 SEGVN_VMSTAT_FLTVNPAGES(29); 4177 for (i = 0; i < pages; i++) { 4178 page_unlock(ppa[i]); 4179 } 4180 } 4181 anon_array_exit(&an_cookie); 4182 ANON_LOCK_EXIT(&amp->a_rwlock); 4183 goto next; 4184 } 4185 4186 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE || 4187 (!svd->pageprot && svd->prot == (prot & vpprot))); 4188 4189 pfn = page_pptonum(ppa[0]); 4190 /* 4191 * hat_page_demote() needs an SE_EXCL lock on one of 4192 * constituent page_t's and it decreases root's p_szc 4193 * last. This means if root's p_szc is equal szc and 4194 * all its constituent pages are locked 4195 * hat_page_demote() that could have changed p_szc to 4196 * szc is already done and no new hat_page_demote() 4197 * can start for this large page. 4198 */ 4199 4200 /* 4201 * we need to make sure same mapping size is used for 4202 * the same address range if there's a possibility the 4203 * address is already mapped because hat layer panics 4204 * when translation is loaded for the range already 4205 * mapped with a different page size. We achieve it 4206 * by always using largest page size possible subject 4207 * to the constraints of page size, segment page size 4208 * and page alignment. Since mappings are invalidated 4209 * when those constraints change and make it 4210 * impossible to use previously used mapping size no 4211 * mapping size conflicts should happen.
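 *
 * [Editorial aside, not part of the original file]  The constraint described
 * above amounts to picking the largest page size for which both the virtual
 * address and the pfn are suitably aligned.  A rough, self-contained sketch
 * (the helper name largest_usable_szc is hypothetical; page_get_pagesize(),
 * page_get_pagecnt() and IS_P2ALIGNED() are the existing VM/sysmacros
 * interfaces used elsewhere in this function):
 *
 *	static uint_t
 *	largest_usable_szc(caddr_t va, pfn_t pfn, uint_t seg_szc)
 *	{
 *		uint_t szc = seg_szc;
 *
 *		while (szc != 0 &&
 *		    (!IS_P2ALIGNED((uintptr_t)va, page_get_pagesize(szc)) ||
 *		    !IS_P2ALIGNED(pfn, page_get_pagecnt(szc))))
 *			szc--;
 *		return (szc);
 *	}
 *
 * Because every mapping of the same file range applies the same rule, two
 * mappings should never end up loading conflicting translation sizes.
 *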
4212 */ 4213 4214 chkszc: 4215 if ((pszc = ppa[0]->p_szc) == szc && 4216 IS_P2ALIGNED(pfn, pages)) { 4217 4218 SEGVN_VMSTAT_FLTVNPAGES(30); 4219 #ifdef DEBUG 4220 for (i = 0; i < pages; i++) { 4221 ASSERT(PAGE_LOCKED(ppa[i])); 4222 ASSERT(!PP_ISFREE(ppa[i])); 4223 ASSERT(page_pptonum(ppa[i]) == 4224 pfn + i); 4225 ASSERT(ppa[i]->p_szc == szc); 4226 ASSERT(ppa[i]->p_vnode == vp); 4227 ASSERT(ppa[i]->p_offset == 4228 off + (i << PAGESHIFT)); 4229 } 4230 #endif /* DEBUG */ 4231 /* 4232 * All pages are of szc we need and they are 4233 * all locked so they can't change szc. load 4234 * translations. 4235 * 4236 * if page got promoted since last check 4237 * we don't need pplist. 4238 */ 4239 if (pplist != NULL) { 4240 page_free_replacement_page(pplist); 4241 page_create_putback(pages); 4242 } 4243 if (PP_ISMIGRATE(ppa[0])) { 4244 page_migrate(seg, a, ppa, pages); 4245 } 4246 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4247 prot, vpprot); 4248 hat_memload_array_region(hat, a, pgsz, 4249 ppa, prot & vpprot, hat_flag, 4250 svd->rcookie); 4251 4252 if (!(hat_flag & HAT_LOAD_LOCK)) { 4253 for (i = 0; i < pages; i++) { 4254 page_unlock(ppa[i]); 4255 } 4256 } 4257 if (amp != NULL) { 4258 anon_array_exit(&an_cookie); 4259 ANON_LOCK_EXIT(&->a_rwlock); 4260 } 4261 goto next; 4262 } 4263 4264 /* 4265 * See if upsize is possible. 4266 */ 4267 if (pszc > szc && szc < seg->s_szc && 4268 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) { 4269 pgcnt_t aphase; 4270 uint_t pszc1 = MIN(pszc, seg->s_szc); 4271 ppgsz = page_get_pagesize(pszc1); 4272 ppages = btop(ppgsz); 4273 aphase = btop(P2PHASE((uintptr_t)a, ppgsz)); 4274 4275 ASSERT(type != F_SOFTLOCK); 4276 4277 SEGVN_VMSTAT_FLTVNPAGES(31); 4278 if (aphase != P2PHASE(pfn, ppages)) { 4279 segvn_faultvnmpss_align_err4++; 4280 } else { 4281 SEGVN_VMSTAT_FLTVNPAGES(32); 4282 if (pplist != NULL) { 4283 page_t *pl = pplist; 4284 page_free_replacement_page(pl); 4285 page_create_putback(pages); 4286 } 4287 for (i = 0; i < pages; i++) { 4288 page_unlock(ppa[i]); 4289 } 4290 if (amp != NULL) { 4291 anon_array_exit(&an_cookie); 4292 ANON_LOCK_EXIT(&->a_rwlock); 4293 } 4294 pszc = pszc1; 4295 ierr = -2; 4296 break; 4297 } 4298 } 4299 4300 /* 4301 * check if we should use smallest mapping size. 4302 */ 4303 upgrdfail = 0; 4304 if (szc == 0 || 4305 (pszc >= szc && 4306 !IS_P2ALIGNED(pfn, pages)) || 4307 (pszc < szc && 4308 !segvn_full_szcpages(ppa, szc, &upgrdfail, 4309 &pszc))) { 4310 4311 if (upgrdfail && type != F_SOFTLOCK) { 4312 /* 4313 * segvn_full_szcpages failed to lock 4314 * all pages EXCL. Size down. 
4315 */ 4316 ASSERT(pszc < szc); 4317 4318 SEGVN_VMSTAT_FLTVNPAGES(33); 4319 4320 if (pplist != NULL) { 4321 page_t *pl = pplist; 4322 page_free_replacement_page(pl); 4323 page_create_putback(pages); 4324 } 4325 4326 for (i = 0; i < pages; i++) { 4327 page_unlock(ppa[i]); 4328 } 4329 if (amp != NULL) { 4330 anon_array_exit(&an_cookie); 4331 ANON_LOCK_EXIT(&->a_rwlock); 4332 } 4333 ierr = -1; 4334 break; 4335 } 4336 if (szc != 0 && !upgrdfail) { 4337 segvn_faultvnmpss_align_err5++; 4338 } 4339 SEGVN_VMSTAT_FLTVNPAGES(34); 4340 if (pplist != NULL) { 4341 page_free_replacement_page(pplist); 4342 page_create_putback(pages); 4343 } 4344 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4345 prot, vpprot); 4346 if (upgrdfail && segvn_anypgsz_vnode) { 4347 /* SOFTLOCK case */ 4348 hat_memload_array_region(hat, a, pgsz, 4349 ppa, prot & vpprot, hat_flag, 4350 svd->rcookie); 4351 } else { 4352 for (i = 0; i < pages; i++) { 4353 hat_memload_region(hat, 4354 a + (i << PAGESHIFT), 4355 ppa[i], prot & vpprot, 4356 hat_flag, svd->rcookie); 4357 } 4358 } 4359 if (!(hat_flag & HAT_LOAD_LOCK)) { 4360 for (i = 0; i < pages; i++) { 4361 page_unlock(ppa[i]); 4362 } 4363 } 4364 if (amp != NULL) { 4365 anon_array_exit(&an_cookie); 4366 ANON_LOCK_EXIT(&->a_rwlock); 4367 } 4368 goto next; 4369 } 4370 4371 if (pszc == szc) { 4372 /* 4373 * segvn_full_szcpages() upgraded pages szc. 4374 */ 4375 ASSERT(pszc == ppa[0]->p_szc); 4376 ASSERT(IS_P2ALIGNED(pfn, pages)); 4377 goto chkszc; 4378 } 4379 4380 if (pszc > szc) { 4381 kmutex_t *szcmtx; 4382 SEGVN_VMSTAT_FLTVNPAGES(35); 4383 /* 4384 * p_szc of ppa[0] can change since we haven't 4385 * locked all constituent pages. Call 4386 * page_lock_szc() to prevent szc changes. 4387 * This should be a rare case that happens when 4388 * multiple segments use a different page size 4389 * to map the same file offsets. 4390 */ 4391 szcmtx = page_szc_lock(ppa[0]); 4392 pszc = ppa[0]->p_szc; 4393 ASSERT(szcmtx != NULL || pszc == 0); 4394 ASSERT(ppa[0]->p_szc <= pszc); 4395 if (pszc <= szc) { 4396 SEGVN_VMSTAT_FLTVNPAGES(36); 4397 if (szcmtx != NULL) { 4398 mutex_exit(szcmtx); 4399 } 4400 goto chkszc; 4401 } 4402 if (pplist != NULL) { 4403 /* 4404 * page got promoted since last check. 4405 * we don't need preaalocated large 4406 * page. 4407 */ 4408 SEGVN_VMSTAT_FLTVNPAGES(37); 4409 page_free_replacement_page(pplist); 4410 page_create_putback(pages); 4411 } 4412 SEGVN_UPDATE_MODBITS(ppa, pages, rw, 4413 prot, vpprot); 4414 hat_memload_array_region(hat, a, pgsz, ppa, 4415 prot & vpprot, hat_flag, svd->rcookie); 4416 mutex_exit(szcmtx); 4417 if (!(hat_flag & HAT_LOAD_LOCK)) { 4418 for (i = 0; i < pages; i++) { 4419 page_unlock(ppa[i]); 4420 } 4421 } 4422 if (amp != NULL) { 4423 anon_array_exit(&an_cookie); 4424 ANON_LOCK_EXIT(&->a_rwlock); 4425 } 4426 goto next; 4427 } 4428 4429 /* 4430 * if page got demoted since last check 4431 * we could have not allocated larger page. 4432 * allocate now. 
4433 */ 4434 if (pplist == NULL && 4435 page_alloc_pages(vp, seg, a, &pplist, NULL, 4436 szc, 0, 0) && type != F_SOFTLOCK) { 4437 SEGVN_VMSTAT_FLTVNPAGES(38); 4438 for (i = 0; i < pages; i++) { 4439 page_unlock(ppa[i]); 4440 } 4441 if (amp != NULL) { 4442 anon_array_exit(&an_cookie); 4443 ANON_LOCK_EXIT(&->a_rwlock); 4444 } 4445 ierr = -1; 4446 alloc_failed |= (1 << szc); 4447 break; 4448 } 4449 4450 SEGVN_VMSTAT_FLTVNPAGES(39); 4451 4452 if (pplist != NULL) { 4453 segvn_relocate_pages(ppa, pplist); 4454 #ifdef DEBUG 4455 } else { 4456 ASSERT(type == F_SOFTLOCK); 4457 SEGVN_VMSTAT_FLTVNPAGES(40); 4458 #endif /* DEBUG */ 4459 } 4460 4461 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot); 4462 4463 if (pplist == NULL && segvn_anypgsz_vnode == 0) { 4464 ASSERT(type == F_SOFTLOCK); 4465 for (i = 0; i < pages; i++) { 4466 ASSERT(ppa[i]->p_szc < szc); 4467 hat_memload_region(hat, 4468 a + (i << PAGESHIFT), 4469 ppa[i], prot & vpprot, hat_flag, 4470 svd->rcookie); 4471 } 4472 } else { 4473 ASSERT(pplist != NULL || type == F_SOFTLOCK); 4474 hat_memload_array_region(hat, a, pgsz, ppa, 4475 prot & vpprot, hat_flag, svd->rcookie); 4476 } 4477 if (!(hat_flag & HAT_LOAD_LOCK)) { 4478 for (i = 0; i < pages; i++) { 4479 ASSERT(PAGE_SHARED(ppa[i])); 4480 page_unlock(ppa[i]); 4481 } 4482 } 4483 if (amp != NULL) { 4484 anon_array_exit(&an_cookie); 4485 ANON_LOCK_EXIT(&->a_rwlock); 4486 } 4487 4488 next: 4489 if (vpage != NULL) { 4490 vpage += pages; 4491 } 4492 adjszc_chk = 1; 4493 } 4494 if (a == lpgeaddr) 4495 break; 4496 ASSERT(a < lpgeaddr); 4497 4498 ASSERT(!brkcow && !tron && type != F_SOFTLOCK); 4499 4500 /* 4501 * ierr == -1 means we failed to map with a large page. 4502 * (either due to allocation/relocation failures or 4503 * misalignment with other mappings to this file. 4504 * 4505 * ierr == -2 means some other thread allocated a large page 4506 * after we gave up tp map with a large page. retry with 4507 * larger mapping. 4508 */ 4509 ASSERT(ierr == -1 || ierr == -2); 4510 ASSERT(ierr == -2 || szc != 0); 4511 ASSERT(ierr == -1 || szc < seg->s_szc); 4512 if (ierr == -2) { 4513 SEGVN_VMSTAT_FLTVNPAGES(41); 4514 ASSERT(pszc > szc && pszc <= seg->s_szc); 4515 szc = pszc; 4516 } else if (segvn_anypgsz_vnode) { 4517 SEGVN_VMSTAT_FLTVNPAGES(42); 4518 szc--; 4519 } else { 4520 SEGVN_VMSTAT_FLTVNPAGES(43); 4521 ASSERT(pszc < szc); 4522 /* 4523 * other process created pszc large page. 4524 * but we still have to drop to 0 szc. 4525 */ 4526 szc = 0; 4527 } 4528 4529 pgsz = page_get_pagesize(szc); 4530 pages = btop(pgsz); 4531 if (ierr == -2) { 4532 /* 4533 * Size up case. Note lpgaddr may only be needed for 4534 * softlock case so we don't adjust it here. 4535 */ 4536 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4537 ASSERT(a >= lpgaddr); 4538 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4539 off = svd->offset + (uintptr_t)(a - seg->s_base); 4540 aindx = svd->anon_index + seg_page(seg, a); 4541 vpage = (svd->vpage != NULL) ? 4542 &svd->vpage[seg_page(seg, a)] : NULL; 4543 } else { 4544 /* 4545 * Size down case. Note lpgaddr may only be needed for 4546 * softlock case so we don't adjust it here. 4547 */ 4548 ASSERT(IS_P2ALIGNED(a, pgsz)); 4549 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4550 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4551 ASSERT(a < lpgeaddr); 4552 if (a < addr) { 4553 SEGVN_VMSTAT_FLTVNPAGES(44); 4554 /* 4555 * The beginning of the large page region can 4556 * be pulled to the right to make a smaller 4557 * region. We haven't yet faulted a single 4558 * page. 
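 *
 * [Editorial aside, not part of the original file]  A worked example of the
 * size-down adjustment below, with hypothetical values (8K base pages,
 * dropping to a 64K mapping size, addr = 0x10026000, eaddr = 0x10072000):
 *
 *	a        = (caddr_t)P2ALIGN(0x10026000, 0x10000);	/* 0x10020000 */
 *	lpgeaddr = (caddr_t)P2ROUNDUP(0x10072000, 0x10000);	/* 0x10080000 */
 *
 * i.e. the retry loop restarts at the 64K boundary at or below the first
 * unfaulted address and ends at the 64K boundary at or above eaddr.
 *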
4559 */ 4560 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4561 ASSERT(a >= lpgaddr); 4562 off = svd->offset + 4563 (uintptr_t)(a - seg->s_base); 4564 aindx = svd->anon_index + seg_page(seg, a); 4565 vpage = (svd->vpage != NULL) ? 4566 &svd->vpage[seg_page(seg, a)] : NULL; 4567 } 4568 } 4569 } 4570 out: 4571 kmem_free(ppa, ppasize); 4572 if (!err && !vop_size_err) { 4573 SEGVN_VMSTAT_FLTVNPAGES(45); 4574 return (0); 4575 } 4576 if (type == F_SOFTLOCK && a > lpgaddr) { 4577 SEGVN_VMSTAT_FLTVNPAGES(46); 4578 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4579 } 4580 if (!vop_size_err) { 4581 SEGVN_VMSTAT_FLTVNPAGES(47); 4582 return (err); 4583 } 4584 ASSERT(brkcow || tron || type == F_SOFTLOCK); 4585 /* 4586 * Large page end is mapped beyond the end of file and it's a cow 4587 * fault (can be a text replication induced cow) or softlock so we can't 4588 * reduce the map area. For now just demote the segment. This should 4589 * really only happen if the end of the file changed after the mapping 4590 * was established since when large page segments are created we make 4591 * sure they don't extend beyond the end of the file. 4592 */ 4593 SEGVN_VMSTAT_FLTVNPAGES(48); 4594 4595 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4596 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4597 err = 0; 4598 if (seg->s_szc != 0) { 4599 segvn_fltvnpages_clrszc_cnt++; 4600 ASSERT(svd->softlockcnt == 0); 4601 err = segvn_clrszc(seg); 4602 if (err != 0) { 4603 segvn_fltvnpages_clrszc_err++; 4604 } 4605 } 4606 ASSERT(err || seg->s_szc == 0); 4607 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock); 4608 /* segvn_fault will do its job as if szc had been zero to begin with */ 4609 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err)); 4610 } 4611 4612 /* 4613 * This routine will attempt to fault in one large page. 4614 * it will use smaller pages if that fails. 4615 * It should only be called for pure anonymous segments. 4616 */ 4617 static faultcode_t 4618 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr, 4619 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr, 4620 caddr_t eaddr, int brkcow) 4621 { 4622 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4623 struct anon_map *amp = svd->amp; 4624 uchar_t segtype = svd->type; 4625 uint_t szc = seg->s_szc; 4626 size_t pgsz = page_get_pagesize(szc); 4627 size_t maxpgsz = pgsz; 4628 pgcnt_t pages = btop(pgsz); 4629 uint_t ppaszc = szc; 4630 caddr_t a = lpgaddr; 4631 ulong_t aindx = svd->anon_index + seg_page(seg, a); 4632 struct vpage *vpage = (svd->vpage != NULL) ? 4633 &svd->vpage[seg_page(seg, a)] : NULL; 4634 page_t **ppa; 4635 uint_t ppa_szc; 4636 faultcode_t err; 4637 int ierr; 4638 uint_t protchk, prot, vpprot; 4639 ulong_t i; 4640 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD; 4641 anon_sync_obj_t cookie; 4642 int adjszc_chk; 4643 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? 
PG_LOCAL : 0; 4644 4645 ASSERT(szc != 0); 4646 ASSERT(amp != NULL); 4647 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */ 4648 ASSERT(!(svd->flags & MAP_NORESERVE)); 4649 ASSERT(type != F_SOFTUNLOCK); 4650 ASSERT(IS_P2ALIGNED(a, maxpgsz)); 4651 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF); 4652 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4653 4654 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 4655 4656 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]); 4657 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]); 4658 4659 if (svd->flags & MAP_TEXT) { 4660 hat_flag |= HAT_LOAD_TEXT; 4661 } 4662 4663 if (svd->pageprot) { 4664 switch (rw) { 4665 case S_READ: 4666 protchk = PROT_READ; 4667 break; 4668 case S_WRITE: 4669 protchk = PROT_WRITE; 4670 break; 4671 case S_EXEC: 4672 protchk = PROT_EXEC; 4673 break; 4674 case S_OTHER: 4675 default: 4676 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 4677 break; 4678 } 4679 VM_STAT_ADD(segvnvmstats.fltanpages[2]); 4680 } else { 4681 prot = svd->prot; 4682 /* caller has already done segment level protection check. */ 4683 } 4684 4685 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP); 4686 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 4687 for (;;) { 4688 adjszc_chk = 0; 4689 for (; a < lpgeaddr; a += pgsz, aindx += pages) { 4690 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) { 4691 VM_STAT_ADD(segvnvmstats.fltanpages[3]); 4692 ASSERT(vpage != NULL); 4693 prot = VPP_PROT(vpage); 4694 ASSERT(sameprot(seg, a, maxpgsz)); 4695 if ((prot & protchk) == 0) { 4696 err = FC_PROT; 4697 goto error; 4698 } 4699 } 4700 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) && 4701 pgsz < maxpgsz) { 4702 ASSERT(a > lpgaddr); 4703 szc = seg->s_szc; 4704 pgsz = maxpgsz; 4705 pages = btop(pgsz); 4706 ASSERT(IS_P2ALIGNED(aindx, pages)); 4707 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, 4708 pgsz); 4709 } 4710 if (type == F_SOFTLOCK) { 4711 atomic_add_long((ulong_t *)&svd->softlockcnt, 4712 pages); 4713 } 4714 anon_array_enter(amp, aindx, &cookie); 4715 ppa_szc = (uint_t)-1; 4716 ierr = anon_map_getpages(amp, aindx, szc, seg, a, 4717 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow, 4718 segvn_anypgsz, pgflags, svd->cred); 4719 if (ierr != 0) { 4720 anon_array_exit(&cookie); 4721 VM_STAT_ADD(segvnvmstats.fltanpages[4]); 4722 if (type == F_SOFTLOCK) { 4723 atomic_add_long( 4724 (ulong_t *)&svd->softlockcnt, 4725 -pages); 4726 } 4727 if (ierr > 0) { 4728 VM_STAT_ADD(segvnvmstats.fltanpages[6]); 4729 err = FC_MAKE_ERR(ierr); 4730 goto error; 4731 } 4732 break; 4733 } 4734 4735 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode)); 4736 4737 ASSERT(segtype == MAP_SHARED || 4738 ppa[0]->p_szc <= szc); 4739 ASSERT(segtype == MAP_PRIVATE || 4740 ppa[0]->p_szc >= szc); 4741 4742 /* 4743 * Handle pages that have been marked for migration 4744 */ 4745 if (lgrp_optimizations()) 4746 page_migrate(seg, a, ppa, pages); 4747 4748 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 4749 4750 if (segtype == MAP_SHARED) { 4751 vpprot |= PROT_WRITE; 4752 } 4753 4754 hat_memload_array(hat, a, pgsz, ppa, 4755 prot & vpprot, hat_flag); 4756 4757 if (hat_flag & HAT_LOAD_LOCK) { 4758 VM_STAT_ADD(segvnvmstats.fltanpages[7]); 4759 } else { 4760 VM_STAT_ADD(segvnvmstats.fltanpages[8]); 4761 for (i = 0; i < pages; i++) 4762 page_unlock(ppa[i]); 4763 } 4764 if (vpage != NULL) 4765 vpage += pages; 4766 4767 anon_array_exit(&cookie); 4768 adjszc_chk = 1; 4769 } 4770 if (a == lpgeaddr) 4771 break; 4772 ASSERT(a < lpgeaddr); 4773 /* 4774 * ierr == -1 means we failed to 
allocate a large page. 4775 * so do a size down operation. 4776 * 4777 * ierr == -2 means some other process that privately shares 4778 * pages with this process has allocated a larger page and we 4779 * need to retry with larger pages. So do a size up 4780 * operation. This relies on the fact that large pages are 4781 * never partially shared i.e. if we share any constituent 4782 * page of a large page with another process we must share the 4783 * entire large page. Note this cannot happen for SOFTLOCK 4784 * case, unless current address (a) is at the beginning of the 4785 * next page size boundary because the other process couldn't 4786 * have relocated locked pages. 4787 */ 4788 ASSERT(ierr == -1 || ierr == -2); 4789 4790 if (segvn_anypgsz) { 4791 ASSERT(ierr == -2 || szc != 0); 4792 ASSERT(ierr == -1 || szc < seg->s_szc); 4793 szc = (ierr == -1) ? szc - 1 : szc + 1; 4794 } else { 4795 /* 4796 * For non COW faults and segvn_anypgsz == 0 4797 * we need to be careful not to loop forever 4798 * if existing page is found with szc other 4799 * than 0 or seg->s_szc. This could be due 4800 * to page relocations on behalf of DR or 4801 * more likely large page creation. For this 4802 * case simply re-size to existing page's szc 4803 * if returned by anon_map_getpages(). 4804 */ 4805 if (ppa_szc == (uint_t)-1) { 4806 szc = (ierr == -1) ? 0 : seg->s_szc; 4807 } else { 4808 ASSERT(ppa_szc <= seg->s_szc); 4809 ASSERT(ierr == -2 || ppa_szc < szc); 4810 ASSERT(ierr == -1 || ppa_szc > szc); 4811 szc = ppa_szc; 4812 } 4813 } 4814 4815 pgsz = page_get_pagesize(szc); 4816 pages = btop(pgsz); 4817 ASSERT(type != F_SOFTLOCK || ierr == -1 || 4818 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz))); 4819 if (type == F_SOFTLOCK) { 4820 /* 4821 * For softlocks we cannot reduce the fault area 4822 * (calculated based on the largest page size for this 4823 * segment) for size down and a is already next 4824 * page size aligned as assertted above for size 4825 * ups. Therefore just continue in case of softlock. 4826 */ 4827 VM_STAT_ADD(segvnvmstats.fltanpages[9]); 4828 continue; /* keep lint happy */ 4829 } else if (ierr == -2) { 4830 4831 /* 4832 * Size up case. Note lpgaddr may only be needed for 4833 * softlock case so we don't adjust it here. 4834 */ 4835 VM_STAT_ADD(segvnvmstats.fltanpages[10]); 4836 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz); 4837 ASSERT(a >= lpgaddr); 4838 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4839 aindx = svd->anon_index + seg_page(seg, a); 4840 vpage = (svd->vpage != NULL) ? 4841 &svd->vpage[seg_page(seg, a)] : NULL; 4842 } else { 4843 /* 4844 * Size down case. Note lpgaddr may only be needed for 4845 * softlock case so we don't adjust it here. 4846 */ 4847 VM_STAT_ADD(segvnvmstats.fltanpages[11]); 4848 ASSERT(IS_P2ALIGNED(a, pgsz)); 4849 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz)); 4850 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz); 4851 ASSERT(a < lpgeaddr); 4852 if (a < addr) { 4853 /* 4854 * The beginning of the large page region can 4855 * be pulled to the right to make a smaller 4856 * region. We haven't yet faulted a single 4857 * page. 4858 */ 4859 VM_STAT_ADD(segvnvmstats.fltanpages[12]); 4860 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 4861 ASSERT(a >= lpgaddr); 4862 aindx = svd->anon_index + seg_page(seg, a); 4863 vpage = (svd->vpage != NULL) ? 
4864 &svd->vpage[seg_page(seg, a)] : NULL; 4865 } 4866 } 4867 } 4868 VM_STAT_ADD(segvnvmstats.fltanpages[13]); 4869 ANON_LOCK_EXIT(&->a_rwlock); 4870 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4871 return (0); 4872 error: 4873 VM_STAT_ADD(segvnvmstats.fltanpages[14]); 4874 ANON_LOCK_EXIT(&->a_rwlock); 4875 kmem_cache_free(segvn_szc_cache[ppaszc], ppa); 4876 if (type == F_SOFTLOCK && a > lpgaddr) { 4877 VM_STAT_ADD(segvnvmstats.fltanpages[15]); 4878 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER); 4879 } 4880 return (err); 4881 } 4882 4883 int fltadvice = 1; /* set to free behind pages for sequential access */ 4884 4885 /* 4886 * This routine is called via a machine specific fault handling routine. 4887 * It is also called by software routines wishing to lock or unlock 4888 * a range of addresses. 4889 * 4890 * Here is the basic algorithm: 4891 * If unlocking 4892 * Call segvn_softunlock 4893 * Return 4894 * endif 4895 * Checking and set up work 4896 * If we will need some non-anonymous pages 4897 * Call VOP_GETPAGE over the range of non-anonymous pages 4898 * endif 4899 * Loop over all addresses requested 4900 * Call segvn_faultpage passing in page list 4901 * to load up translations and handle anonymous pages 4902 * endloop 4903 * Load up translation to any additional pages in page list not 4904 * already handled that fit into this segment 4905 */ 4906 static faultcode_t 4907 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len, 4908 enum fault_type type, enum seg_rw rw) 4909 { 4910 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 4911 page_t **plp, **ppp, *pp; 4912 u_offset_t off; 4913 caddr_t a; 4914 struct vpage *vpage; 4915 uint_t vpprot, prot; 4916 int err; 4917 page_t *pl[FAULT_TMP_PAGES_NUM + 1]; 4918 size_t plsz, pl_alloc_sz; 4919 size_t page; 4920 ulong_t anon_index; 4921 struct anon_map *amp; 4922 int dogetpage = 0; 4923 caddr_t lpgaddr, lpgeaddr; 4924 size_t pgsz; 4925 anon_sync_obj_t cookie; 4926 int brkcow = BREAK_COW_SHARE(rw, type, svd->type); 4927 4928 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 4929 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE); 4930 4931 /* 4932 * First handle the easy stuff 4933 */ 4934 if (type == F_SOFTUNLOCK) { 4935 if (rw == S_READ_NOCOW) { 4936 rw = S_READ; 4937 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 4938 } 4939 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4940 pgsz = (seg->s_szc == 0) ? 
PAGESIZE : 4941 page_get_pagesize(seg->s_szc); 4942 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]); 4943 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 4944 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw); 4945 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4946 return (0); 4947 } 4948 4949 ASSERT(svd->tr_state == SEGVN_TR_OFF || 4950 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 4951 if (brkcow == 0) { 4952 if (svd->tr_state == SEGVN_TR_INIT) { 4953 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4954 if (svd->tr_state == SEGVN_TR_INIT) { 4955 ASSERT(svd->vp != NULL && svd->amp == NULL); 4956 ASSERT(svd->flags & MAP_TEXT); 4957 ASSERT(svd->type == MAP_PRIVATE); 4958 segvn_textrepl(seg); 4959 ASSERT(svd->tr_state != SEGVN_TR_INIT); 4960 ASSERT(svd->tr_state != SEGVN_TR_ON || 4961 svd->amp != NULL); 4962 } 4963 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4964 } 4965 } else if (svd->tr_state != SEGVN_TR_OFF) { 4966 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 4967 4968 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) { 4969 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 4970 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4971 return (FC_PROT); 4972 } 4973 4974 if (svd->tr_state == SEGVN_TR_ON) { 4975 ASSERT(svd->vp != NULL && svd->amp != NULL); 4976 segvn_textunrepl(seg, 0); 4977 ASSERT(svd->amp == NULL && 4978 svd->tr_state == SEGVN_TR_OFF); 4979 } else if (svd->tr_state != SEGVN_TR_OFF) { 4980 svd->tr_state = SEGVN_TR_OFF; 4981 } 4982 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 4983 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 4984 } 4985 4986 top: 4987 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 4988 4989 /* 4990 * If we have the same protections for the entire segment, 4991 * insure that the access being attempted is legitimate. 4992 */ 4993 4994 if (svd->pageprot == 0) { 4995 uint_t protchk; 4996 4997 switch (rw) { 4998 case S_READ: 4999 case S_READ_NOCOW: 5000 protchk = PROT_READ; 5001 break; 5002 case S_WRITE: 5003 protchk = PROT_WRITE; 5004 break; 5005 case S_EXEC: 5006 protchk = PROT_EXEC; 5007 break; 5008 case S_OTHER: 5009 default: 5010 protchk = PROT_READ | PROT_WRITE | PROT_EXEC; 5011 break; 5012 } 5013 5014 if ((svd->prot & protchk) == 0) { 5015 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5016 return (FC_PROT); /* illegal access type */ 5017 } 5018 } 5019 5020 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5021 /* this must be SOFTLOCK S_READ fault */ 5022 ASSERT(svd->amp == NULL); 5023 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5024 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5025 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5026 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5027 /* 5028 * this must be the first ever non S_READ_NOCOW 5029 * softlock for this segment. 5030 */ 5031 ASSERT(svd->softlockcnt == 0); 5032 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5033 HAT_REGION_TEXT); 5034 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5035 } 5036 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5037 goto top; 5038 } 5039 5040 /* 5041 * We can't allow the long term use of softlocks for vmpss segments, 5042 * because in some file truncation cases we should be able to demote 5043 * the segment, which requires that there are no softlocks. The 5044 * only case where it's ok to allow a SOFTLOCK fault against a vmpss 5045 * segment is S_READ_NOCOW, where the caller holds the address space 5046 * locked as writer and calls softunlock before dropping the as lock. 5047 * S_READ_NOCOW is used by /proc to read memory from another user. 
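 *
 * [Editorial aside, not part of the original file]  For reference, the
 * F_SOFTLOCK/F_SOFTUNLOCK pairing discussed here is used by a caller along
 * these lines (do_io() is a hypothetical placeholder and the real /proc
 * code differs in detail):
 *
 *	AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
 *	if (as_fault(as->a_hat, as, uaddr, len, F_SOFTLOCK,
 *	    S_READ_NOCOW) == 0) {
 *		error = do_io(uaddr, len);	/* pages can't go away here */
 *		(void) as_fault(as->a_hat, as, uaddr, len, F_SOFTUNLOCK,
 *		    S_OTHER);
 *	}
 *	AS_LOCK_EXIT(as, &as->a_lock);
 *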
5048 * 5049 * Another deadlock between SOFTLOCK and file truncation can happen 5050 * because segvn_fault_vnodepages() calls the FS one pagesize at 5051 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages() 5052 * can cause a deadlock because the first set of page_t's remain 5053 * locked SE_SHARED. To avoid this, we demote segments on a first 5054 * SOFTLOCK if they have a length greater than the segment's 5055 * page size. 5056 * 5057 * So for now, we only avoid demoting a segment on a SOFTLOCK when 5058 * the access type is S_READ_NOCOW and the fault length is less than 5059 * or equal to the segment's page size. While this is quite restrictive, 5060 * it should be the most common case of SOFTLOCK against a vmpss 5061 * segment. 5062 * 5063 * For S_READ_NOCOW, it's safe not to do a copy on write because the 5064 * caller makes sure no COW will be caused by another thread for a 5065 * softlocked page. 5066 */ 5067 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) { 5068 int demote = 0; 5069 5070 if (rw != S_READ_NOCOW) { 5071 demote = 1; 5072 } 5073 if (!demote && len > PAGESIZE) { 5074 pgsz = page_get_pagesize(seg->s_szc); 5075 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, 5076 lpgeaddr); 5077 if (lpgeaddr - lpgaddr > pgsz) { 5078 demote = 1; 5079 } 5080 } 5081 5082 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5083 5084 if (demote) { 5085 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5086 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5087 if (seg->s_szc != 0) { 5088 segvn_vmpss_clrszc_cnt++; 5089 ASSERT(svd->softlockcnt == 0); 5090 err = segvn_clrszc(seg); 5091 if (err) { 5092 segvn_vmpss_clrszc_err++; 5093 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5094 return (FC_MAKE_ERR(err)); 5095 } 5096 } 5097 ASSERT(seg->s_szc == 0); 5098 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5099 goto top; 5100 } 5101 } 5102 5103 /* 5104 * Check to see if we need to allocate an anon_map structure. 5105 */ 5106 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) { 5107 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5108 /* 5109 * Drop the "read" lock on the segment and acquire 5110 * the "write" version since we have to allocate the 5111 * anon_map. 5112 */ 5113 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5114 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5115 5116 if (svd->amp == NULL) { 5117 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 5118 svd->amp->a_szc = seg->s_szc; 5119 } 5120 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5121 5122 /* 5123 * Start all over again since segment protections 5124 * may have changed after we dropped the "read" lock. 5125 */ 5126 goto top; 5127 } 5128 5129 /* 5130 * S_READ_NOCOW vs S_READ distinction was 5131 * only needed for the code above. After 5132 * that we treat it as S_READ. 5133 */ 5134 if (rw == S_READ_NOCOW) { 5135 ASSERT(type == F_SOFTLOCK); 5136 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 5137 rw = S_READ; 5138 } 5139 5140 amp = svd->amp; 5141 5142 /* 5143 * MADV_SEQUENTIAL work is ignored for large page segments. 
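 *
 * [Editorial aside, not part of the original file]  CALC_LPG_REGION(), used
 * just below, is defined in the VM headers; conceptually it computes the
 * large-page-aligned region enclosing the faulting range, roughly:
 *
 *	lpgaddr  = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
 *	lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)(addr + len), pgsz);
 *
 * (the real macro also keeps the result within the segment boundaries).
 *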
5144 */ 5145 if (seg->s_szc != 0) { 5146 pgsz = page_get_pagesize(seg->s_szc); 5147 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 5148 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 5149 if (svd->vp == NULL) { 5150 err = segvn_fault_anonpages(hat, seg, lpgaddr, 5151 lpgeaddr, type, rw, addr, addr + len, brkcow); 5152 } else { 5153 err = segvn_fault_vnodepages(hat, seg, lpgaddr, 5154 lpgeaddr, type, rw, addr, addr + len, brkcow); 5155 if (err == IE_RETRY) { 5156 ASSERT(seg->s_szc == 0); 5157 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock)); 5158 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5159 goto top; 5160 } 5161 } 5162 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5163 return (err); 5164 } 5165 5166 page = seg_page(seg, addr); 5167 if (amp != NULL) { 5168 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 5169 anon_index = svd->anon_index + page; 5170 5171 if (type == F_PROT && rw == S_READ && 5172 svd->tr_state == SEGVN_TR_OFF && 5173 svd->type == MAP_PRIVATE && svd->pageprot == 0) { 5174 size_t index = anon_index; 5175 struct anon *ap; 5176 5177 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 5178 /* 5179 * The fast path could apply to S_WRITE also, except 5180 * that the protection fault could be caused by lazy 5181 * tlb flush when ro->rw. In this case, the pte is 5182 * RW already. But RO in the other cpu's tlb causes 5183 * the fault. Since hat_chgprot won't do anything if 5184 * pte doesn't change, we may end up faulting 5185 * indefinitely until the RO tlb entry gets replaced. 5186 */ 5187 for (a = addr; a < addr + len; a += PAGESIZE, index++) { 5188 anon_array_enter(amp, index, &cookie); 5189 ap = anon_get_ptr(amp->ahp, index); 5190 anon_array_exit(&cookie); 5191 if ((ap == NULL) || (ap->an_refcnt != 1)) { 5192 ANON_LOCK_EXIT(&amp->a_rwlock); 5193 goto slow; 5194 } 5195 } 5196 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot); 5197 ANON_LOCK_EXIT(&amp->a_rwlock); 5198 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5199 return (0); 5200 } 5201 } 5202 slow: 5203 5204 if (svd->vpage == NULL) 5205 vpage = NULL; 5206 else 5207 vpage = &svd->vpage[page]; 5208 5209 off = svd->offset + (uintptr_t)(addr - seg->s_base); 5210 5211 /* 5212 * If MADV_SEQUENTIAL has been set for the particular page we 5213 * are faulting on, free behind all pages in the segment and put 5214 * them on the free list.
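 *
 * [Editorial aside, not part of the original file]  The free-behind path
 * below is driven by advice set from user level, e.g. (illustrative only;
 * fd is assumed to be an open file descriptor):
 *
 *	char *p, total = 0;
 *	long pgsz = sysconf(_SC_PAGESIZE);
 *
 *	p = mmap(NULL, sz, PROT_READ, MAP_PRIVATE, fd, 0);
 *	(void) madvise(p, sz, MADV_SEQUENTIAL);
 *	for (off_t o = 0; o < sz; o += pgsz)
 *		total += p[o];		/* forward scan; kernel frees behind */
 *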
5215 */ 5216 5217 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) { 5218 struct vpage *vpp; 5219 ulong_t fanon_index; 5220 size_t fpage; 5221 u_offset_t pgoff, fpgoff; 5222 struct vnode *fvp; 5223 struct anon *fap = NULL; 5224 5225 if (svd->advice == MADV_SEQUENTIAL || 5226 (svd->pageadvice && 5227 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) { 5228 pgoff = off - PAGESIZE; 5229 fpage = page - 1; 5230 if (vpage != NULL) 5231 vpp = &svd->vpage[fpage]; 5232 if (amp != NULL) 5233 fanon_index = svd->anon_index + fpage; 5234 5235 while (pgoff > svd->offset) { 5236 if (svd->advice != MADV_SEQUENTIAL && 5237 (!svd->pageadvice || (vpage && 5238 VPP_ADVICE(vpp) != MADV_SEQUENTIAL))) 5239 break; 5240 5241 /* 5242 * If this is an anon page, we must find the 5243 * correct <vp, offset> for it 5244 */ 5245 fap = NULL; 5246 if (amp != NULL) { 5247 ANON_LOCK_ENTER(&->a_rwlock, 5248 RW_READER); 5249 anon_array_enter(amp, fanon_index, 5250 &cookie); 5251 fap = anon_get_ptr(amp->ahp, 5252 fanon_index); 5253 if (fap != NULL) { 5254 swap_xlate(fap, &fvp, &fpgoff); 5255 } else { 5256 fpgoff = pgoff; 5257 fvp = svd->vp; 5258 } 5259 anon_array_exit(&cookie); 5260 ANON_LOCK_EXIT(&->a_rwlock); 5261 } else { 5262 fpgoff = pgoff; 5263 fvp = svd->vp; 5264 } 5265 if (fvp == NULL) 5266 break; /* XXX */ 5267 /* 5268 * Skip pages that are free or have an 5269 * "exclusive" lock. 5270 */ 5271 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED); 5272 if (pp == NULL) 5273 break; 5274 /* 5275 * We don't need the page_struct_lock to test 5276 * as this is only advisory; even if we 5277 * acquire it someone might race in and lock 5278 * the page after we unlock and before the 5279 * PUTPAGE, then VOP_PUTPAGE will do nothing. 5280 */ 5281 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) { 5282 /* 5283 * Hold the vnode before releasing 5284 * the page lock to prevent it from 5285 * being freed and re-used by some 5286 * other thread. 5287 */ 5288 VN_HOLD(fvp); 5289 page_unlock(pp); 5290 /* 5291 * We should build a page list 5292 * to kluster putpages XXX 5293 */ 5294 (void) VOP_PUTPAGE(fvp, 5295 (offset_t)fpgoff, PAGESIZE, 5296 (B_DONTNEED|B_FREE|B_ASYNC), 5297 svd->cred, NULL); 5298 VN_RELE(fvp); 5299 } else { 5300 /* 5301 * XXX - Should the loop terminate if 5302 * the page is `locked'? 5303 */ 5304 page_unlock(pp); 5305 } 5306 --vpp; 5307 --fanon_index; 5308 pgoff -= PAGESIZE; 5309 } 5310 } 5311 } 5312 5313 plp = pl; 5314 *plp = NULL; 5315 pl_alloc_sz = 0; 5316 5317 /* 5318 * See if we need to call VOP_GETPAGE for 5319 * *any* of the range being faulted on. 5320 * We can skip all of this work if there 5321 * was no original vnode. 5322 */ 5323 if (svd->vp != NULL) { 5324 u_offset_t vp_off; 5325 size_t vp_len; 5326 struct anon *ap; 5327 vnode_t *vp; 5328 5329 vp_off = off; 5330 vp_len = len; 5331 5332 if (amp == NULL) 5333 dogetpage = 1; 5334 else { 5335 /* 5336 * Only acquire reader lock to prevent amp->ahp 5337 * from being changed. It's ok to miss pages, 5338 * hence we don't do anon_array_enter 5339 */ 5340 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5341 ap = anon_get_ptr(amp->ahp, anon_index); 5342 5343 if (len <= PAGESIZE) 5344 /* inline non_anon() */ 5345 dogetpage = (ap == NULL); 5346 else 5347 dogetpage = non_anon(amp->ahp, anon_index, 5348 &vp_off, &vp_len); 5349 ANON_LOCK_EXIT(&->a_rwlock); 5350 } 5351 5352 if (dogetpage) { 5353 enum seg_rw arw; 5354 struct as *as = seg->s_as; 5355 5356 if (len > FAULT_TMP_PAGES_SZ) { 5357 /* 5358 * Page list won't fit in local array, 5359 * allocate one of the needed size. 
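 *
 * [Editorial aside, not part of the original file]  A worked example of the
 * allocation below: for a 1 MB fault length on a kernel with 4K pages,
 * btop(len) is 256, so
 *
 *	pl_alloc_sz = (256 + 1) * sizeof (page_t *);	/* 2056 bytes, 64-bit */
 *
 * i.e. one pointer per page plus an extra slot so the list can stay
 * NULL-terminated, as the loop over plp later in this function assumes.
 *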
5360 */ 5361 pl_alloc_sz = 5362 (btop(len) + 1) * sizeof (page_t *); 5363 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP); 5364 plp[0] = NULL; 5365 plsz = len; 5366 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE || 5367 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER || 5368 (((size_t)(addr + PAGESIZE) < 5369 (size_t)(seg->s_base + seg->s_size)) && 5370 hat_probe(as->a_hat, addr + PAGESIZE))) { 5371 /* 5372 * Ask VOP_GETPAGE to return the exact number 5373 * of pages if 5374 * (a) this is a COW fault, or 5375 * (b) this is a software fault, or 5376 * (c) next page is already mapped. 5377 */ 5378 plsz = len; 5379 } else { 5380 /* 5381 * Ask VOP_GETPAGE to return adjacent pages 5382 * within the segment. 5383 */ 5384 plsz = MIN((size_t)FAULT_TMP_PAGES_SZ, (size_t) 5385 ((seg->s_base + seg->s_size) - addr)); 5386 ASSERT((addr + plsz) <= 5387 (seg->s_base + seg->s_size)); 5388 } 5389 5390 /* 5391 * Need to get some non-anonymous pages. 5392 * We need to make only one call to GETPAGE to do 5393 * this to prevent certain deadlocking conditions 5394 * when we are doing locking. In this case 5395 * non_anon() should have picked up the smallest 5396 * range which includes all the non-anonymous 5397 * pages in the requested range. We have to 5398 * be careful regarding which rw flag to pass in 5399 * because on a private mapping, the underlying 5400 * object is never allowed to be written. 5401 */ 5402 if (rw == S_WRITE && svd->type == MAP_PRIVATE) { 5403 arw = S_READ; 5404 } else { 5405 arw = rw; 5406 } 5407 vp = svd->vp; 5408 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5409 "segvn_getpage:seg %p addr %p vp %p", 5410 seg, addr, vp); 5411 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len, 5412 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw, 5413 svd->cred, NULL); 5414 if (err) { 5415 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5416 segvn_pagelist_rele(plp); 5417 if (pl_alloc_sz) 5418 kmem_free(plp, pl_alloc_sz); 5419 return (FC_MAKE_ERR(err)); 5420 } 5421 if (svd->type == MAP_PRIVATE) 5422 vpprot &= ~PROT_WRITE; 5423 } 5424 } 5425 5426 /* 5427 * N.B. at this time the plp array has all the needed non-anon 5428 * pages in addition to (possibly) having some adjacent pages. 5429 */ 5430 5431 /* 5432 * Always acquire the anon_array_lock to prevent 5433 * 2 threads from allocating separate anon slots for 5434 * the same "addr". 5435 * 5436 * If this is a copy-on-write fault and we don't already 5437 * have the anon_array_lock, acquire it to prevent the 5438 * fault routine from handling multiple copy-on-write faults 5439 * on the same "addr" in the same address space. 5440 * 5441 * Only one thread should deal with the fault since after 5442 * it is handled, the other threads can acquire a translation 5443 * to the newly created private page. This prevents two or 5444 * more threads from creating different private pages for the 5445 * same fault. 5446 * 5447 * We grab "serialization" lock here if this is a MAP_PRIVATE segment 5448 * to prevent deadlock between this thread and another thread 5449 * which has soft-locked this page and wants to acquire serial_lock. 5450 * ( bug 4026339 ) 5451 * 5452 * The fix for bug 4026339 becomes unnecessary when using the 5453 * locking scheme with per amp rwlock and a global set of hash 5454 * lock, anon_array_lock. If we steal a vnode page when low 5455 * on memory and upgrad the page lock through page_rename, 5456 * then the page is PAGE_HANDLED, nothing needs to be done 5457 * for this page after returning from segvn_faultpage. 
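 *
 * [Editorial aside, not part of the original file]  The per-index
 * serialization referred to above follows this pattern (simplified; the
 * real work happens in segvn_faultpage()):
 *
 *	anon_array_enter(amp, an_idx, &cookie);	/* one thread per index */
 *	ap = anon_get_ptr(amp->ahp, an_idx);
 *	if (ap == NULL) {
 *		/* first fault: allocate the private anon page/slot here */
 *	} else {
 *		/* COW already resolved; just load a translation to it */
 *	}
 *	anon_array_exit(&cookie);
 *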
5458 * 5459 * But really, the page lock should be downgraded after 5460 * the stolen page is page_rename'd. 5461 */ 5462 5463 if (amp != NULL) 5464 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5465 5466 /* 5467 * Ok, now loop over the address range and handle faults 5468 */ 5469 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) { 5470 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot, 5471 type, rw, brkcow); 5472 if (err) { 5473 if (amp != NULL) 5474 ANON_LOCK_EXIT(&->a_rwlock); 5475 if (type == F_SOFTLOCK && a > addr) { 5476 segvn_softunlock(seg, addr, (a - addr), 5477 S_OTHER); 5478 } 5479 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5480 segvn_pagelist_rele(plp); 5481 if (pl_alloc_sz) 5482 kmem_free(plp, pl_alloc_sz); 5483 return (err); 5484 } 5485 if (vpage) { 5486 vpage++; 5487 } else if (svd->vpage) { 5488 page = seg_page(seg, addr); 5489 vpage = &svd->vpage[++page]; 5490 } 5491 } 5492 5493 /* Didn't get pages from the underlying fs so we're done */ 5494 if (!dogetpage) 5495 goto done; 5496 5497 /* 5498 * Now handle any other pages in the list returned. 5499 * If the page can be used, load up the translations now. 5500 * Note that the for loop will only be entered if "plp" 5501 * is pointing to a non-NULL page pointer which means that 5502 * VOP_GETPAGE() was called and vpprot has been initialized. 5503 */ 5504 if (svd->pageprot == 0) 5505 prot = svd->prot & vpprot; 5506 5507 5508 /* 5509 * Large Files: diff should be unsigned value because we started 5510 * supporting > 2GB segment sizes from 2.5.1 and when a 5511 * large file of size > 2GB gets mapped to address space 5512 * the diff value can be > 2GB. 5513 */ 5514 5515 for (ppp = plp; (pp = *ppp) != NULL; ppp++) { 5516 size_t diff; 5517 struct anon *ap; 5518 int anon_index; 5519 anon_sync_obj_t cookie; 5520 int hat_flag = HAT_LOAD_ADV; 5521 5522 if (svd->flags & MAP_TEXT) { 5523 hat_flag |= HAT_LOAD_TEXT; 5524 } 5525 5526 if (pp == PAGE_HANDLED) 5527 continue; 5528 5529 if (svd->tr_state != SEGVN_TR_ON && 5530 pp->p_offset >= svd->offset && 5531 pp->p_offset < svd->offset + seg->s_size) { 5532 5533 diff = pp->p_offset - svd->offset; 5534 5535 /* 5536 * Large Files: Following is the assertion 5537 * validating the above cast. 5538 */ 5539 ASSERT(svd->vp == pp->p_vnode); 5540 5541 page = btop(diff); 5542 if (svd->pageprot) 5543 prot = VPP_PROT(&svd->vpage[page]) & vpprot; 5544 5545 /* 5546 * Prevent other threads in the address space from 5547 * creating private pages (i.e., allocating anon slots) 5548 * while we are in the process of loading translations 5549 * to additional pages returned by the underlying 5550 * object. 
5551 */ 5552 if (amp != NULL) { 5553 anon_index = svd->anon_index + page; 5554 anon_array_enter(amp, anon_index, &cookie); 5555 ap = anon_get_ptr(amp->ahp, anon_index); 5556 } 5557 if ((amp == NULL) || (ap == NULL)) { 5558 if (IS_VMODSORT(pp->p_vnode) || 5559 enable_mbit_wa) { 5560 if (rw == S_WRITE) 5561 hat_setmod(pp); 5562 else if (rw != S_OTHER && 5563 !hat_ismod(pp)) 5564 prot &= ~PROT_WRITE; 5565 } 5566 /* 5567 * Skip mapping read ahead pages marked 5568 * for migration, so they will get migrated 5569 * properly on fault 5570 */ 5571 ASSERT(amp == NULL || 5572 svd->rcookie == HAT_INVALID_REGION_COOKIE); 5573 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) { 5574 hat_memload_region(hat, 5575 seg->s_base + diff, 5576 pp, prot, hat_flag, 5577 svd->rcookie); 5578 } 5579 } 5580 if (amp != NULL) 5581 anon_array_exit(&cookie); 5582 } 5583 page_unlock(pp); 5584 } 5585 done: 5586 if (amp != NULL) 5587 ANON_LOCK_EXIT(&->a_rwlock); 5588 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5589 if (pl_alloc_sz) 5590 kmem_free(plp, pl_alloc_sz); 5591 return (0); 5592 } 5593 5594 /* 5595 * This routine is used to start I/O on pages asynchronously. XXX it will 5596 * only create PAGESIZE pages. At fault time they will be relocated into 5597 * larger pages. 5598 */ 5599 static faultcode_t 5600 segvn_faulta(struct seg *seg, caddr_t addr) 5601 { 5602 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5603 int err; 5604 struct anon_map *amp; 5605 vnode_t *vp; 5606 5607 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5608 5609 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 5610 if ((amp = svd->amp) != NULL) { 5611 struct anon *ap; 5612 5613 /* 5614 * Reader lock to prevent amp->ahp from being changed. 5615 * This is advisory, it's ok to miss a page, so 5616 * we don't do anon_array_enter lock. 
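 *
 * [Editorial aside, not part of the original file]  segvn_faulta() is
 * typically reached through as_faulta(), for instance when an application
 * asks for read-ahead on a mapping (illustrative only):
 *
 *	(void) madvise(p, sz, MADV_WILLNEED);	/* prefetch the mapping */
 *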
5617 */ 5618 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5619 if ((ap = anon_get_ptr(amp->ahp, 5620 svd->anon_index + seg_page(seg, addr))) != NULL) { 5621 5622 err = anon_getpage(&ap, NULL, NULL, 5623 0, seg, addr, S_READ, svd->cred); 5624 5625 ANON_LOCK_EXIT(&->a_rwlock); 5626 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5627 if (err) 5628 return (FC_MAKE_ERR(err)); 5629 return (0); 5630 } 5631 ANON_LOCK_EXIT(&->a_rwlock); 5632 } 5633 5634 if (svd->vp == NULL) { 5635 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5636 return (0); /* zfod page - do nothing now */ 5637 } 5638 5639 vp = svd->vp; 5640 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE, 5641 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp); 5642 err = VOP_GETPAGE(vp, 5643 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)), 5644 PAGESIZE, NULL, NULL, 0, seg, addr, 5645 S_OTHER, svd->cred, NULL); 5646 5647 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5648 if (err) 5649 return (FC_MAKE_ERR(err)); 5650 return (0); 5651 } 5652 5653 static int 5654 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 5655 { 5656 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 5657 struct vpage *cvp, *svp, *evp; 5658 struct vnode *vp; 5659 size_t pgsz; 5660 pgcnt_t pgcnt; 5661 anon_sync_obj_t cookie; 5662 int unload_done = 0; 5663 5664 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 5665 5666 if ((svd->maxprot & prot) != prot) 5667 return (EACCES); /* violated maxprot */ 5668 5669 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 5670 5671 /* return if prot is the same */ 5672 if (!svd->pageprot && svd->prot == prot) { 5673 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5674 return (0); 5675 } 5676 5677 /* 5678 * Since we change protections we first have to flush the cache. 5679 * This makes sure all the pagelock calls have to recheck 5680 * protections. 5681 */ 5682 if (svd->softlockcnt > 0) { 5683 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5684 5685 /* 5686 * If this is shared segment non 0 softlockcnt 5687 * means locked pages are still in use. 5688 */ 5689 if (svd->type == MAP_SHARED) { 5690 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5691 return (EAGAIN); 5692 } 5693 5694 /* 5695 * Since we do have the segvn writers lock nobody can fill 5696 * the cache with entries belonging to this seg during 5697 * the purge. The flush either succeeds or we still have 5698 * pending I/Os. 
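 *
 * [Editorial aside, not part of the original file]  segvn_setprot() is the
 * segment-level worker behind mprotect(2)/as_setprot(), e.g.
 * (illustrative only):
 *
 *	if (mprotect(p, sz, PROT_READ) != 0)
 *		perror("mprotect");
 *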
5699 */ 5700 segvn_purge(seg); 5701 if (svd->softlockcnt > 0) { 5702 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5703 return (EAGAIN); 5704 } 5705 } 5706 5707 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 5708 ASSERT(svd->amp == NULL); 5709 ASSERT(svd->tr_state == SEGVN_TR_OFF); 5710 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 5711 HAT_REGION_TEXT); 5712 svd->rcookie = HAT_INVALID_REGION_COOKIE; 5713 unload_done = 1; 5714 } else if (svd->tr_state == SEGVN_TR_INIT) { 5715 svd->tr_state = SEGVN_TR_OFF; 5716 } else if (svd->tr_state == SEGVN_TR_ON) { 5717 ASSERT(svd->amp != NULL); 5718 segvn_textunrepl(seg, 0); 5719 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 5720 unload_done = 1; 5721 } 5722 5723 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED && 5724 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) { 5725 ASSERT(vn_is_mapped(svd->vp, V_WRITE)); 5726 segvn_inval_trcache(svd->vp); 5727 } 5728 if (seg->s_szc != 0) { 5729 int err; 5730 pgsz = page_get_pagesize(seg->s_szc); 5731 pgcnt = pgsz >> PAGESHIFT; 5732 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 5733 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) { 5734 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5735 ASSERT(seg->s_base != addr || seg->s_size != len); 5736 /* 5737 * If we are holding the as lock as a reader then 5738 * we need to return IE_RETRY and let the as 5739 * layer drop and re-acquire the lock as a writer. 5740 */ 5741 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) 5742 return (IE_RETRY); 5743 VM_STAT_ADD(segvnvmstats.demoterange[1]); 5744 if (svd->type == MAP_PRIVATE || svd->vp != NULL) { 5745 err = segvn_demote_range(seg, addr, len, 5746 SDR_END, 0); 5747 } else { 5748 uint_t szcvec = map_pgszcvec(seg->s_base, 5749 pgsz, (uintptr_t)seg->s_base, 5750 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0); 5751 err = segvn_demote_range(seg, addr, len, 5752 SDR_END, szcvec); 5753 } 5754 if (err == 0) 5755 return (IE_RETRY); 5756 if (err == ENOMEM) 5757 return (IE_NOMEM); 5758 return (err); 5759 } 5760 } 5761 5762 5763 /* 5764 * If it's a private mapping and we're making it writable then we 5765 * may have to reserve the additional swap space now. If we are 5766 * making writable only a part of the segment then we use its vpage 5767 * array to keep a record of the pages for which we have reserved 5768 * swap. In this case we set the pageswap field in the segment's 5769 * segvn structure to record this. 5770 * 5771 * If it's a private mapping to a file (i.e., vp != NULL) and we're 5772 * removing write permission on the entire segment and we haven't 5773 * modified any pages, we can release the swap space. 5774 */ 5775 if (svd->type == MAP_PRIVATE) { 5776 if (prot & PROT_WRITE) { 5777 if (!(svd->flags & MAP_NORESERVE) && 5778 !(svd->swresv && svd->pageswap == 0)) { 5779 size_t sz = 0; 5780 5781 /* 5782 * Start by determining how much swap 5783 * space is required. 5784 */ 5785 if (addr == seg->s_base && 5786 len == seg->s_size && 5787 svd->pageswap == 0) { 5788 /* The whole segment */ 5789 sz = seg->s_size; 5790 } else { 5791 /* 5792 * Make sure that the vpage array 5793 * exists, and make a note of the 5794 * range of elements corresponding 5795 * to len. 
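 *
 * [Editorial aside, not part of the original file]  A worked example of the
 * accounting below, with hypothetical numbers: write-enabling a 64K range
 * of 8K pages in which 3 vpages already have VPP_ISSWAPRES set leaves
 * 5 unreserved pages, so
 *
 *	sz = 5 << PAGESHIFT;		/* 40K passed to anon_resv_zone() */
 *
 * and those 5 vpages are then marked with VPP_SETSWAPRES().
 *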
5796 */ 5797 segvn_vpage(seg); 5798 if (svd->vpage == NULL) { 5799 SEGVN_LOCK_EXIT(seg->s_as, 5800 &svd->lock); 5801 return (ENOMEM); 5802 } 5803 svp = &svd->vpage[seg_page(seg, addr)]; 5804 evp = &svd->vpage[seg_page(seg, 5805 addr + len)]; 5806 5807 if (svd->pageswap == 0) { 5808 /* 5809 * This is the first time we've 5810 * asked for a part of this 5811 * segment, so we need to 5812 * reserve everything we've 5813 * been asked for. 5814 */ 5815 sz = len; 5816 } else { 5817 /* 5818 * We have to count the number 5819 * of pages required. 5820 */ 5821 for (cvp = svp; cvp < evp; 5822 cvp++) { 5823 if (!VPP_ISSWAPRES(cvp)) 5824 sz++; 5825 } 5826 sz <<= PAGESHIFT; 5827 } 5828 } 5829 5830 /* Try to reserve the necessary swap. */ 5831 if (anon_resv_zone(sz, 5832 seg->s_as->a_proc->p_zone) == 0) { 5833 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5834 return (IE_NOMEM); 5835 } 5836 5837 /* 5838 * Make a note of how much swap space 5839 * we've reserved. 5840 */ 5841 if (svd->pageswap == 0 && sz == seg->s_size) { 5842 svd->swresv = sz; 5843 } else { 5844 ASSERT(svd->vpage != NULL); 5845 svd->swresv += sz; 5846 svd->pageswap = 1; 5847 for (cvp = svp; cvp < evp; cvp++) { 5848 if (!VPP_ISSWAPRES(cvp)) 5849 VPP_SETSWAPRES(cvp); 5850 } 5851 } 5852 } 5853 } else { 5854 /* 5855 * Swap space is released only if this segment 5856 * does not map anonymous memory, since read faults 5857 * on such segments still need an anon slot to read 5858 * in the data. 5859 */ 5860 if (svd->swresv != 0 && svd->vp != NULL && 5861 svd->amp == NULL && addr == seg->s_base && 5862 len == seg->s_size && svd->pageprot == 0) { 5863 ASSERT(svd->pageswap == 0); 5864 anon_unresv_zone(svd->swresv, 5865 seg->s_as->a_proc->p_zone); 5866 svd->swresv = 0; 5867 TRACE_3(TR_FAC_VM, TR_ANON_PROC, 5868 "anon proc:%p %lu %u", seg, 0, 0); 5869 } 5870 } 5871 } 5872 5873 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) { 5874 if (svd->prot == prot) { 5875 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5876 return (0); /* all done */ 5877 } 5878 svd->prot = (uchar_t)prot; 5879 } else if (svd->type == MAP_PRIVATE) { 5880 struct anon *ap = NULL; 5881 page_t *pp; 5882 u_offset_t offset, off; 5883 struct anon_map *amp; 5884 ulong_t anon_idx = 0; 5885 5886 /* 5887 * A vpage structure exists or else the change does not 5888 * involve the entire segment. Establish a vpage structure 5889 * if none is there. Then, for each page in the range, 5890 * adjust its individual permissions. Note that write- 5891 * enabling a MAP_PRIVATE page can affect the claims for 5892 * locked down memory. Overcommitting memory terminates 5893 * the operation. 5894 */ 5895 segvn_vpage(seg); 5896 if (svd->vpage == NULL) { 5897 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5898 return (ENOMEM); 5899 } 5900 svd->pageprot = 1; 5901 if ((amp = svd->amp) != NULL) { 5902 anon_idx = svd->anon_index + seg_page(seg, addr); 5903 ASSERT(seg->s_szc == 0 || 5904 IS_P2ALIGNED(anon_idx, pgcnt)); 5905 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 5906 } 5907 5908 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 5909 evp = &svd->vpage[seg_page(seg, addr + len)]; 5910 5911 /* 5912 * See Statement at the beginning of segvn_lockop regarding 5913 * the way cowcnts and lckcnts are handled. 
5914 */ 5915 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 5916 5917 if (seg->s_szc != 0) { 5918 if (amp != NULL) { 5919 anon_array_enter(amp, anon_idx, 5920 &cookie); 5921 } 5922 if (IS_P2ALIGNED(anon_idx, pgcnt) && 5923 !segvn_claim_pages(seg, svp, offset, 5924 anon_idx, prot)) { 5925 if (amp != NULL) { 5926 anon_array_exit(&cookie); 5927 } 5928 break; 5929 } 5930 if (amp != NULL) { 5931 anon_array_exit(&cookie); 5932 } 5933 anon_idx++; 5934 } else { 5935 if (amp != NULL) { 5936 anon_array_enter(amp, anon_idx, 5937 &cookie); 5938 ap = anon_get_ptr(amp->ahp, anon_idx++); 5939 } 5940 5941 if (VPP_ISPPLOCK(svp) && 5942 VPP_PROT(svp) != prot) { 5943 5944 if (amp == NULL || ap == NULL) { 5945 vp = svd->vp; 5946 off = offset; 5947 } else 5948 swap_xlate(ap, &vp, &off); 5949 if (amp != NULL) 5950 anon_array_exit(&cookie); 5951 5952 if ((pp = page_lookup(vp, off, 5953 SE_SHARED)) == NULL) { 5954 panic("segvn_setprot: no page"); 5955 /*NOTREACHED*/ 5956 } 5957 ASSERT(seg->s_szc == 0); 5958 if ((VPP_PROT(svp) ^ prot) & 5959 PROT_WRITE) { 5960 if (prot & PROT_WRITE) { 5961 if (!page_addclaim( 5962 pp)) { 5963 page_unlock(pp); 5964 break; 5965 } 5966 } else { 5967 if (!page_subclaim( 5968 pp)) { 5969 page_unlock(pp); 5970 break; 5971 } 5972 } 5973 } 5974 page_unlock(pp); 5975 } else if (amp != NULL) 5976 anon_array_exit(&cookie); 5977 } 5978 VPP_SETPROT(svp, prot); 5979 offset += PAGESIZE; 5980 } 5981 if (amp != NULL) 5982 ANON_LOCK_EXIT(&->a_rwlock); 5983 5984 /* 5985 * Did we terminate prematurely? If so, simply unload 5986 * the translations to the things we've updated so far. 5987 */ 5988 if (svp != evp) { 5989 if (unload_done) { 5990 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 5991 return (IE_NOMEM); 5992 } 5993 len = (svp - &svd->vpage[seg_page(seg, addr)]) * 5994 PAGESIZE; 5995 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz)); 5996 if (len != 0) 5997 hat_unload(seg->s_as->a_hat, addr, 5998 len, HAT_UNLOAD); 5999 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6000 return (IE_NOMEM); 6001 } 6002 } else { 6003 segvn_vpage(seg); 6004 if (svd->vpage == NULL) { 6005 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6006 return (ENOMEM); 6007 } 6008 svd->pageprot = 1; 6009 evp = &svd->vpage[seg_page(seg, addr + len)]; 6010 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) { 6011 VPP_SETPROT(svp, prot); 6012 } 6013 } 6014 6015 if (unload_done) { 6016 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6017 return (0); 6018 } 6019 6020 if (((prot & PROT_WRITE) != 0 && 6021 (svd->vp != NULL || svd->type == MAP_PRIVATE)) || 6022 (prot & ~PROT_USER) == PROT_NONE) { 6023 /* 6024 * Either private or shared data with write access (in 6025 * which case we need to throw out all former translations 6026 * so that we get the right translations set up on fault 6027 * and we don't allow write access to any copy-on-write pages 6028 * that might be around or to prevent write access to pages 6029 * representing holes in a file), or we don't have permission 6030 * to access the memory at all (in which case we have to 6031 * unload any current translations that might exist). 6032 */ 6033 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 6034 } else { 6035 /* 6036 * A shared mapping or a private mapping in which write 6037 * protection is going to be denied - just change all the 6038 * protections over the range of addresses in question. 6039 * segvn does not support any other attributes other 6040 * than prot so we can use hat_chgattr. 
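 *
 * Illustrative contrast (user-level sketch, not part of the original
 * code; fd and len are hypothetical):
 *
 *	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	(void) mprotect(p, len, PROT_READ | PROT_WRITE);
 *
 * takes the hat_unload() branch above: the old read-only translations
 * are thrown out so the next store faults and copy-on-write runs in
 * segvn_fault().  By contrast
 *
 *	q = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	(void) mprotect(q, len, PROT_READ);
 *
 * removes write permission from a shared mapping, where no
 * copy-on-write is involved, so the cheaper hat_chgattr() below simply
 * narrows the existing translations in place.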
6041 */ 6042 hat_chgattr(seg->s_as->a_hat, addr, len, prot); 6043 } 6044 6045 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6046 6047 return (0); 6048 } 6049 6050 /* 6051 * segvn_setpagesize is called via segop_setpagesize from as_setpagesize, 6052 * to determine if the seg is capable of mapping the requested szc. 6053 */ 6054 static int 6055 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc) 6056 { 6057 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6058 struct segvn_data *nsvd; 6059 struct anon_map *amp = svd->amp; 6060 struct seg *nseg; 6061 caddr_t eaddr = addr + len, a; 6062 size_t pgsz = page_get_pagesize(szc); 6063 pgcnt_t pgcnt = page_get_pagecnt(szc); 6064 int err; 6065 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base); 6066 6067 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6068 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6069 6070 if (seg->s_szc == szc || segvn_lpg_disable != 0) { 6071 return (0); 6072 } 6073 6074 /* 6075 * addr should always be pgsz aligned but eaddr may be misaligned if 6076 * it's at the end of the segment. 6077 * 6078 * XXX we should assert this condition since as_setpagesize() logic 6079 * guarantees it. 6080 */ 6081 if (!IS_P2ALIGNED(addr, pgsz) || 6082 (!IS_P2ALIGNED(eaddr, pgsz) && 6083 eaddr != seg->s_base + seg->s_size)) { 6084 6085 segvn_setpgsz_align_err++; 6086 return (EINVAL); 6087 } 6088 6089 if (amp != NULL && svd->type == MAP_SHARED) { 6090 ulong_t an_idx = svd->anon_index + seg_page(seg, addr); 6091 if (!IS_P2ALIGNED(an_idx, pgcnt)) { 6092 6093 segvn_setpgsz_anon_align_err++; 6094 return (EINVAL); 6095 } 6096 } 6097 6098 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas || 6099 szc > segvn_maxpgszc) { 6100 return (EINVAL); 6101 } 6102 6103 /* paranoid check */ 6104 if (svd->vp != NULL && 6105 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) { 6106 return (EINVAL); 6107 } 6108 6109 if (seg->s_szc == 0 && svd->vp != NULL && 6110 map_addr_vacalign_check(addr, off)) { 6111 return (EINVAL); 6112 } 6113 6114 /* 6115 * Check that protections are the same within new page 6116 * size boundaries. 6117 */ 6118 if (svd->pageprot) { 6119 for (a = addr; a < eaddr; a += pgsz) { 6120 if ((a + pgsz) > eaddr) { 6121 if (!sameprot(seg, a, eaddr - a)) { 6122 return (EINVAL); 6123 } 6124 } else { 6125 if (!sameprot(seg, a, pgsz)) { 6126 return (EINVAL); 6127 } 6128 } 6129 } 6130 } 6131 6132 /* 6133 * Since we are changing page size we first have to flush 6134 * the cache. This makes sure all the pagelock calls have 6135 * to recheck protections. 6136 */ 6137 if (svd->softlockcnt > 0) { 6138 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6139 6140 /* 6141 * If this is shared segment non 0 softlockcnt 6142 * means locked pages are still in use. 6143 */ 6144 if (svd->type == MAP_SHARED) { 6145 return (EAGAIN); 6146 } 6147 6148 /* 6149 * Since we do have the segvn writers lock nobody can fill 6150 * the cache with entries belonging to this seg during 6151 * the purge. The flush either succeeds or we still have 6152 * pending I/Os. 
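 *
 * For context, an illustrative user-level sketch of the request that
 * typically drives this routine (not part of the original code; addr,
 * len and the 2M size are placeholders, and the supported sizes are
 * platform dependent):
 *
 *	struct memcntl_mha mha;
 *	mha.mha_cmd = MHA_MAPSIZE_VA;
 *	mha.mha_flags = 0;
 *	mha.mha_pagesize = 2 * 1024 * 1024;
 *	(void) memcntl(addr, len, MC_HAT_ADVISE, (caddr_t)&mha, 0, 0);
 *
 * memcntl(2) reaches as_setpagesize(), which calls in here through the
 * segop_setpagesize entry for each affected segment.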
6153 */ 6154 segvn_purge(seg); 6155 if (svd->softlockcnt > 0) { 6156 return (EAGAIN); 6157 } 6158 } 6159 6160 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6161 ASSERT(svd->amp == NULL); 6162 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6163 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6164 HAT_REGION_TEXT); 6165 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6166 } else if (svd->tr_state == SEGVN_TR_INIT) { 6167 svd->tr_state = SEGVN_TR_OFF; 6168 } else if (svd->tr_state == SEGVN_TR_ON) { 6169 ASSERT(svd->amp != NULL); 6170 segvn_textunrepl(seg, 1); 6171 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6172 amp = NULL; 6173 } 6174 6175 /* 6176 * Operation for sub range of existing segment. 6177 */ 6178 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) { 6179 if (szc < seg->s_szc) { 6180 VM_STAT_ADD(segvnvmstats.demoterange[2]); 6181 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0); 6182 if (err == 0) { 6183 return (IE_RETRY); 6184 } 6185 if (err == ENOMEM) { 6186 return (IE_NOMEM); 6187 } 6188 return (err); 6189 } 6190 if (addr != seg->s_base) { 6191 nseg = segvn_split_seg(seg, addr); 6192 if (eaddr != (nseg->s_base + nseg->s_size)) { 6193 /* eaddr is szc aligned */ 6194 (void) segvn_split_seg(nseg, eaddr); 6195 } 6196 return (IE_RETRY); 6197 } 6198 if (eaddr != (seg->s_base + seg->s_size)) { 6199 /* eaddr is szc aligned */ 6200 (void) segvn_split_seg(seg, eaddr); 6201 } 6202 return (IE_RETRY); 6203 } 6204 6205 /* 6206 * Break any low level sharing and reset seg->s_szc to 0. 6207 */ 6208 if ((err = segvn_clrszc(seg)) != 0) { 6209 if (err == ENOMEM) { 6210 err = IE_NOMEM; 6211 } 6212 return (err); 6213 } 6214 ASSERT(seg->s_szc == 0); 6215 6216 /* 6217 * If the end of the current segment is not pgsz aligned 6218 * then attempt to concatenate with the next segment. 6219 */ 6220 if (!IS_P2ALIGNED(eaddr, pgsz)) { 6221 nseg = AS_SEGNEXT(seg->s_as, seg); 6222 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) { 6223 return (ENOMEM); 6224 } 6225 if (nseg->s_ops != &segvn_ops) { 6226 return (EINVAL); 6227 } 6228 nsvd = (struct segvn_data *)nseg->s_data; 6229 if (nsvd->softlockcnt > 0) { 6230 /* 6231 * If this is shared segment non 0 softlockcnt 6232 * means locked pages are still in use. 6233 */ 6234 if (nsvd->type == MAP_SHARED) { 6235 return (EAGAIN); 6236 } 6237 segvn_purge(nseg); 6238 if (nsvd->softlockcnt > 0) { 6239 return (EAGAIN); 6240 } 6241 } 6242 err = segvn_clrszc(nseg); 6243 if (err == ENOMEM) { 6244 err = IE_NOMEM; 6245 } 6246 if (err != 0) { 6247 return (err); 6248 } 6249 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6250 err = segvn_concat(seg, nseg, 1); 6251 if (err == -1) { 6252 return (EINVAL); 6253 } 6254 if (err == -2) { 6255 return (IE_NOMEM); 6256 } 6257 return (IE_RETRY); 6258 } 6259 6260 /* 6261 * May need to re-align anon array to 6262 * new szc. 
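 *
 * Illustrative example (numbers are hypothetical): with szc giving
 * pgcnt == 512 (a 2M large page built from 4K pages), an existing
 * anon_index of, say, 5 is not a multiple of 512, so the anon slots
 * are copied below into a fresh anon_hdr starting at index 0.  That
 * way large-page boundaries in the segment line up with large-page
 * boundaries in the anon array, which the large-page fault and claim
 * paths assume.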
6263 */ 6264 if (amp != NULL) { 6265 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) { 6266 struct anon_hdr *nahp; 6267 6268 ASSERT(svd->type == MAP_PRIVATE); 6269 6270 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 6271 ASSERT(amp->refcnt == 1); 6272 nahp = anon_create(btop(amp->size), ANON_NOSLEEP); 6273 if (nahp == NULL) { 6274 ANON_LOCK_EXIT(&amp->a_rwlock); 6275 return (IE_NOMEM); 6276 } 6277 if (anon_copy_ptr(amp->ahp, svd->anon_index, 6278 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) { 6279 anon_release(nahp, btop(amp->size)); 6280 ANON_LOCK_EXIT(&amp->a_rwlock); 6281 return (IE_NOMEM); 6282 } 6283 anon_release(amp->ahp, btop(amp->size)); 6284 amp->ahp = nahp; 6285 svd->anon_index = 0; 6286 ANON_LOCK_EXIT(&amp->a_rwlock); 6287 } 6288 } 6289 if (svd->vp != NULL && szc != 0) { 6290 struct vattr va; 6291 u_offset_t eoffpage = svd->offset; 6292 va.va_mask = AT_SIZE; 6293 eoffpage += seg->s_size; 6294 eoffpage = btopr(eoffpage); 6295 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) { 6296 segvn_setpgsz_getattr_err++; 6297 return (EINVAL); 6298 } 6299 if (btopr(va.va_size) < eoffpage) { 6300 segvn_setpgsz_eof_err++; 6301 return (EINVAL); 6302 } 6303 if (amp != NULL) { 6304 /* 6305 * anon_fill_cow_holes() may call VOP_GETPAGE(). 6306 * don't take anon map lock here to avoid holding it 6307 * across VOP_GETPAGE() calls that may call back into 6308 * segvn for klustering checks. We don't really need 6309 * anon map lock here since it's a private segment and 6310 * we hold as level lock as writers. 6311 */ 6312 if ((err = anon_fill_cow_holes(seg, seg->s_base, 6313 amp->ahp, svd->anon_index, svd->vp, svd->offset, 6314 seg->s_size, szc, svd->prot, svd->vpage, 6315 svd->cred)) != 0) { 6316 return (EINVAL); 6317 } 6318 } 6319 segvn_setvnode_mpss(svd->vp); 6320 } 6321 6322 if (amp != NULL) { 6323 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER); 6324 if (svd->type == MAP_PRIVATE) { 6325 amp->a_szc = szc; 6326 } else if (szc > amp->a_szc) { 6327 amp->a_szc = szc; 6328 } 6329 ANON_LOCK_EXIT(&amp->a_rwlock); 6330 } 6331 6332 seg->s_szc = szc; 6333 6334 return (0); 6335 } 6336 6337 static int 6338 segvn_clrszc(struct seg *seg) 6339 { 6340 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6341 struct anon_map *amp = svd->amp; 6342 size_t pgsz; 6343 pgcnt_t pages; 6344 int err = 0; 6345 caddr_t a = seg->s_base; 6346 caddr_t ea = a + seg->s_size; 6347 ulong_t an_idx = svd->anon_index; 6348 vnode_t *vp = svd->vp; 6349 struct vpage *vpage = svd->vpage; 6350 page_t *anon_pl[1 + 1], *pp; 6351 struct anon *ap, *oldap; 6352 uint_t prot = svd->prot, vpprot; 6353 int pageflag = 0; 6354 6355 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6356 SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 6357 ASSERT(svd->softlockcnt == 0); 6358 6359 if (vp == NULL && amp == NULL) { 6360 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6361 seg->s_szc = 0; 6362 return (0); 6363 } 6364 6365 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 6366 ASSERT(svd->amp == NULL); 6367 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6368 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 6369 HAT_REGION_TEXT); 6370 svd->rcookie = HAT_INVALID_REGION_COOKIE; 6371 } else if (svd->tr_state == SEGVN_TR_ON) { 6372 ASSERT(svd->amp != NULL); 6373 segvn_textunrepl(seg, 1); 6374 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF); 6375 amp = NULL; 6376 } else { 6377 if (svd->tr_state != SEGVN_TR_OFF) { 6378 ASSERT(svd->tr_state == SEGVN_TR_INIT); 6379 svd->tr_state = SEGVN_TR_OFF; 6380 } 6381 6382 /* 6383 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6384 * unload argument is 0 when we are freeing the segment 6385 * and unload was already done. 6386 */ 6387 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size, 6388 HAT_UNLOAD_UNMAP); 6389 } 6390 6391 if (amp == NULL || svd->type == MAP_SHARED) { 6392 seg->s_szc = 0; 6393 return (0); 6394 } 6395 6396 pgsz = page_get_pagesize(seg->s_szc); 6397 pages = btop(pgsz); 6398 6399 /* 6400 * XXX anon rwlock is not really needed because this is a 6401 * private segment and we are writers. 6402 */ 6403 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER); 6404 6405 for (; a < ea; a += pgsz, an_idx += pages) { 6406 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) { 6407 ASSERT(vpage != NULL || svd->pageprot == 0); 6408 if (vpage != NULL) { 6409 ASSERT(sameprot(seg, a, pgsz)); 6410 prot = VPP_PROT(vpage); 6411 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0; 6412 } 6413 if (seg->s_szc != 0) { 6414 ASSERT(vp == NULL || anon_pages(amp->ahp, 6415 an_idx, pages) == pages); 6416 if ((err = anon_map_demotepages(amp, an_idx, 6417 seg, a, prot, vpage, svd->cred)) != 0) { 6418 goto out; 6419 } 6420 } else { 6421 if (oldap->an_refcnt == 1) { 6422 continue; 6423 } 6424 if ((err = anon_getpage(&oldap, &vpprot, 6425 anon_pl, PAGESIZE, seg, a, S_READ, 6426 svd->cred))) { 6427 goto out; 6428 } 6429 if ((pp = anon_private(&ap, seg, a, prot, 6430 anon_pl[0], pageflag, svd->cred)) == NULL) { 6431 err = ENOMEM; 6432 goto out; 6433 } 6434 anon_decref(oldap); 6435 (void) anon_set_ptr(amp->ahp, an_idx, ap, 6436 ANON_SLEEP); 6437 page_unlock(pp); 6438 } 6439 } 6440 vpage = (vpage == NULL) ? NULL : vpage + pages; 6441 } 6442 6443 amp->a_szc = 0; 6444 seg->s_szc = 0; 6445 out: 6446 ANON_LOCK_EXIT(&->a_rwlock); 6447 return (err); 6448 } 6449 6450 static int 6451 segvn_claim_pages( 6452 struct seg *seg, 6453 struct vpage *svp, 6454 u_offset_t off, 6455 ulong_t anon_idx, 6456 uint_t prot) 6457 { 6458 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6459 size_t ppasize = (pgcnt + 1) * sizeof (page_t *); 6460 page_t **ppa; 6461 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6462 struct anon_map *amp = svd->amp; 6463 struct vpage *evp = svp + pgcnt; 6464 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT) 6465 + seg->s_base; 6466 struct anon *ap; 6467 struct vnode *vp = svd->vp; 6468 page_t *pp; 6469 pgcnt_t pg_idx, i; 6470 int err = 0; 6471 anoff_t aoff; 6472 int anon = (amp != NULL) ? 
1 : 0; 6473 6474 ASSERT(svd->type == MAP_PRIVATE); 6475 ASSERT(svd->vpage != NULL); 6476 ASSERT(seg->s_szc != 0); 6477 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt)); 6478 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt)); 6479 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT)); 6480 6481 if (VPP_PROT(svp) == prot) 6482 return (1); 6483 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE)) 6484 return (1); 6485 6486 ppa = kmem_alloc(ppasize, KM_SLEEP); 6487 if (anon && vp != NULL) { 6488 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) { 6489 anon = 0; 6490 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt)); 6491 } 6492 ASSERT(!anon || 6493 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt); 6494 } 6495 6496 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) { 6497 if (!VPP_ISPPLOCK(svp)) 6498 continue; 6499 if (anon) { 6500 ap = anon_get_ptr(amp->ahp, anon_idx); 6501 if (ap == NULL) { 6502 panic("segvn_claim_pages: no anon slot"); 6503 } 6504 swap_xlate(ap, &vp, &aoff); 6505 off = (u_offset_t)aoff; 6506 } 6507 ASSERT(vp != NULL); 6508 if ((pp = page_lookup(vp, 6509 (u_offset_t)off, SE_SHARED)) == NULL) { 6510 panic("segvn_claim_pages: no page"); 6511 } 6512 ppa[pg_idx++] = pp; 6513 off += PAGESIZE; 6514 } 6515 6516 if (ppa[0] == NULL) { 6517 kmem_free(ppa, ppasize); 6518 return (1); 6519 } 6520 6521 ASSERT(pg_idx <= pgcnt); 6522 ppa[pg_idx] = NULL; 6523 6524 6525 /* Find each large page within ppa, and adjust its claim */ 6526 6527 /* Does ppa cover a single large page? */ 6528 if (ppa[0]->p_szc == seg->s_szc) { 6529 if (prot & PROT_WRITE) 6530 err = page_addclaim_pages(ppa); 6531 else 6532 err = page_subclaim_pages(ppa); 6533 } else { 6534 for (i = 0; ppa[i]; i += pgcnt) { 6535 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt)); 6536 if (prot & PROT_WRITE) 6537 err = page_addclaim_pages(&ppa[i]); 6538 else 6539 err = page_subclaim_pages(&ppa[i]); 6540 if (err == 0) 6541 break; 6542 } 6543 } 6544 6545 for (i = 0; i < pg_idx; i++) { 6546 ASSERT(ppa[i] != NULL); 6547 page_unlock(ppa[i]); 6548 } 6549 6550 kmem_free(ppa, ppasize); 6551 return (err); 6552 } 6553 6554 /* 6555 * Returns right (upper address) segment if split occurred. 6556 * If the address is equal to the beginning or end of its segment it returns 6557 * the current segment. 
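 *
 * For example (addresses hypothetical): splitting a segment covering
 * [0x10000, 0x30000) at 0x20000 shrinks the original segment to
 * [0x10000, 0x20000) and returns a new segment for [0x20000, 0x30000);
 * asking to split at 0x10000 or 0x30000 changes nothing and simply
 * returns the original segment.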
6558 */ 6559 static struct seg * 6560 segvn_split_seg(struct seg *seg, caddr_t addr) 6561 { 6562 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6563 struct seg *nseg; 6564 size_t nsize; 6565 struct segvn_data *nsvd; 6566 6567 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6568 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6569 6570 ASSERT(addr >= seg->s_base); 6571 ASSERT(addr <= seg->s_base + seg->s_size); 6572 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6573 6574 if (addr == seg->s_base || addr == seg->s_base + seg->s_size) 6575 return (seg); 6576 6577 nsize = seg->s_base + seg->s_size - addr; 6578 seg->s_size = addr - seg->s_base; 6579 nseg = seg_alloc(seg->s_as, addr, nsize); 6580 ASSERT(nseg != NULL); 6581 nseg->s_ops = seg->s_ops; 6582 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP); 6583 nseg->s_data = (void *)nsvd; 6584 nseg->s_szc = seg->s_szc; 6585 *nsvd = *svd; 6586 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE); 6587 nsvd->seg = nseg; 6588 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL); 6589 6590 if (nsvd->vp != NULL) { 6591 VN_HOLD(nsvd->vp); 6592 nsvd->offset = svd->offset + 6593 (uintptr_t)(nseg->s_base - seg->s_base); 6594 if (nsvd->type == MAP_SHARED) 6595 lgrp_shm_policy_init(NULL, nsvd->vp); 6596 } else { 6597 /* 6598 * The offset for an anonymous segment has no significance in 6599 * terms of an offset into a file. If we were to use the above 6600 * calculation instead, the structures read out of 6601 * /proc/<pid>/xmap would be more difficult to decipher since 6602 * it would be unclear whether two seemingly contiguous 6603 * prxmap_t structures represented different segments or a 6604 * single segment that had been split up into multiple prxmap_t 6605 * structures (e.g. if some part of the segment had not yet 6606 * been faulted in).
6607 */ 6608 nsvd->offset = 0; 6609 } 6610 6611 ASSERT(svd->softlockcnt == 0); 6612 ASSERT(svd->softlockcnt_sbase == 0); 6613 ASSERT(svd->softlockcnt_send == 0); 6614 crhold(svd->cred); 6615 6616 if (svd->vpage != NULL) { 6617 size_t bytes = vpgtob(seg_pages(seg)); 6618 size_t nbytes = vpgtob(seg_pages(nseg)); 6619 struct vpage *ovpage = svd->vpage; 6620 6621 svd->vpage = kmem_alloc(bytes, KM_SLEEP); 6622 bcopy(ovpage, svd->vpage, bytes); 6623 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP); 6624 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes); 6625 kmem_free(ovpage, bytes + nbytes); 6626 } 6627 if (svd->amp != NULL && svd->type == MAP_PRIVATE) { 6628 struct anon_map *oamp = svd->amp, *namp; 6629 struct anon_hdr *nahp; 6630 6631 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER); 6632 ASSERT(oamp->refcnt == 1); 6633 nahp = anon_create(btop(seg->s_size), ANON_SLEEP); 6634 (void) anon_copy_ptr(oamp->ahp, svd->anon_index, 6635 nahp, 0, btop(seg->s_size), ANON_SLEEP); 6636 6637 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP); 6638 namp->a_szc = nseg->s_szc; 6639 (void) anon_copy_ptr(oamp->ahp, 6640 svd->anon_index + btop(seg->s_size), 6641 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP); 6642 anon_release(oamp->ahp, btop(oamp->size)); 6643 oamp->ahp = nahp; 6644 oamp->size = seg->s_size; 6645 svd->anon_index = 0; 6646 nsvd->amp = namp; 6647 nsvd->anon_index = 0; 6648 ANON_LOCK_EXIT(&oamp->a_rwlock); 6649 } else if (svd->amp != NULL) { 6650 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc); 6651 ASSERT(svd->amp == nsvd->amp); 6652 ASSERT(seg->s_szc <= svd->amp->a_szc); 6653 nsvd->anon_index = svd->anon_index + seg_pages(seg); 6654 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt)); 6655 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER); 6656 svd->amp->refcnt++; 6657 ANON_LOCK_EXIT(&svd->amp->a_rwlock); 6658 } 6659 6660 /* 6661 * Split the amount of swap reserved. 6662 */ 6663 if (svd->swresv) { 6664 /* 6665 * For MAP_NORESERVE, only allocate swap reserve for pages 6666 * being used. Other segments get enough to cover whole 6667 * segment. 6668 */ 6669 if (svd->flags & MAP_NORESERVE) { 6670 size_t oswresv; 6671 6672 ASSERT(svd->amp); 6673 oswresv = svd->swresv; 6674 svd->swresv = ptob(anon_pages(svd->amp->ahp, 6675 svd->anon_index, btop(seg->s_size))); 6676 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp, 6677 nsvd->anon_index, btop(nseg->s_size))); 6678 ASSERT(oswresv >= (svd->swresv + nsvd->swresv)); 6679 } else { 6680 if (svd->pageswap) { 6681 svd->swresv = segvn_count_swap_by_vpages(seg); 6682 ASSERT(nsvd->swresv >= svd->swresv); 6683 nsvd->swresv -= svd->swresv; 6684 } else { 6685 ASSERT(svd->swresv == seg->s_size + 6686 nseg->s_size); 6687 svd->swresv = seg->s_size; 6688 nsvd->swresv = nseg->s_size; 6689 } 6690 } 6691 } 6692 6693 return (nseg); 6694 } 6695 6696 /* 6697 * called on memory operations (unmap, setprot, setpagesize) for a subset 6698 * of a large page segment to either demote the memory range (SDR_RANGE) 6699 * or the ends (SDR_END) by addr/len. 6700 * 6701 * returns 0 on success. returns errno, including ENOMEM, on failure. 
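 *
 * Worked example (illustrative; addresses and the 4M size are
 * hypothetical): take a segment at [0x1000000, 0x3000000) mapped with
 * 4M pages and an unmap of 8K starting at 0x1402000.  With SDR_END,
 * CALC_LPG_REGION rounds the range out to the single 4M page
 * [0x1400000, 0x1800000); that page's range is split off into its own
 * segment and segvn_clrszc() demotes just that segment, leaving the
 * rest of the mapping on large pages.  SDR_RANGE, used by
 * segvn_setpagesize(), instead demotes every large page overlapping
 * [lpgaddr, lpgeaddr).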
6702 */ 6703 static int 6704 segvn_demote_range( 6705 struct seg *seg, 6706 caddr_t addr, 6707 size_t len, 6708 int flag, 6709 uint_t szcvec) 6710 { 6711 caddr_t eaddr = addr + len; 6712 caddr_t lpgaddr, lpgeaddr; 6713 struct seg *nseg; 6714 struct seg *badseg1 = NULL; 6715 struct seg *badseg2 = NULL; 6716 size_t pgsz; 6717 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6718 int err; 6719 uint_t szc = seg->s_szc; 6720 uint_t tszcvec; 6721 6722 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock)); 6723 ASSERT(svd->tr_state == SEGVN_TR_OFF); 6724 ASSERT(szc != 0); 6725 pgsz = page_get_pagesize(szc); 6726 ASSERT(seg->s_base != addr || seg->s_size != len); 6727 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size); 6728 ASSERT(svd->softlockcnt == 0); 6729 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 6730 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED)); 6731 6732 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 6733 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr); 6734 if (flag == SDR_RANGE) { 6735 /* demote entire range */ 6736 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6737 (void) segvn_split_seg(nseg, lpgeaddr); 6738 ASSERT(badseg1->s_base == lpgaddr); 6739 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr); 6740 } else if (addr != lpgaddr) { 6741 ASSERT(flag == SDR_END); 6742 badseg1 = nseg = segvn_split_seg(seg, lpgaddr); 6743 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz && 6744 eaddr < lpgaddr + 2 * pgsz) { 6745 (void) segvn_split_seg(nseg, lpgeaddr); 6746 ASSERT(badseg1->s_base == lpgaddr); 6747 ASSERT(badseg1->s_size == 2 * pgsz); 6748 } else { 6749 nseg = segvn_split_seg(nseg, lpgaddr + pgsz); 6750 ASSERT(badseg1->s_base == lpgaddr); 6751 ASSERT(badseg1->s_size == pgsz); 6752 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) { 6753 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz); 6754 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz); 6755 badseg2 = nseg; 6756 (void) segvn_split_seg(nseg, lpgeaddr); 6757 ASSERT(badseg2->s_base == lpgeaddr - pgsz); 6758 ASSERT(badseg2->s_size == pgsz); 6759 } 6760 } 6761 } else { 6762 ASSERT(flag == SDR_END); 6763 ASSERT(eaddr < lpgeaddr); 6764 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz); 6765 (void) segvn_split_seg(nseg, lpgeaddr); 6766 ASSERT(badseg1->s_base == lpgeaddr - pgsz); 6767 ASSERT(badseg1->s_size == pgsz); 6768 } 6769 6770 ASSERT(badseg1 != NULL); 6771 ASSERT(badseg1->s_szc == szc); 6772 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz || 6773 badseg1->s_size == 2 * pgsz); 6774 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz)); 6775 ASSERT(badseg1->s_size == pgsz || 6776 sameprot(badseg1, badseg1->s_base + pgsz, pgsz)); 6777 if (err = segvn_clrszc(badseg1)) { 6778 return (err); 6779 } 6780 ASSERT(badseg1->s_szc == 0); 6781 6782 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6783 uint_t tszc = highbit(tszcvec) - 1; 6784 caddr_t ta = MAX(addr, badseg1->s_base); 6785 caddr_t te; 6786 size_t tpgsz = page_get_pagesize(tszc); 6787 6788 ASSERT(svd->type == MAP_SHARED); 6789 ASSERT(flag == SDR_END); 6790 ASSERT(tszc < szc && tszc > 0); 6791 6792 if (eaddr > badseg1->s_base + badseg1->s_size) { 6793 te = badseg1->s_base + badseg1->s_size; 6794 } else { 6795 te = eaddr; 6796 } 6797 6798 ASSERT(ta <= te); 6799 badseg1->s_szc = tszc; 6800 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) { 6801 if (badseg2 != NULL) { 6802 err = segvn_demote_range(badseg1, ta, te - ta, 6803 SDR_END, tszcvec); 6804 if (err != 0) { 6805 return (err); 6806 } 6807 } else { 6808 
return (segvn_demote_range(badseg1, ta, 6809 te - ta, SDR_END, tszcvec)); 6810 } 6811 } 6812 } 6813 6814 if (badseg2 == NULL) 6815 return (0); 6816 ASSERT(badseg2->s_szc == szc); 6817 ASSERT(badseg2->s_size == pgsz); 6818 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size)); 6819 if (err = segvn_clrszc(badseg2)) { 6820 return (err); 6821 } 6822 ASSERT(badseg2->s_szc == 0); 6823 6824 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) { 6825 uint_t tszc = highbit(tszcvec) - 1; 6826 size_t tpgsz = page_get_pagesize(tszc); 6827 6828 ASSERT(svd->type == MAP_SHARED); 6829 ASSERT(flag == SDR_END); 6830 ASSERT(tszc < szc && tszc > 0); 6831 ASSERT(badseg2->s_base > addr); 6832 ASSERT(eaddr > badseg2->s_base); 6833 ASSERT(eaddr < badseg2->s_base + badseg2->s_size); 6834 6835 badseg2->s_szc = tszc; 6836 if (!IS_P2ALIGNED(eaddr, tpgsz)) { 6837 return (segvn_demote_range(badseg2, badseg2->s_base, 6838 eaddr - badseg2->s_base, SDR_END, tszcvec)); 6839 } 6840 } 6841 6842 return (0); 6843 } 6844 6845 static int 6846 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot) 6847 { 6848 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6849 struct vpage *vp, *evp; 6850 6851 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6852 6853 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6854 /* 6855 * If segment protection can be used, simply check against them. 6856 */ 6857 if (svd->pageprot == 0) { 6858 int err; 6859 6860 err = ((svd->prot & prot) != prot) ? EACCES : 0; 6861 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6862 return (err); 6863 } 6864 6865 /* 6866 * Have to check down to the vpage level. 6867 */ 6868 evp = &svd->vpage[seg_page(seg, addr + len)]; 6869 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) { 6870 if ((VPP_PROT(vp) & prot) != prot) { 6871 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6872 return (EACCES); 6873 } 6874 } 6875 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6876 return (0); 6877 } 6878 6879 static int 6880 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv) 6881 { 6882 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6883 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1; 6884 6885 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6886 6887 if (pgno != 0) { 6888 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 6889 if (svd->pageprot == 0) { 6890 do { 6891 protv[--pgno] = svd->prot; 6892 } while (pgno != 0); 6893 } else { 6894 size_t pgoff = seg_page(seg, addr); 6895 6896 do { 6897 pgno--; 6898 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]); 6899 } while (pgno != 0); 6900 } 6901 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 6902 } 6903 return (0); 6904 } 6905 6906 static u_offset_t 6907 segvn_getoffset(struct seg *seg, caddr_t addr) 6908 { 6909 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6910 6911 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6912 6913 return (svd->offset + (uintptr_t)(addr - seg->s_base)); 6914 } 6915 6916 /*ARGSUSED*/ 6917 static int 6918 segvn_gettype(struct seg *seg, caddr_t addr) 6919 { 6920 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6921 6922 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6923 6924 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT | 6925 MAP_INITDATA))); 6926 } 6927 6928 /*ARGSUSED*/ 6929 static int 6930 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp) 6931 { 6932 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6933 6934 
ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6935 6936 *vpp = svd->vp; 6937 return (0); 6938 } 6939 6940 /* 6941 * Check to see if it makes sense to do kluster/read ahead to 6942 * addr + delta relative to the mapping at addr. We assume here 6943 * that delta is a signed PAGESIZE'd multiple (which can be negative). 6944 * 6945 * For segvn, we currently "approve" of the action if we are 6946 * still in the segment and it maps from the same vp/off, 6947 * or if the advice stored in segvn_data or vpages allows it. 6948 * Currently, klustering is not allowed only if MADV_RANDOM is set. 6949 */ 6950 static int 6951 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta) 6952 { 6953 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 6954 struct anon *oap, *ap; 6955 ssize_t pd; 6956 size_t page; 6957 struct vnode *vp1, *vp2; 6958 u_offset_t off1, off2; 6959 struct anon_map *amp; 6960 6961 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 6962 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) || 6963 SEGVN_LOCK_HELD(seg->s_as, &svd->lock)); 6964 6965 if (addr + delta < seg->s_base || 6966 addr + delta >= (seg->s_base + seg->s_size)) 6967 return (-1); /* exceeded segment bounds */ 6968 6969 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */ 6970 page = seg_page(seg, addr); 6971 6972 /* 6973 * Check to see if either of the pages addr or addr + delta 6974 * have advice set that prevents klustering (if MADV_RANDOM advice 6975 * is set for entire segment, or MADV_SEQUENTIAL is set and delta 6976 * is negative). 6977 */ 6978 if (svd->advice == MADV_RANDOM || 6979 svd->advice == MADV_SEQUENTIAL && delta < 0) 6980 return (-1); 6981 else if (svd->pageadvice && svd->vpage) { 6982 struct vpage *bvpp, *evpp; 6983 6984 bvpp = &svd->vpage[page]; 6985 evpp = &svd->vpage[page + pd]; 6986 if (VPP_ADVICE(bvpp) == MADV_RANDOM || 6987 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0) 6988 return (-1); 6989 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) && 6990 VPP_ADVICE(evpp) == MADV_RANDOM) 6991 return (-1); 6992 } 6993 6994 if (svd->type == MAP_SHARED) 6995 return (0); /* shared mapping - all ok */ 6996 6997 if ((amp = svd->amp) == NULL) 6998 return (0); /* off original vnode */ 6999 7000 page += svd->anon_index; 7001 7002 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7003 7004 oap = anon_get_ptr(amp->ahp, page); 7005 ap = anon_get_ptr(amp->ahp, page + pd); 7006 7007 ANON_LOCK_EXIT(&->a_rwlock); 7008 7009 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) { 7010 return (-1); /* one with and one without an anon */ 7011 } 7012 7013 if (oap == NULL) { /* implies that ap == NULL */ 7014 return (0); /* off original vnode */ 7015 } 7016 7017 /* 7018 * Now we know we have two anon pointers - check to 7019 * see if they happen to be properly allocated. 7020 */ 7021 7022 /* 7023 * XXX We cheat here and don't lock the anon slots. We can't because 7024 * we may have been called from the anon layer which might already 7025 * have locked them. We are holding a refcnt on the slots so they 7026 * can't disappear. The worst that will happen is we'll get the wrong 7027 * names (vp, off) for the slots and make a poor klustering decision. 7028 */ 7029 swap_xlate(ap, &vp1, &off1); 7030 swap_xlate(oap, &vp2, &off2); 7031 7032 7033 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta) 7034 return (-1); 7035 return (0); 7036 } 7037 7038 /* 7039 * Synchronize primary storage cache with real object in virtual memory. 
7040 * 7041 * XXX - Anonymous pages should not be sync'ed out at all. 7042 */ 7043 static int 7044 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags) 7045 { 7046 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7047 struct vpage *vpp; 7048 page_t *pp; 7049 u_offset_t offset; 7050 struct vnode *vp; 7051 u_offset_t off; 7052 caddr_t eaddr; 7053 int bflags; 7054 int err = 0; 7055 int segtype; 7056 int pageprot; 7057 int prot; 7058 ulong_t anon_index; 7059 struct anon_map *amp; 7060 struct anon *ap; 7061 anon_sync_obj_t cookie; 7062 7063 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7064 7065 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7066 7067 if (svd->softlockcnt > 0) { 7068 /* 7069 * If this is shared segment non 0 softlockcnt 7070 * means locked pages are still in use. 7071 */ 7072 if (svd->type == MAP_SHARED) { 7073 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7074 return (EAGAIN); 7075 } 7076 7077 /* 7078 * flush all pages from seg cache 7079 * otherwise we may deadlock in swap_putpage 7080 * for B_INVAL page (4175402). 7081 * 7082 * Even if we grab segvn WRITER's lock 7083 * here, there might be another thread which could've 7084 * successfully performed lookup/insert just before 7085 * we acquired the lock here. So, grabbing either 7086 * lock here is of not much use. Until we devise 7087 * a strategy at upper layers to solve the 7088 * synchronization issues completely, we expect 7089 * applications to handle this appropriately. 7090 */ 7091 segvn_purge(seg); 7092 if (svd->softlockcnt > 0) { 7093 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7094 return (EAGAIN); 7095 } 7096 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7097 svd->amp->a_softlockcnt > 0) { 7098 /* 7099 * Try to purge this amp's entries from pcache. It will 7100 * succeed only if other segments that share the amp have no 7101 * outstanding softlock's. 7102 */ 7103 segvn_purge(seg); 7104 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) { 7105 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7106 return (EAGAIN); 7107 } 7108 } 7109 7110 vpp = svd->vpage; 7111 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7112 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) | 7113 ((flags & MS_INVALIDATE) ? B_INVAL : 0); 7114 7115 if (attr) { 7116 pageprot = attr & ~(SHARED|PRIVATE); 7117 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE; 7118 7119 /* 7120 * We are done if the segment types don't match 7121 * or if we have segment level protections and 7122 * they don't match. 7123 */ 7124 if (svd->type != segtype) { 7125 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7126 return (0); 7127 } 7128 if (vpp == NULL) { 7129 if (svd->prot != pageprot) { 7130 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7131 return (0); 7132 } 7133 prot = svd->prot; 7134 } else 7135 vpp = &svd->vpage[seg_page(seg, addr)]; 7136 7137 } else if (svd->vp && svd->amp == NULL && 7138 (flags & MS_INVALIDATE) == 0) { 7139 7140 /* 7141 * No attributes, no anonymous pages and MS_INVALIDATE flag 7142 * is not on, just use one big request. 
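 *
 * Minimal user-level sketch (illustrative, not part of the original
 * code; fd and len are hypothetical).  A call sequence such as
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	p[0] = 1;
 *	(void) msync(p, len, MS_ASYNC);
 *
 * arrives here with no attributes and no anonymous pages, so the whole
 * range is pushed to the vnode with the single VOP_PUTPAGE() below;
 * MS_ASYNC and MS_INVALIDATE were already translated to B_ASYNC and
 * B_INVAL earlier in this routine.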
7143 */ 7144 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len, 7145 bflags, svd->cred, NULL); 7146 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7147 return (err); 7148 } 7149 7150 if ((amp = svd->amp) != NULL) 7151 anon_index = svd->anon_index + seg_page(seg, addr); 7152 7153 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) { 7154 ap = NULL; 7155 if (amp != NULL) { 7156 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7157 anon_array_enter(amp, anon_index, &cookie); 7158 ap = anon_get_ptr(amp->ahp, anon_index++); 7159 if (ap != NULL) { 7160 swap_xlate(ap, &vp, &off); 7161 } else { 7162 vp = svd->vp; 7163 off = offset; 7164 } 7165 anon_array_exit(&cookie); 7166 ANON_LOCK_EXIT(&->a_rwlock); 7167 } else { 7168 vp = svd->vp; 7169 off = offset; 7170 } 7171 offset += PAGESIZE; 7172 7173 if (vp == NULL) /* untouched zfod page */ 7174 continue; 7175 7176 if (attr) { 7177 if (vpp) { 7178 prot = VPP_PROT(vpp); 7179 vpp++; 7180 } 7181 if (prot != pageprot) { 7182 continue; 7183 } 7184 } 7185 7186 /* 7187 * See if any of these pages are locked -- if so, then we 7188 * will have to truncate an invalidate request at the first 7189 * locked one. We don't need the page_struct_lock to test 7190 * as this is only advisory; even if we acquire it someone 7191 * might race in and lock the page after we unlock and before 7192 * we do the PUTPAGE, then PUTPAGE simply does nothing. 7193 */ 7194 if (flags & MS_INVALIDATE) { 7195 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) { 7196 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) { 7197 page_unlock(pp); 7198 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7199 return (EBUSY); 7200 } 7201 if (ap != NULL && pp->p_szc != 0 && 7202 page_tryupgrade(pp)) { 7203 if (pp->p_lckcnt == 0 && 7204 pp->p_cowcnt == 0) { 7205 /* 7206 * swapfs VN_DISPOSE() won't 7207 * invalidate large pages. 7208 * Attempt to demote. 7209 * XXX can't help it if it 7210 * fails. But for swapfs 7211 * pages it is no big deal. 7212 */ 7213 (void) page_try_demote_pages( 7214 pp); 7215 } 7216 } 7217 page_unlock(pp); 7218 } 7219 } else if (svd->type == MAP_SHARED && amp != NULL) { 7220 /* 7221 * Avoid writing out to disk ISM's large pages 7222 * because segspt_free_pages() relies on NULL an_pvp 7223 * of anon slots of such pages. 7224 */ 7225 7226 ASSERT(svd->vp == NULL); 7227 /* 7228 * swapfs uses page_lookup_nowait if not freeing or 7229 * invalidating and skips a page if 7230 * page_lookup_nowait returns NULL. 7231 */ 7232 pp = page_lookup_nowait(vp, off, SE_SHARED); 7233 if (pp == NULL) { 7234 continue; 7235 } 7236 if (pp->p_szc != 0) { 7237 page_unlock(pp); 7238 continue; 7239 } 7240 7241 /* 7242 * Note ISM pages are created large so (vp, off)'s 7243 * page cannot suddenly become large after we unlock 7244 * pp. 7245 */ 7246 page_unlock(pp); 7247 } 7248 /* 7249 * XXX - Should ultimately try to kluster 7250 * calls to VOP_PUTPAGE() for performance. 7251 */ 7252 VN_HOLD(vp); 7253 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE, 7254 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)), 7255 svd->cred, NULL); 7256 7257 VN_RELE(vp); 7258 if (err) 7259 break; 7260 } 7261 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7262 return (err); 7263 } 7264 7265 /* 7266 * Determine if we have data corresponding to pages in the 7267 * primary storage virtual memory cache (i.e., "in core"). 
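 *
 * User-level sketch (illustrative, not part of the original code; p
 * and len are hypothetical).  mincore(2) is the usual caller, with one
 * result byte per page:
 *
 *	long pg = sysconf(_SC_PAGESIZE);
 *	char *vec = malloc((len + pg - 1) / pg);
 *	if (mincore(p, len, vec) == 0 && (vec[0] & 1))
 *		printf("first page resident\n");
 *
 * The SEG_PAGE_* bits gathered below (INCORE, ANON, VNODE, SOFTLOCK,
 * HASCOW, LOCKED) make up the per-page result this routine reports;
 * the documented user-level mincore contract only guarantees the
 * low-order "in core" bit.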
7268 */ 7269 static size_t 7270 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec) 7271 { 7272 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7273 struct vnode *vp, *avp; 7274 u_offset_t offset, aoffset; 7275 size_t p, ep; 7276 int ret; 7277 struct vpage *vpp; 7278 page_t *pp; 7279 uint_t start; 7280 struct anon_map *amp; /* XXX - for locknest */ 7281 struct anon *ap; 7282 uint_t attr; 7283 anon_sync_obj_t cookie; 7284 7285 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7286 7287 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7288 if (svd->amp == NULL && svd->vp == NULL) { 7289 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7290 bzero(vec, btopr(len)); 7291 return (len); /* no anonymous pages created yet */ 7292 } 7293 7294 p = seg_page(seg, addr); 7295 ep = seg_page(seg, addr + len); 7296 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0; 7297 7298 amp = svd->amp; 7299 for (; p < ep; p++, addr += PAGESIZE) { 7300 vpp = (svd->vpage) ? &svd->vpage[p]: NULL; 7301 ret = start; 7302 ap = NULL; 7303 avp = NULL; 7304 /* Grab the vnode/offset for the anon slot */ 7305 if (amp != NULL) { 7306 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7307 anon_array_enter(amp, svd->anon_index + p, &cookie); 7308 ap = anon_get_ptr(amp->ahp, svd->anon_index + p); 7309 if (ap != NULL) { 7310 swap_xlate(ap, &avp, &aoffset); 7311 } 7312 anon_array_exit(&cookie); 7313 ANON_LOCK_EXIT(&->a_rwlock); 7314 } 7315 if ((avp != NULL) && page_exists(avp, aoffset)) { 7316 /* A page exists for the anon slot */ 7317 ret |= SEG_PAGE_INCORE; 7318 7319 /* 7320 * If page is mapped and writable 7321 */ 7322 attr = (uint_t)0; 7323 if ((hat_getattr(seg->s_as->a_hat, addr, 7324 &attr) != -1) && (attr & PROT_WRITE)) { 7325 ret |= SEG_PAGE_ANON; 7326 } 7327 /* 7328 * Don't get page_struct lock for lckcnt and cowcnt, 7329 * since this is purely advisory. 7330 */ 7331 if ((pp = page_lookup_nowait(avp, aoffset, 7332 SE_SHARED)) != NULL) { 7333 if (pp->p_lckcnt) 7334 ret |= SEG_PAGE_SOFTLOCK; 7335 if (pp->p_cowcnt) 7336 ret |= SEG_PAGE_HASCOW; 7337 page_unlock(pp); 7338 } 7339 } 7340 7341 /* Gather vnode statistics */ 7342 vp = svd->vp; 7343 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7344 7345 if (vp != NULL) { 7346 /* 7347 * Try to obtain a "shared" lock on the page 7348 * without blocking. If this fails, determine 7349 * if the page is in memory. 7350 */ 7351 pp = page_lookup_nowait(vp, offset, SE_SHARED); 7352 if ((pp == NULL) && (page_exists(vp, offset))) { 7353 /* Page is incore, and is named */ 7354 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7355 } 7356 /* 7357 * Don't get page_struct lock for lckcnt and cowcnt, 7358 * since this is purely advisory. 7359 */ 7360 if (pp != NULL) { 7361 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE); 7362 if (pp->p_lckcnt) 7363 ret |= SEG_PAGE_SOFTLOCK; 7364 if (pp->p_cowcnt) 7365 ret |= SEG_PAGE_HASCOW; 7366 page_unlock(pp); 7367 } 7368 } 7369 7370 /* Gather virtual page information */ 7371 if (vpp) { 7372 if (VPP_ISPPLOCK(vpp)) 7373 ret |= SEG_PAGE_LOCKED; 7374 vpp++; 7375 } 7376 7377 *vec++ = (char)ret; 7378 } 7379 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7380 return (len); 7381 } 7382 7383 /* 7384 * Statement for p_cowcnts/p_lckcnts. 
7385 * 7386 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region 7387 * irrespective of the following factors or anything else: 7388 * 7389 * (1) anon slots are populated or not 7390 * (2) cow is broken or not 7391 * (3) refcnt on ap is 1 or greater than 1 7392 * 7393 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock 7394 * and munlock. 7395 * 7396 * 7397 * Handling p_cowcnts/p_lckcnts during copy-on-write fault: 7398 * 7399 * if vpage has PROT_WRITE 7400 * transfer cowcnt on the oldpage -> cowcnt on the newpage 7401 * else 7402 * transfer lckcnt on the oldpage -> lckcnt on the newpage 7403 * 7404 * During copy-on-write, decrement p_cowcnt on the oldpage and increment 7405 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE. 7406 * 7407 * We may also break COW if softlocking on read access in the physio case. 7408 * In this case, vpage may not have PROT_WRITE. So, we need to decrement 7409 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the 7410 * vpage doesn't have PROT_WRITE. 7411 * 7412 * 7413 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region: 7414 * 7415 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and 7416 * increment p_lckcnt by calling page_subclaim() which takes care of 7417 * availrmem accounting and p_lckcnt overflow. 7418 * 7419 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and 7420 * increment p_cowcnt by calling page_addclaim() which takes care of 7421 * availrmem availability and p_cowcnt overflow. 7422 */ 7423 7424 /* 7425 * Lock down (or unlock) pages mapped by this segment. 7426 * 7427 * XXX only creates PAGESIZE pages if anon slots are not initialized. 7428 * At fault time they will be relocated into larger pages. 7429 */ 7430 static int 7431 segvn_lockop(struct seg *seg, caddr_t addr, size_t len, 7432 int attr, int op, ulong_t *lockmap, size_t pos) 7433 { 7434 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7435 struct vpage *vpp; 7436 struct vpage *evp; 7437 page_t *pp; 7438 u_offset_t offset; 7439 u_offset_t off; 7440 int segtype; 7441 int pageprot; 7442 int claim; 7443 struct vnode *vp; 7444 ulong_t anon_index; 7445 struct anon_map *amp; 7446 struct anon *ap; 7447 struct vattr va; 7448 anon_sync_obj_t cookie; 7449 struct kshmid *sp = NULL; 7450 struct proc *p = curproc; 7451 kproject_t *proj = NULL; 7452 int chargeproc = 1; 7453 size_t locked_bytes = 0; 7454 size_t unlocked_bytes = 0; 7455 int err = 0; 7456 7457 /* 7458 * Hold write lock on address space because may split or concatenate 7459 * segments 7460 */ 7461 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7462 7463 /* 7464 * If this is a shm, use shm's project and zone, else use 7465 * project and zone of calling process 7466 */ 7467 7468 /* Determine if this segment backs a sysV shm */ 7469 if (svd->amp != NULL && svd->amp->a_sp != NULL) { 7470 ASSERT(svd->type == MAP_SHARED); 7471 ASSERT(svd->tr_state == SEGVN_TR_OFF); 7472 sp = svd->amp->a_sp; 7473 proj = sp->shm_perm.ipc_proj; 7474 chargeproc = 0; 7475 } 7476 7477 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7478 if (attr) { 7479 pageprot = attr & ~(SHARED|PRIVATE); 7480 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE; 7481 7482 /* 7483 * We are done if the segment types don't match 7484 * or if we have segment level protections and 7485 * they don't match. 
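 *
 * User-level sketch (illustrative, not part of the original code; p
 * and len are hypothetical).  Both of the following reach this routine
 * through as_ctl(), the first as op == MC_LOCK with no attributes, the
 * second with attr restricting the operation to private, readable
 * pages:
 *
 *	(void) mlock(p, len);
 *	(void) memcntl(p, len, MC_LOCK, 0, PRIVATE | PROT_READ, 0);
 *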
7486 */ 7487 if (svd->type != segtype) { 7488 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7489 return (0); 7490 } 7491 if (svd->pageprot == 0 && svd->prot != pageprot) { 7492 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7493 return (0); 7494 } 7495 } 7496 7497 if (op == MC_LOCK) { 7498 if (svd->tr_state == SEGVN_TR_INIT) { 7499 svd->tr_state = SEGVN_TR_OFF; 7500 } else if (svd->tr_state == SEGVN_TR_ON) { 7501 ASSERT(svd->amp != NULL); 7502 segvn_textunrepl(seg, 0); 7503 ASSERT(svd->amp == NULL && 7504 svd->tr_state == SEGVN_TR_OFF); 7505 } 7506 } 7507 7508 /* 7509 * If we're locking, then we must create a vpage structure if 7510 * none exists. If we're unlocking, then check to see if there 7511 * is a vpage -- if not, then we could not have locked anything. 7512 */ 7513 7514 if ((vpp = svd->vpage) == NULL) { 7515 if (op == MC_LOCK) { 7516 segvn_vpage(seg); 7517 if (svd->vpage == NULL) { 7518 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7519 return (ENOMEM); 7520 } 7521 } else { 7522 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7523 return (0); 7524 } 7525 } 7526 7527 /* 7528 * The anonymous data vector (i.e., previously 7529 * unreferenced mapping to swap space) can be allocated 7530 * by lazily testing for its existence. 7531 */ 7532 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) { 7533 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 7534 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP); 7535 svd->amp->a_szc = seg->s_szc; 7536 } 7537 7538 if ((amp = svd->amp) != NULL) { 7539 anon_index = svd->anon_index + seg_page(seg, addr); 7540 } 7541 7542 offset = svd->offset + (uintptr_t)(addr - seg->s_base); 7543 evp = &svd->vpage[seg_page(seg, addr + len)]; 7544 7545 if (sp != NULL) 7546 mutex_enter(&sp->shm_mlock); 7547 7548 /* determine number of unlocked bytes in range for lock operation */ 7549 if (op == MC_LOCK) { 7550 7551 if (sp == NULL) { 7552 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7553 vpp++) { 7554 if (!VPP_ISPPLOCK(vpp)) 7555 unlocked_bytes += PAGESIZE; 7556 } 7557 } else { 7558 ulong_t i_idx, i_edx; 7559 anon_sync_obj_t i_cookie; 7560 struct anon *i_ap; 7561 struct vnode *i_vp; 7562 u_offset_t i_off; 7563 7564 /* Only count sysV pages once for locked memory */ 7565 i_edx = svd->anon_index + seg_page(seg, addr + len); 7566 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7567 for (i_idx = anon_index; i_idx < i_edx; i_idx++) { 7568 anon_array_enter(amp, i_idx, &i_cookie); 7569 i_ap = anon_get_ptr(amp->ahp, i_idx); 7570 if (i_ap == NULL) { 7571 unlocked_bytes += PAGESIZE; 7572 anon_array_exit(&i_cookie); 7573 continue; 7574 } 7575 swap_xlate(i_ap, &i_vp, &i_off); 7576 anon_array_exit(&i_cookie); 7577 pp = page_lookup(i_vp, i_off, SE_SHARED); 7578 if (pp == NULL) { 7579 unlocked_bytes += PAGESIZE; 7580 continue; 7581 } else if (pp->p_lckcnt == 0) 7582 unlocked_bytes += PAGESIZE; 7583 page_unlock(pp); 7584 } 7585 ANON_LOCK_EXIT(&->a_rwlock); 7586 } 7587 7588 mutex_enter(&p->p_lock); 7589 err = rctl_incr_locked_mem(p, proj, unlocked_bytes, 7590 chargeproc); 7591 mutex_exit(&p->p_lock); 7592 7593 if (err) { 7594 if (sp != NULL) 7595 mutex_exit(&sp->shm_mlock); 7596 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7597 return (err); 7598 } 7599 } 7600 /* 7601 * Loop over all pages in the range. Process if we're locking and 7602 * page has not already been locked in this mapping; or if we're 7603 * unlocking and the page has been locked. 
7604 */ 7605 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp; 7606 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) { 7607 if ((attr == 0 || VPP_PROT(vpp) == pageprot) && 7608 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) || 7609 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) { 7610 7611 if (amp != NULL) 7612 ANON_LOCK_ENTER(&->a_rwlock, RW_READER); 7613 /* 7614 * If this isn't a MAP_NORESERVE segment and 7615 * we're locking, allocate anon slots if they 7616 * don't exist. The page is brought in later on. 7617 */ 7618 if (op == MC_LOCK && svd->vp == NULL && 7619 ((svd->flags & MAP_NORESERVE) == 0) && 7620 amp != NULL && 7621 ((ap = anon_get_ptr(amp->ahp, anon_index)) 7622 == NULL)) { 7623 anon_array_enter(amp, anon_index, &cookie); 7624 7625 if ((ap = anon_get_ptr(amp->ahp, 7626 anon_index)) == NULL) { 7627 pp = anon_zero(seg, addr, &ap, 7628 svd->cred); 7629 if (pp == NULL) { 7630 anon_array_exit(&cookie); 7631 ANON_LOCK_EXIT(&->a_rwlock); 7632 err = ENOMEM; 7633 goto out; 7634 } 7635 ASSERT(anon_get_ptr(amp->ahp, 7636 anon_index) == NULL); 7637 (void) anon_set_ptr(amp->ahp, 7638 anon_index, ap, ANON_SLEEP); 7639 page_unlock(pp); 7640 } 7641 anon_array_exit(&cookie); 7642 } 7643 7644 /* 7645 * Get name for page, accounting for 7646 * existence of private copy. 7647 */ 7648 ap = NULL; 7649 if (amp != NULL) { 7650 anon_array_enter(amp, anon_index, &cookie); 7651 ap = anon_get_ptr(amp->ahp, anon_index); 7652 if (ap != NULL) { 7653 swap_xlate(ap, &vp, &off); 7654 } else { 7655 if (svd->vp == NULL && 7656 (svd->flags & MAP_NORESERVE)) { 7657 anon_array_exit(&cookie); 7658 ANON_LOCK_EXIT(&->a_rwlock); 7659 continue; 7660 } 7661 vp = svd->vp; 7662 off = offset; 7663 } 7664 if (op != MC_LOCK || ap == NULL) { 7665 anon_array_exit(&cookie); 7666 ANON_LOCK_EXIT(&->a_rwlock); 7667 } 7668 } else { 7669 vp = svd->vp; 7670 off = offset; 7671 } 7672 7673 /* 7674 * Get page frame. It's ok if the page is 7675 * not available when we're unlocking, as this 7676 * may simply mean that a page we locked got 7677 * truncated out of existence after we locked it. 7678 * 7679 * Invoke VOP_GETPAGE() to obtain the page struct 7680 * since we may need to read it from disk if its 7681 * been paged out. 7682 */ 7683 if (op != MC_LOCK) 7684 pp = page_lookup(vp, off, SE_SHARED); 7685 else { 7686 page_t *pl[1 + 1]; 7687 int error; 7688 7689 ASSERT(vp != NULL); 7690 7691 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE, 7692 (uint_t *)NULL, pl, PAGESIZE, seg, addr, 7693 S_OTHER, svd->cred, NULL); 7694 7695 if (error && ap != NULL) { 7696 anon_array_exit(&cookie); 7697 ANON_LOCK_EXIT(&->a_rwlock); 7698 } 7699 7700 /* 7701 * If the error is EDEADLK then we must bounce 7702 * up and drop all vm subsystem locks and then 7703 * retry the operation later 7704 * This behavior is a temporary measure because 7705 * ufs/sds logging is badly designed and will 7706 * deadlock if we don't allow this bounce to 7707 * happen. The real solution is to re-design 7708 * the logging code to work properly. See bug 7709 * 4125102 for details of the problem. 7710 */ 7711 if (error == EDEADLK) { 7712 err = error; 7713 goto out; 7714 } 7715 /* 7716 * Quit if we fail to fault in the page. Treat 7717 * the failure as an error, unless the addr 7718 * is mapped beyond the end of a file. 
7719 */ 7720 if (error && svd->vp) { 7721 va.va_mask = AT_SIZE; 7722 if (VOP_GETATTR(svd->vp, &va, 0, 7723 svd->cred, NULL) != 0) { 7724 err = EIO; 7725 goto out; 7726 } 7727 if (btopr(va.va_size) >= 7728 btopr(off + 1)) { 7729 err = EIO; 7730 goto out; 7731 } 7732 goto out; 7733 7734 } else if (error) { 7735 err = EIO; 7736 goto out; 7737 } 7738 pp = pl[0]; 7739 ASSERT(pp != NULL); 7740 } 7741 7742 /* 7743 * See Statement at the beginning of this routine. 7744 * 7745 * claim is always set if MAP_PRIVATE and PROT_WRITE 7746 * irrespective of following factors: 7747 * 7748 * (1) anon slots are populated or not 7749 * (2) cow is broken or not 7750 * (3) refcnt on ap is 1 or greater than 1 7751 * 7752 * See 4140683 for details 7753 */ 7754 claim = ((VPP_PROT(vpp) & PROT_WRITE) && 7755 (svd->type == MAP_PRIVATE)); 7756 7757 /* 7758 * Perform page-level operation appropriate to 7759 * operation. If locking, undo the SOFTLOCK 7760 * performed to bring the page into memory 7761 * after setting the lock. If unlocking, 7762 * and no page was found, account for the claim 7763 * separately. 7764 */ 7765 if (op == MC_LOCK) { 7766 int ret = 1; /* Assume success */ 7767 7768 ASSERT(!VPP_ISPPLOCK(vpp)); 7769 7770 ret = page_pp_lock(pp, claim, 0); 7771 if (ap != NULL) { 7772 if (ap->an_pvp != NULL) { 7773 anon_swap_free(ap, pp); 7774 } 7775 anon_array_exit(&cookie); 7776 ANON_LOCK_EXIT(&->a_rwlock); 7777 } 7778 if (ret == 0) { 7779 /* locking page failed */ 7780 page_unlock(pp); 7781 err = EAGAIN; 7782 goto out; 7783 } 7784 VPP_SETPPLOCK(vpp); 7785 if (sp != NULL) { 7786 if (pp->p_lckcnt == 1) 7787 locked_bytes += PAGESIZE; 7788 } else 7789 locked_bytes += PAGESIZE; 7790 7791 if (lockmap != (ulong_t *)NULL) 7792 BT_SET(lockmap, pos); 7793 7794 page_unlock(pp); 7795 } else { 7796 ASSERT(VPP_ISPPLOCK(vpp)); 7797 if (pp != NULL) { 7798 /* sysV pages should be locked */ 7799 ASSERT(sp == NULL || pp->p_lckcnt > 0); 7800 page_pp_unlock(pp, claim, 0); 7801 if (sp != NULL) { 7802 if (pp->p_lckcnt == 0) 7803 unlocked_bytes 7804 += PAGESIZE; 7805 } else 7806 unlocked_bytes += PAGESIZE; 7807 page_unlock(pp); 7808 } else { 7809 ASSERT(sp == NULL); 7810 unlocked_bytes += PAGESIZE; 7811 } 7812 VPP_CLRPPLOCK(vpp); 7813 } 7814 } 7815 } 7816 out: 7817 if (op == MC_LOCK) { 7818 /* Credit back bytes that did not get locked */ 7819 if ((unlocked_bytes - locked_bytes) > 0) { 7820 if (proj == NULL) 7821 mutex_enter(&p->p_lock); 7822 rctl_decr_locked_mem(p, proj, 7823 (unlocked_bytes - locked_bytes), chargeproc); 7824 if (proj == NULL) 7825 mutex_exit(&p->p_lock); 7826 } 7827 7828 } else { 7829 /* Account bytes that were unlocked */ 7830 if (unlocked_bytes > 0) { 7831 if (proj == NULL) 7832 mutex_enter(&p->p_lock); 7833 rctl_decr_locked_mem(p, proj, unlocked_bytes, 7834 chargeproc); 7835 if (proj == NULL) 7836 mutex_exit(&p->p_lock); 7837 } 7838 } 7839 if (sp != NULL) 7840 mutex_exit(&sp->shm_mlock); 7841 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7842 7843 return (err); 7844 } 7845 7846 /* 7847 * Set advice from user for specified pages 7848 * There are 9 types of advice: 7849 * MADV_NORMAL - Normal (default) behavior (whatever that is) 7850 * MADV_RANDOM - Random page references 7851 * do not allow readahead or 'klustering' 7852 * MADV_SEQUENTIAL - Sequential page references 7853 * Pages previous to the one currently being 7854 * accessed (determined by fault) are 'not needed' 7855 * and are freed immediately 7856 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl) 7857 * MADV_DONTNEED - Pages are not needed 
(synced out in mctl) 7858 * MADV_FREE - Contents can be discarded 7859 * MADV_ACCESS_DEFAULT- Default access 7860 * MADV_ACCESS_LWP - Next LWP will access heavily 7861 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily 7862 */ 7863 static int 7864 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 7865 { 7866 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 7867 size_t page; 7868 int err = 0; 7869 int already_set; 7870 struct anon_map *amp; 7871 ulong_t anon_index; 7872 struct seg *next; 7873 lgrp_mem_policy_t policy; 7874 struct seg *prev; 7875 struct vnode *vp; 7876 7877 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 7878 7879 /* 7880 * In case of MADV_FREE, we won't be modifying any segment private 7881 * data structures; so, we only need to grab READER's lock 7882 */ 7883 if (behav != MADV_FREE) { 7884 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 7885 if (svd->tr_state != SEGVN_TR_OFF) { 7886 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7887 return (0); 7888 } 7889 } else { 7890 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 7891 } 7892 7893 /* 7894 * Large pages are assumed to be only turned on when accesses to the 7895 * segment's address range have spatial and temporal locality. That 7896 * justifies ignoring MADV_SEQUENTIAL for large page segments. 7897 * Also, ignore advice affecting lgroup memory allocation 7898 * if don't need to do lgroup optimizations on this system 7899 */ 7900 7901 if ((behav == MADV_SEQUENTIAL && 7902 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) || 7903 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT || 7904 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) { 7905 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7906 return (0); 7907 } 7908 7909 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT || 7910 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) { 7911 /* 7912 * Since we are going to unload hat mappings 7913 * we first have to flush the cache. Otherwise 7914 * this might lead to system panic if another 7915 * thread is doing physio on the range whose 7916 * mappings are unloaded by madvise(3C). 7917 */ 7918 if (svd->softlockcnt > 0) { 7919 /* 7920 * If this is shared segment non 0 softlockcnt 7921 * means locked pages are still in use. 7922 */ 7923 if (svd->type == MAP_SHARED) { 7924 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7925 return (EAGAIN); 7926 } 7927 /* 7928 * Since we do have the segvn writers lock 7929 * nobody can fill the cache with entries 7930 * belonging to this seg during the purge. 7931 * The flush either succeeds or we still 7932 * have pending I/Os. In the later case, 7933 * madvise(3C) fails. 7934 */ 7935 segvn_purge(seg); 7936 if (svd->softlockcnt > 0) { 7937 /* 7938 * Since madvise(3C) is advisory and 7939 * it's not part of UNIX98, madvise(3C) 7940 * failure here doesn't cause any hardship. 7941 * Note that we don't block in "as" layer. 7942 */ 7943 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7944 return (EAGAIN); 7945 } 7946 } else if (svd->type == MAP_SHARED && svd->amp != NULL && 7947 svd->amp->a_softlockcnt > 0) { 7948 /* 7949 * Try to purge this amp's entries from pcache. It 7950 * will succeed only if other segments that share the 7951 * amp have no outstanding softlock's. 
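 *
 * User-level sketch (illustrative, not part of the original code; p
 * and len are hypothetical).  Calls such as
 *
 *	(void) madvise(p, len, MADV_SEQUENTIAL);
 *	(void) madvise(p, len, MADV_ACCESS_LWP);
 *
 * end up in this routine; the first is ignored for large-page and
 * shared-text-region segments as noted earlier, and the second feeds
 * the lgroup memory-placement policy handled further below.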
7952 */ 7953 segvn_purge(seg); 7954 } 7955 } 7956 7957 amp = svd->amp; 7958 vp = svd->vp; 7959 if (behav == MADV_FREE) { 7960 /* 7961 * MADV_FREE is not supported for segments with 7962 * underlying object; if anonmap is NULL, anon slots 7963 * are not yet populated and there is nothing for 7964 * us to do. As MADV_FREE is advisory, we don't 7965 * return error in either case. 7966 */ 7967 if (vp != NULL || amp == NULL) { 7968 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7969 return (0); 7970 } 7971 7972 segvn_purge(seg); 7973 7974 page = seg_page(seg, addr); 7975 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 7976 anon_disclaim(amp, svd->anon_index + page, len); 7977 ANON_LOCK_EXIT(&amp->a_rwlock); 7978 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 7979 return (0); 7980 } 7981 7982 /* 7983 * If advice is to be applied to entire segment, 7984 * use advice field in seg_data structure 7985 * otherwise use appropriate vpage entry. 7986 */ 7987 if ((addr == seg->s_base) && (len == seg->s_size)) { 7988 switch (behav) { 7989 case MADV_ACCESS_LWP: 7990 case MADV_ACCESS_MANY: 7991 case MADV_ACCESS_DEFAULT: 7992 /* 7993 * Set memory allocation policy for this segment 7994 */ 7995 policy = lgrp_madv_to_policy(behav, len, svd->type); 7996 if (svd->type == MAP_SHARED) 7997 already_set = lgrp_shm_policy_set(policy, amp, 7998 svd->anon_index, vp, svd->offset, len); 7999 else { 8000 /* 8001 * For private memory, need writers lock on 8002 * address space because the segment may be 8003 * split or concatenated when changing policy 8004 */ 8005 if (AS_READ_HELD(seg->s_as, 8006 &seg->s_as->a_lock)) { 8007 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8008 return (IE_RETRY); 8009 } 8010 8011 already_set = lgrp_privm_policy_set(policy, 8012 &svd->policy_info, len); 8013 } 8014 8015 /* 8016 * If policy set already and it shouldn't be reapplied, 8017 * don't do anything. 8018 */ 8019 if (already_set && 8020 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8021 break; 8022 8023 /* 8024 * Mark any existing pages in given range for 8025 * migration 8026 */ 8027 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8028 vp, svd->offset, 1); 8029 8030 /* 8031 * If same policy set already or this is a shared 8032 * memory segment, don't need to try to concatenate 8033 * segment with adjacent ones. 8034 */ 8035 if (already_set || svd->type == MAP_SHARED) 8036 break; 8037 8038 /* 8039 * Try to concatenate this segment with previous 8040 * one and next one, since we changed policy for 8041 * this one and it may be compatible with adjacent 8042 * ones now.
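* Merging with the next segment leaves this segment in place, while a successful merge with the previous segment deletes it, which is why that path drops the segvn lock first and returns IE_REATTACH to as_ctl().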
8043 */ 8044 prev = AS_SEGPREV(seg->s_as, seg); 8045 next = AS_SEGNEXT(seg->s_as, seg); 8046 8047 if (next && next->s_ops == &segvn_ops && 8048 addr + len == next->s_base) 8049 (void) segvn_concat(seg, next, 1); 8050 8051 if (prev && prev->s_ops == &segvn_ops && 8052 addr == prev->s_base + prev->s_size) { 8053 /* 8054 * Drop lock for private data of current 8055 * segment before concatenating (deleting) it 8056 * and return IE_REATTACH to tell as_ctl() that 8057 * current segment has changed 8058 */ 8059 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8060 if (!segvn_concat(prev, seg, 1)) 8061 err = IE_REATTACH; 8062 8063 return (err); 8064 } 8065 break; 8066 8067 case MADV_SEQUENTIAL: 8068 /* 8069 * unloading mapping guarantees 8070 * detection in segvn_fault 8071 */ 8072 ASSERT(seg->s_szc == 0); 8073 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8074 hat_unload(seg->s_as->a_hat, addr, len, 8075 HAT_UNLOAD); 8076 /* FALLTHROUGH */ 8077 case MADV_NORMAL: 8078 case MADV_RANDOM: 8079 svd->advice = (uchar_t)behav; 8080 svd->pageadvice = 0; 8081 break; 8082 case MADV_WILLNEED: /* handled in memcntl */ 8083 case MADV_DONTNEED: /* handled in memcntl */ 8084 case MADV_FREE: /* handled above */ 8085 break; 8086 default: 8087 err = EINVAL; 8088 } 8089 } else { 8090 caddr_t eaddr; 8091 struct seg *new_seg; 8092 struct segvn_data *new_svd; 8093 u_offset_t off; 8094 caddr_t oldeaddr; 8095 8096 page = seg_page(seg, addr); 8097 8098 segvn_vpage(seg); 8099 if (svd->vpage == NULL) { 8100 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8101 return (ENOMEM); 8102 } 8103 8104 switch (behav) { 8105 struct vpage *bvpp, *evpp; 8106 8107 case MADV_ACCESS_LWP: 8108 case MADV_ACCESS_MANY: 8109 case MADV_ACCESS_DEFAULT: 8110 /* 8111 * Set memory allocation policy for portion of this 8112 * segment 8113 */ 8114 8115 /* 8116 * Align address and length of advice to page 8117 * boundaries for large pages 8118 */ 8119 if (seg->s_szc != 0) { 8120 size_t pgsz; 8121 8122 pgsz = page_get_pagesize(seg->s_szc); 8123 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz); 8124 len = P2ROUNDUP(len, pgsz); 8125 } 8126 8127 /* 8128 * Check to see whether policy is set already 8129 */ 8130 policy = lgrp_madv_to_policy(behav, len, svd->type); 8131 8132 anon_index = svd->anon_index + page; 8133 off = svd->offset + (uintptr_t)(addr - seg->s_base); 8134 8135 if (svd->type == MAP_SHARED) 8136 already_set = lgrp_shm_policy_set(policy, amp, 8137 anon_index, vp, off, len); 8138 else 8139 already_set = 8140 (policy == svd->policy_info.mem_policy); 8141 8142 /* 8143 * If policy set already and it shouldn't be reapplied, 8144 * don't do anything. 
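* Otherwise continue below: after verifying the address space lock requirement, any existing pages in the range are marked for migration.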
8145 */ 8146 if (already_set && 8147 !LGRP_MEM_POLICY_REAPPLICABLE(policy)) 8148 break; 8149 8150 /* 8151 * For private memory, need writers lock on 8152 * address space because the segment may be 8153 * split or concatenated when changing policy 8154 */ 8155 if (svd->type == MAP_PRIVATE && 8156 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) { 8157 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8158 return (IE_RETRY); 8159 } 8160 8161 /* 8162 * Mark any existing pages in given range for 8163 * migration 8164 */ 8165 page_mark_migrate(seg, addr, len, amp, svd->anon_index, 8166 vp, svd->offset, 1); 8167 8168 /* 8169 * Don't need to try to split or concatenate 8170 * segments, since policy is same or this is a shared 8171 * memory segment 8172 */ 8173 if (already_set || svd->type == MAP_SHARED) 8174 break; 8175 8176 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) { 8177 ASSERT(svd->amp == NULL); 8178 ASSERT(svd->tr_state == SEGVN_TR_OFF); 8179 ASSERT(svd->softlockcnt == 0); 8180 hat_leave_region(seg->s_as->a_hat, svd->rcookie, 8181 HAT_REGION_TEXT); 8182 svd->rcookie = HAT_INVALID_REGION_COOKIE; 8183 } 8184 8185 /* 8186 * Split off new segment if advice only applies to a 8187 * portion of existing segment starting in middle 8188 */ 8189 new_seg = NULL; 8190 eaddr = addr + len; 8191 oldeaddr = seg->s_base + seg->s_size; 8192 if (addr > seg->s_base) { 8193 /* 8194 * Must flush I/O page cache 8195 * before splitting segment 8196 */ 8197 if (svd->softlockcnt > 0) 8198 segvn_purge(seg); 8199 8200 /* 8201 * Split segment and return IE_REATTACH to tell 8202 * as_ctl() that current segment changed 8203 */ 8204 new_seg = segvn_split_seg(seg, addr); 8205 new_svd = (struct segvn_data *)new_seg->s_data; 8206 err = IE_REATTACH; 8207 8208 /* 8209 * If new segment ends where old one 8210 * did, try to concatenate the new 8211 * segment with next one. 8212 */ 8213 if (eaddr == oldeaddr) { 8214 /* 8215 * Set policy for new segment 8216 */ 8217 (void) lgrp_privm_policy_set(policy, 8218 &new_svd->policy_info, 8219 new_seg->s_size); 8220 8221 next = AS_SEGNEXT(new_seg->s_as, 8222 new_seg); 8223 8224 if (next && 8225 next->s_ops == &segvn_ops && 8226 eaddr == next->s_base) 8227 (void) segvn_concat(new_seg, 8228 next, 1); 8229 } 8230 } 8231 8232 /* 8233 * Split off end of existing segment if advice only 8234 * applies to a portion of segment ending before 8235 * end of the existing segment 8236 */ 8237 if (eaddr < oldeaddr) { 8238 /* 8239 * Must flush I/O page cache 8240 * before splitting segment 8241 */ 8242 if (svd->softlockcnt > 0) 8243 segvn_purge(seg); 8244 8245 /* 8246 * If beginning of old segment was already 8247 * split off, use new segment to split end off 8248 * from. 8249 */ 8250 if (new_seg != NULL && new_seg != seg) { 8251 /* 8252 * Split segment 8253 */ 8254 (void) segvn_split_seg(new_seg, eaddr); 8255 8256 /* 8257 * Set policy for new segment 8258 */ 8259 (void) lgrp_privm_policy_set(policy, 8260 &new_svd->policy_info, 8261 new_seg->s_size); 8262 } else { 8263 /* 8264 * Split segment and return IE_REATTACH 8265 * to tell as_ctl() that current 8266 * segment changed 8267 */ 8268 (void) segvn_split_seg(seg, eaddr); 8269 err = IE_REATTACH; 8270 8271 (void) lgrp_privm_policy_set(policy, 8272 &svd->policy_info, seg->s_size); 8273 8274 /* 8275 * If new segment starts where old one 8276 * did, try to concatenate it with 8277 * previous segment. 
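* As in the whole-segment case above, a successful merge with the previous segment deletes the current one, so the segvn lock is dropped before calling segvn_concat() and IE_REATTACH is returned.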
8278 */ 8279 if (addr == seg->s_base) { 8280 prev = AS_SEGPREV(seg->s_as, 8281 seg); 8282 8283 /* 8284 * Drop lock for private data 8285 * of current segment before 8286 * concatenating (deleting) it 8287 */ 8288 if (prev && 8289 prev->s_ops == 8290 &segvn_ops && 8291 addr == prev->s_base + 8292 prev->s_size) { 8293 SEGVN_LOCK_EXIT( 8294 seg->s_as, 8295 &svd->lock); 8296 (void) segvn_concat( 8297 prev, seg, 1); 8298 return (err); 8299 } 8300 } 8301 } 8302 } 8303 break; 8304 case MADV_SEQUENTIAL: 8305 ASSERT(seg->s_szc == 0); 8306 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE); 8307 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD); 8308 /* FALLTHROUGH */ 8309 case MADV_NORMAL: 8310 case MADV_RANDOM: 8311 bvpp = &svd->vpage[page]; 8312 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8313 for (; bvpp < evpp; bvpp++) 8314 VPP_SETADVICE(bvpp, behav); 8315 svd->advice = MADV_NORMAL; 8316 break; 8317 case MADV_WILLNEED: /* handled in memcntl */ 8318 case MADV_DONTNEED: /* handled in memcntl */ 8319 case MADV_FREE: /* handled above */ 8320 break; 8321 default: 8322 err = EINVAL; 8323 } 8324 } 8325 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8326 return (err); 8327 } 8328 8329 /* 8330 * There is one kind of inheritance that can be specified for pages: 8331 * 8332 * SEGP_INH_ZERO - Pages should be zeroed in the child 8333 */ 8334 static int 8335 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav) 8336 { 8337 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8338 struct vpage *bvpp, *evpp; 8339 size_t page; 8340 int ret = 0; 8341 8342 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8343 8344 /* Can't support something we don't know about */ 8345 if (behav != SEGP_INH_ZERO) 8346 return (ENOTSUP); 8347 8348 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER); 8349 8350 /* 8351 * This must be a straightforward anonymous segment that is mapped 8352 * privately and is not backed by a vnode. 8353 */ 8354 if (svd->tr_state != SEGVN_TR_OFF || 8355 svd->type != MAP_PRIVATE || 8356 svd->vp != NULL) { 8357 ret = EINVAL; 8358 goto out; 8359 } 8360 8361 /* 8362 * If the entire segment has been marked as inherit zero, then there is 8363 * no reason to do anything else. 8364 */ 8365 if (svd->svn_inz == SEGVN_INZ_ALL) { 8366 ret = 0; 8367 goto out; 8368 } 8369 8370 /* 8371 * If this applies to the entire segment, simply mark it and we're done. 8372 */ 8373 if ((addr == seg->s_base) && (len == seg->s_size)) { 8374 svd->svn_inz = SEGVN_INZ_ALL; 8375 ret = 0; 8376 goto out; 8377 } 8378 8379 /* 8380 * We've been asked to mark a subset of this segment as inherit zero, 8381 * therefore we need to manipulate its vpages. 8382 */ 8383 if (svd->vpage == NULL) { 8384 segvn_vpage(seg); 8385 if (svd->vpage == NULL) { 8386 ret = ENOMEM; 8387 goto out; 8388 } 8389 } 8390 8391 svd->svn_inz = SEGVN_INZ_VPP; 8392 page = seg_page(seg, addr); 8393 bvpp = &svd->vpage[page]; 8394 evpp = &svd->vpage[page + (len >> PAGESHIFT)]; 8395 for (; bvpp < evpp; bvpp++) 8396 VPP_SETINHZERO(bvpp); 8397 ret = 0; 8398 8399 out: 8400 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8401 return (ret); 8402 } 8403 8404 /* 8405 * Create a vpage structure for this seg. 8406 */ 8407 static void 8408 segvn_vpage(struct seg *seg) 8409 { 8410 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8411 struct vpage *vp, *evp; 8412 static pgcnt_t page_limit = 0; 8413 8414 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 8415 8416 /* 8417 * If no vpage structure exists, allocate one.
Copy the protections 8418 * and the advice from the segment itself to the individual pages. 8419 */ 8420 if (svd->vpage == NULL) { 8421 /* 8422 * Start by calculating the number of pages we must allocate to 8423 * track the per-page vpage structs needed for this entire 8424 * segment. If we know now that it will require more than our 8425 * heuristic for the maximum amount of kmem we can consume then 8426 * fail. We do this here, instead of trying to detect this deep 8427 * in page_resv and propagating the error up, since the entire 8428 * memory allocation stack is not amenable to passing this 8429 * back. Instead, it wants to keep trying. 8430 * 8431 * As a heuristic we set a page limit of 5/8s of total_pages 8432 * for this allocation. We use shifts so that no floating 8433 * point conversion takes place and only need to do the 8434 * calculation once. 8435 */ 8436 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage); 8437 pgcnt_t npages = mem_needed >> PAGESHIFT; 8438 8439 if (page_limit == 0) 8440 page_limit = (total_pages >> 1) + (total_pages >> 3); 8441 8442 if (npages > page_limit) 8443 return; 8444 8445 svd->pageadvice = 1; 8446 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP); 8447 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)]; 8448 for (vp = svd->vpage; vp < evp; vp++) { 8449 VPP_SETPROT(vp, svd->prot); 8450 VPP_SETADVICE(vp, svd->advice); 8451 } 8452 } 8453 } 8454 8455 /* 8456 * Dump the pages belonging to this segvn segment. 8457 */ 8458 static void 8459 segvn_dump(struct seg *seg) 8460 { 8461 struct segvn_data *svd; 8462 page_t *pp; 8463 struct anon_map *amp; 8464 ulong_t anon_index; 8465 struct vnode *vp; 8466 u_offset_t off, offset; 8467 pfn_t pfn; 8468 pgcnt_t page, npages; 8469 caddr_t addr; 8470 8471 npages = seg_pages(seg); 8472 svd = (struct segvn_data *)seg->s_data; 8473 vp = svd->vp; 8474 off = offset = svd->offset; 8475 addr = seg->s_base; 8476 8477 if ((amp = svd->amp) != NULL) { 8478 anon_index = svd->anon_index; 8479 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 8480 } 8481 8482 for (page = 0; page < npages; page++, offset += PAGESIZE) { 8483 struct anon *ap; 8484 int we_own_it = 0; 8485 8486 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) { 8487 swap_xlate_nopanic(ap, &vp, &off); 8488 } else { 8489 vp = svd->vp; 8490 off = offset; 8491 } 8492 8493 /* 8494 * If pp == NULL, the page either does not exist 8495 * or is exclusively locked. So determine if it 8496 * exists before searching for it. 8497 */ 8498 8499 if ((pp = page_lookup_nowait(vp, off, SE_SHARED))) 8500 we_own_it = 1; 8501 else 8502 pp = page_exists(vp, off); 8503 8504 if (pp) { 8505 pfn = page_pptonum(pp); 8506 dump_addpage(seg->s_as, addr, pfn); 8507 if (we_own_it) 8508 page_unlock(pp); 8509 } 8510 addr += PAGESIZE; 8511 dump_timeleft = dump_timeout; 8512 } 8513 8514 if (amp != NULL) 8515 ANON_LOCK_EXIT(&amp->a_rwlock); 8516 } 8517 8518 #ifdef DEBUG 8519 static uint32_t segvn_pglock_mtbf = 0; 8520 #endif 8521 8522 #define PCACHE_SHWLIST ((page_t *)-2) 8523 #define NOPCACHE_SHWLIST ((page_t *)-1) 8524 8525 /* 8526 * Lock/Unlock anon pages over a given range. Return shadow list. This routine 8527 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages 8528 * to avoid the overhead of per page locking, unlocking for subsequent IOs to 8529 * the same parts of the segment. Currently shadow list creation is only 8530 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are 8531 * tagged with segment pointer, starting virtual address and length.
This 8532 * approach for MAP_SHARED segments may add many pcache entries for the same 8533 * set of pages and lead to long hash chains that decrease pcache lookup 8534 * performance. To avoid this issue for shared segments shared anon map and 8535 * starting anon index are used for pcache entry tagging. This allows all 8536 * segments to share pcache entries for the same anon range and reduces pcache 8537 * chain's length as well as memory overhead from duplicate shadow lists and 8538 * pcache entries. 8539 * 8540 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd 8541 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock 8542 * part of softlockcnt accounting is done differently for private and shared 8543 * segments. In private segment case softlock is only incremented when a new 8544 * shadow list is created but not when an existing one is found via 8545 * seg_plookup(). pcache entries have reference count incremented/decremented 8546 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0 8547 * reference count can be purged (and purging is needed before segment can be 8548 * freed). When a private segment pcache entry is purged segvn_reclaim() will 8549 * decrement softlockcnt. Since in private segment case each of its pcache 8550 * entries only belongs to this segment we can expect that when 8551 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8552 * segment purge will succeed and softlockcnt will drop to 0. In shared 8553 * segment case reference count in pcache entry counts active locks from many 8554 * different segments so we can't expect segment purging to succeed even when 8555 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this 8556 * segment. To be able to determine when there're no pending pagelocks in 8557 * shared segment case we don't rely on purging to make softlockcnt drop to 0 8558 * but instead softlockcnt is incremented and decremented for every 8559 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow 8560 * list was created or an existing one was found. When softlockcnt drops to 0 8561 * this segment no longer has any claims for pcached shadow lists and the 8562 * segment can be freed even if there're still active pcache entries 8563 * shared by this segment anon map. Shared segment pcache entries belong to 8564 * anon map and are typically removed when anon map is freed after all 8565 * processes destroy the segments that use this anon map. 8566 */ 8567 static int 8568 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp, 8569 enum lock_type type, enum seg_rw rw) 8570 { 8571 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 8572 size_t np; 8573 pgcnt_t adjustpages; 8574 pgcnt_t npages; 8575 ulong_t anon_index; 8576 uint_t protchk = (rw == S_READ) ? 
PROT_READ : PROT_WRITE; 8577 uint_t error; 8578 struct anon_map *amp; 8579 pgcnt_t anpgcnt; 8580 struct page **pplist, **pl, *pp; 8581 caddr_t a; 8582 size_t page; 8583 caddr_t lpgaddr, lpgeaddr; 8584 anon_sync_obj_t cookie; 8585 int anlock; 8586 struct anon_map *pamp; 8587 caddr_t paddr; 8588 seg_preclaim_cbfunc_t preclaim_callback; 8589 size_t pgsz; 8590 int use_pcache; 8591 size_t wlen; 8592 uint_t pflags = 0; 8593 int sftlck_sbase = 0; 8594 int sftlck_send = 0; 8595 8596 #ifdef DEBUG 8597 if (type == L_PAGELOCK && segvn_pglock_mtbf) { 8598 hrtime_t ts = gethrtime(); 8599 if ((ts % segvn_pglock_mtbf) == 0) { 8600 return (ENOTSUP); 8601 } 8602 if ((ts % segvn_pglock_mtbf) == 1) { 8603 return (EFAULT); 8604 } 8605 } 8606 #endif 8607 8608 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START, 8609 "segvn_pagelock: start seg %p addr %p", seg, addr); 8610 8611 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 8612 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK); 8613 8614 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 8615 8616 /* 8617 * for now we only support pagelock to anon memory. We would have to 8618 * check protections for vnode objects and call into the vnode driver. 8619 * That's too much for a fast path. Let the fault entry point handle 8620 * it. 8621 */ 8622 if (svd->vp != NULL) { 8623 if (type == L_PAGELOCK) { 8624 error = ENOTSUP; 8625 goto out; 8626 } 8627 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL"); 8628 } 8629 if ((amp = svd->amp) == NULL) { 8630 if (type == L_PAGELOCK) { 8631 error = EFAULT; 8632 goto out; 8633 } 8634 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL"); 8635 } 8636 if (rw != S_READ && rw != S_WRITE) { 8637 if (type == L_PAGELOCK) { 8638 error = ENOTSUP; 8639 goto out; 8640 } 8641 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw"); 8642 } 8643 8644 if (seg->s_szc != 0) { 8645 /* 8646 * We are adjusting the pagelock region to the large page size 8647 * boundary because the unlocked part of a large page cannot 8648 * be freed anyway unless all constituent pages of a large 8649 * page are locked. Bigger regions reduce pcache chain length 8650 * and improve lookup performance. The tradeoff is that the 8651 * very first segvn_pagelock() call for a given page is more 8652 * expensive if only 1 page_t is needed for IO. This is only 8653 * an issue if pcache entry doesn't get reused by several 8654 * subsequent calls. We optimize here for the case when pcache 8655 * is heavily used by repeated IOs to the same address range. 8656 * 8657 * Note segment's page size cannot change while we are holding 8658 * as lock. And then it cannot change while softlockcnt is 8659 * not 0. This will allow us to correctly recalculate large 8660 * page size region for the matching pageunlock/reclaim call 8661 * since as_pageunlock() caller must always match 8662 * as_pagelock() call's addr and len. 8663 * 8664 * For pageunlock *ppp points to the pointer of page_t that 8665 * corresponds to the real unadjusted start address. Similar 8666 * for pagelock *ppp must point to the pointer of page_t that 8667 * corresponds to the real unadjusted start address. 
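* The adjustpages value computed below records how many pages the start of the range was moved back, so that *ppp can later be offset to the caller's unadjusted start address.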
8668 */ 8669 pgsz = page_get_pagesize(seg->s_szc); 8670 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr); 8671 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8672 } else if (len < segvn_pglock_comb_thrshld) { 8673 lpgaddr = addr; 8674 lpgeaddr = addr + len; 8675 adjustpages = 0; 8676 pgsz = PAGESIZE; 8677 } else { 8678 /* 8679 * Align the address range of large enough requests to allow 8680 * combining of different shadow lists into 1 to reduce memory 8681 * overhead from potentially overlapping large shadow lists 8682 * (worst case is we have a 1MB IO into buffers with start 8683 * addresses separated by 4K). Alignment is only possible if 8684 * padded chunks have sufficient access permissions. Note 8685 * permissions won't change between L_PAGELOCK and 8686 * L_PAGEUNLOCK calls since non 0 softlockcnt will force 8687 * segvn_setprot() to wait until softlockcnt drops to 0. This 8688 * allows us to determine in L_PAGEUNLOCK the same range we 8689 * computed in L_PAGELOCK. 8690 * 8691 * If alignment is limited by segment ends set 8692 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when 8693 * these flags are set bump softlockcnt_sbase/softlockcnt_send 8694 * per segment counters. In L_PAGEUNLOCK case decrease 8695 * softlockcnt_sbase/softlockcnt_send counters if 8696 * sftlck_sbase/sftlck_send flags are set. When 8697 * softlockcnt_sbase/softlockcnt_send are non 0 8698 * segvn_concat()/segvn_extend_prev()/segvn_extend_next() 8699 * won't merge the segments. This restriction combined with 8700 * restriction on segment unmapping and splitting for segments 8701 * that have non 0 softlockcnt allows L_PAGEUNLOCK to 8702 * correctly determine the same range that was previously 8703 * locked by matching L_PAGELOCK. 8704 */ 8705 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16); 8706 pgsz = PAGESIZE; 8707 if (svd->type == MAP_PRIVATE) { 8708 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr, 8709 segvn_pglock_comb_balign); 8710 if (lpgaddr < seg->s_base) { 8711 lpgaddr = seg->s_base; 8712 sftlck_sbase = 1; 8713 } 8714 } else { 8715 ulong_t aix = svd->anon_index + seg_page(seg, addr); 8716 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign); 8717 if (aaix < svd->anon_index) { 8718 lpgaddr = seg->s_base; 8719 sftlck_sbase = 1; 8720 } else { 8721 lpgaddr = addr - ptob(aix - aaix); 8722 ASSERT(lpgaddr >= seg->s_base); 8723 } 8724 } 8725 if (svd->pageprot && lpgaddr != addr) { 8726 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)]; 8727 struct vpage *evp = &svd->vpage[seg_page(seg, addr)]; 8728 while (vp < evp) { 8729 if ((VPP_PROT(vp) & protchk) == 0) { 8730 break; 8731 } 8732 vp++; 8733 } 8734 if (vp < evp) { 8735 lpgaddr = addr; 8736 pflags = 0; 8737 } 8738 } 8739 lpgeaddr = addr + len; 8740 if (pflags) { 8741 if (svd->type == MAP_PRIVATE) { 8742 lpgeaddr = (caddr_t)P2ROUNDUP( 8743 (uintptr_t)lpgeaddr, 8744 segvn_pglock_comb_balign); 8745 } else { 8746 ulong_t aix = svd->anon_index + 8747 seg_page(seg, lpgeaddr); 8748 ulong_t aaix = P2ROUNDUP(aix, 8749 segvn_pglock_comb_palign); 8750 if (aaix < aix) { 8751 lpgeaddr = 0; 8752 } else { 8753 lpgeaddr += ptob(aaix - aix); 8754 } 8755 } 8756 if (lpgeaddr == 0 || 8757 lpgeaddr > seg->s_base + seg->s_size) { 8758 lpgeaddr = seg->s_base + seg->s_size; 8759 sftlck_send = 1; 8760 } 8761 } 8762 if (svd->pageprot && lpgeaddr != addr + len) { 8763 struct vpage *vp; 8764 struct vpage *evp; 8765 8766 vp = &svd->vpage[seg_page(seg, addr + len)]; 8767 evp = &svd->vpage[seg_page(seg, lpgeaddr)]; 8768 8769 while (vp < evp) { 8770 if ((VPP_PROT(vp) 
& protchk) == 0) { 8771 break; 8772 } 8773 vp++; 8774 } 8775 if (vp < evp) { 8776 lpgeaddr = addr + len; 8777 } 8778 } 8779 adjustpages = btop((uintptr_t)(addr - lpgaddr)); 8780 } 8781 8782 /* 8783 * For MAP_SHARED segments we create pcache entries tagged by amp and 8784 * anon index so that we can share pcache entries with other segments 8785 * that map this amp. For private segments pcache entries are tagged 8786 * with segment and virtual address. 8787 */ 8788 if (svd->type == MAP_SHARED) { 8789 pamp = amp; 8790 paddr = (caddr_t)((lpgaddr - seg->s_base) + 8791 ptob(svd->anon_index)); 8792 preclaim_callback = shamp_reclaim; 8793 } else { 8794 pamp = NULL; 8795 paddr = lpgaddr; 8796 preclaim_callback = segvn_reclaim; 8797 } 8798 8799 if (type == L_PAGEUNLOCK) { 8800 VM_STAT_ADD(segvnvmstats.pagelock[0]); 8801 8802 /* 8803 * update hat ref bits for /proc. We need to make sure 8804 * that threads tracing the ref and mod bits of the 8805 * address space get the right data. 8806 * Note: page ref and mod bits are updated at reclaim time 8807 */ 8808 if (seg->s_as->a_vbits) { 8809 for (a = addr; a < addr + len; a += PAGESIZE) { 8810 if (rw == S_WRITE) { 8811 hat_setstat(seg->s_as, a, 8812 PAGESIZE, P_REF | P_MOD); 8813 } else { 8814 hat_setstat(seg->s_as, a, 8815 PAGESIZE, P_REF); 8816 } 8817 } 8818 } 8819 8820 /* 8821 * Check the shadow list entry after the last page used in 8822 * this IO request. If it's NOPCACHE_SHWLIST the shadow list 8823 * was not inserted into pcache and is not large page 8824 * adjusted. In this case call reclaim callback directly and 8825 * don't adjust the shadow list start and size for large 8826 * pages. 8827 */ 8828 npages = btop(len); 8829 if ((*ppp)[npages] == NOPCACHE_SHWLIST) { 8830 void *ptag; 8831 if (pamp != NULL) { 8832 ASSERT(svd->type == MAP_SHARED); 8833 ptag = (void *)pamp; 8834 paddr = (caddr_t)((addr - seg->s_base) + 8835 ptob(svd->anon_index)); 8836 } else { 8837 ptag = (void *)seg; 8838 paddr = addr; 8839 } 8840 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0); 8841 } else { 8842 ASSERT((*ppp)[npages] == PCACHE_SHWLIST || 8843 IS_SWAPFSVP((*ppp)[npages]->p_vnode)); 8844 len = lpgeaddr - lpgaddr; 8845 npages = btop(len); 8846 seg_pinactive(seg, pamp, paddr, len, 8847 *ppp - adjustpages, rw, pflags, preclaim_callback); 8848 } 8849 8850 if (pamp != NULL) { 8851 ASSERT(svd->type == MAP_SHARED); 8852 ASSERT(svd->softlockcnt >= npages); 8853 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages); 8854 } 8855 8856 if (sftlck_sbase) { 8857 ASSERT(svd->softlockcnt_sbase > 0); 8858 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase); 8859 } 8860 if (sftlck_send) { 8861 ASSERT(svd->softlockcnt_send > 0); 8862 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send); 8863 } 8864 8865 /* 8866 * If someone is blocked while unmapping, we purge 8867 * segment page cache and thus reclaim pplist synchronously 8868 * without waiting for seg_pasync_thread. This speeds up 8869 * unmapping in cases where munmap(2) is called, while 8870 * raw async i/o is still in progress or where a thread 8871 * exits on data fault in a multithreaded application. 8872 */ 8873 if (AS_ISUNMAPWAIT(seg->s_as)) { 8874 if (svd->softlockcnt == 0) { 8875 mutex_enter(&seg->s_as->a_contents); 8876 if (AS_ISUNMAPWAIT(seg->s_as)) { 8877 AS_CLRUNMAPWAIT(seg->s_as); 8878 cv_broadcast(&seg->s_as->a_cv); 8879 } 8880 mutex_exit(&seg->s_as->a_contents); 8881 } else if (pamp == NULL) { 8882 /* 8883 * softlockcnt is not 0 and this is a 8884 * MAP_PRIVATE segment. 
Try to purge its 8885 * pcache entries to reduce softlockcnt. 8886 * If it drops to 0 segvn_reclaim() 8887 * will wake up a thread waiting on 8888 * unmapwait flag. 8889 * 8890 * We don't purge MAP_SHARED segments with non 8891 * 0 softlockcnt since IO is still in progress 8892 * for such segments. 8893 */ 8894 ASSERT(svd->type == MAP_PRIVATE); 8895 segvn_purge(seg); 8896 } 8897 } 8898 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8899 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END, 8900 "segvn_pagelock: unlock seg %p addr %p", seg, addr); 8901 return (0); 8902 } 8903 8904 /* The L_PAGELOCK case ... */ 8905 8906 VM_STAT_ADD(segvnvmstats.pagelock[1]); 8907 8908 /* 8909 * For MAP_SHARED segments we have to check protections before 8910 * seg_plookup() since pcache entries may be shared by many segments 8911 * with potentially different page protections. 8912 */ 8913 if (pamp != NULL) { 8914 ASSERT(svd->type == MAP_SHARED); 8915 if (svd->pageprot == 0) { 8916 if ((svd->prot & protchk) == 0) { 8917 error = EACCES; 8918 goto out; 8919 } 8920 } else { 8921 /* 8922 * check page protections 8923 */ 8924 caddr_t ea; 8925 8926 if (seg->s_szc) { 8927 a = lpgaddr; 8928 ea = lpgeaddr; 8929 } else { 8930 a = addr; 8931 ea = addr + len; 8932 } 8933 for (; a < ea; a += pgsz) { 8934 struct vpage *vp; 8935 8936 ASSERT(seg->s_szc == 0 || 8937 sameprot(seg, a, pgsz)); 8938 vp = &svd->vpage[seg_page(seg, a)]; 8939 if ((VPP_PROT(vp) & protchk) == 0) { 8940 error = EACCES; 8941 goto out; 8942 } 8943 } 8944 } 8945 } 8946 8947 /* 8948 * try to find pages in segment page cache 8949 */ 8950 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags); 8951 if (pplist != NULL) { 8952 if (pamp != NULL) { 8953 npages = btop((uintptr_t)(lpgeaddr - lpgaddr)); 8954 ASSERT(svd->type == MAP_SHARED); 8955 atomic_add_long((ulong_t *)&svd->softlockcnt, 8956 npages); 8957 } 8958 if (sftlck_sbase) { 8959 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 8960 } 8961 if (sftlck_send) { 8962 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 8963 } 8964 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 8965 *ppp = pplist + adjustpages; 8966 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END, 8967 "segvn_pagelock: cache hit seg %p addr %p", seg, addr); 8968 return (0); 8969 } 8970 8971 /* 8972 * For MAP_SHARED segments we already verified above that segment 8973 * protections allow this pagelock operation. 8974 */ 8975 if (pamp == NULL) { 8976 ASSERT(svd->type == MAP_PRIVATE); 8977 if (svd->pageprot == 0) { 8978 if ((svd->prot & protchk) == 0) { 8979 error = EACCES; 8980 goto out; 8981 } 8982 if (svd->prot & PROT_WRITE) { 8983 wlen = lpgeaddr - lpgaddr; 8984 } else { 8985 wlen = 0; 8986 ASSERT(rw == S_READ); 8987 } 8988 } else { 8989 int wcont = 1; 8990 /* 8991 * check page protections 8992 */ 8993 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) { 8994 struct vpage *vp; 8995 8996 ASSERT(seg->s_szc == 0 || 8997 sameprot(seg, a, pgsz)); 8998 vp = &svd->vpage[seg_page(seg, a)]; 8999 if ((VPP_PROT(vp) & protchk) == 0) { 9000 error = EACCES; 9001 goto out; 9002 } 9003 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) { 9004 wlen += pgsz; 9005 } else { 9006 wcont = 0; 9007 ASSERT(rw == S_READ); 9008 } 9009 } 9010 } 9011 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr); 9012 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr); 9013 } 9014 9015 /* 9016 * Only build large page adjusted shadow list if we expect to insert 9017 * it into pcache. For large enough pages it's a big overhead to 9018 * create a shadow list of the entire large page. 
But this overhead 9019 * should be amortized over repeated pcache hits on subsequent reuse 9020 * of this shadow list (IO into any range within this shadow list will 9021 * find it in pcache since we large page align the request for pcache 9022 * lookups). pcache performance is improved with bigger shadow lists 9023 * as it reduces the time to pcache the entire big segment and reduces 9024 * pcache chain length. 9025 */ 9026 if (seg_pinsert_check(seg, pamp, paddr, 9027 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) { 9028 addr = lpgaddr; 9029 len = lpgeaddr - lpgaddr; 9030 use_pcache = 1; 9031 } else { 9032 use_pcache = 0; 9033 /* 9034 * Since this entry will not be inserted into the pcache, we 9035 * will not do any adjustments to the starting address or 9036 * size of the memory to be locked. 9037 */ 9038 adjustpages = 0; 9039 } 9040 npages = btop(len); 9041 9042 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP); 9043 pl = pplist; 9044 *ppp = pplist + adjustpages; 9045 /* 9046 * If use_pcache is 0 this shadow list is not large page adjusted. 9047 * Record this info in the last entry of shadow array so that 9048 * L_PAGEUNLOCK can determine if it should large page adjust the 9049 * address range to find the real range that was locked. 9050 */ 9051 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST; 9052 9053 page = seg_page(seg, addr); 9054 anon_index = svd->anon_index + page; 9055 9056 anlock = 0; 9057 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9058 ASSERT(amp->a_szc >= seg->s_szc); 9059 anpgcnt = page_get_pagecnt(amp->a_szc); 9060 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) { 9061 struct anon *ap; 9062 struct vnode *vp; 9063 u_offset_t off; 9064 9065 /* 9066 * Lock and unlock anon array only once per large page. 9067 * anon_array_enter() locks the root anon slot according to 9068 * a_szc which can't change while anon map is locked. We lock 9069 * anon the first time through this loop and each time we 9070 * reach anon index that corresponds to a root of a large 9071 * page. 9072 */ 9073 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) { 9074 ASSERT(anlock == 0); 9075 anon_array_enter(amp, anon_index, &cookie); 9076 anlock = 1; 9077 } 9078 ap = anon_get_ptr(amp->ahp, anon_index); 9079 9080 /* 9081 * We must never use seg_pcache for COW pages 9082 * because we might end up with original page still 9083 * lying in seg_pcache even after private page is 9084 * created. This leads to data corruption as 9085 * aio_write refers to the page still in cache 9086 * while all other accesses refer to the private 9087 * page.
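* Such slots are handled below by faulting the page in with segvn_faultpage() and rechecking that the anon slot now has a refcnt of 1 before its page is locked.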
9088 */ 9089 if (ap == NULL || ap->an_refcnt != 1) { 9090 struct vpage *vpage; 9091 9092 if (seg->s_szc) { 9093 error = EFAULT; 9094 break; 9095 } 9096 if (svd->vpage != NULL) { 9097 vpage = &svd->vpage[seg_page(seg, a)]; 9098 } else { 9099 vpage = NULL; 9100 } 9101 ASSERT(anlock); 9102 anon_array_exit(&cookie); 9103 anlock = 0; 9104 pp = NULL; 9105 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0, 9106 vpage, &pp, 0, F_INVAL, rw, 1); 9107 if (error) { 9108 error = fc_decode(error); 9109 break; 9110 } 9111 anon_array_enter(amp, anon_index, &cookie); 9112 anlock = 1; 9113 ap = anon_get_ptr(amp->ahp, anon_index); 9114 if (ap == NULL || ap->an_refcnt != 1) { 9115 error = EFAULT; 9116 break; 9117 } 9118 } 9119 swap_xlate(ap, &vp, &off); 9120 pp = page_lookup_nowait(vp, off, SE_SHARED); 9121 if (pp == NULL) { 9122 error = EFAULT; 9123 break; 9124 } 9125 if (ap->an_pvp != NULL) { 9126 anon_swap_free(ap, pp); 9127 } 9128 /* 9129 * Unlock anon if this is the last slot in a large page. 9130 */ 9131 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) { 9132 ASSERT(anlock); 9133 anon_array_exit(&cookie); 9134 anlock = 0; 9135 } 9136 *pplist++ = pp; 9137 } 9138 if (anlock) { /* Ensure the lock is dropped */ 9139 anon_array_exit(&cookie); 9140 } 9141 ANON_LOCK_EXIT(&amp->a_rwlock); 9142 9143 if (a >= addr + len) { 9144 atomic_add_long((ulong_t *)&svd->softlockcnt, npages); 9145 if (pamp != NULL) { 9146 ASSERT(svd->type == MAP_SHARED); 9147 atomic_add_long((ulong_t *)&pamp->a_softlockcnt, 9148 npages); 9149 wlen = len; 9150 } 9151 if (sftlck_sbase) { 9152 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase); 9153 } 9154 if (sftlck_send) { 9155 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send); 9156 } 9157 if (use_pcache) { 9158 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl, 9159 rw, pflags, preclaim_callback); 9160 } 9161 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9162 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END, 9163 "segvn_pagelock: cache fill seg %p addr %p", seg, addr); 9164 return (0); 9165 } 9166 9167 pplist = pl; 9168 np = ((uintptr_t)(a - addr)) >> PAGESHIFT; 9169 while (np > (uint_t)0) { 9170 ASSERT(PAGE_LOCKED(*pplist)); 9171 page_unlock(*pplist); 9172 np--; 9173 pplist++; 9174 } 9175 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9176 out: 9177 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9178 *ppp = NULL; 9179 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END, 9180 "segvn_pagelock: cache miss seg %p addr %p", seg, addr); 9181 return (error); 9182 } 9183 9184 /* 9185 * purge any cached pages in the I/O page cache 9186 */ 9187 static void 9188 segvn_purge(struct seg *seg) 9189 { 9190 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9191 9192 /* 9193 * pcache is only used by pure anon segments. 9194 */ 9195 if (svd->amp == NULL || svd->vp != NULL) { 9196 return; 9197 } 9198 9199 /* 9200 * For MAP_SHARED segments non 0 segment's softlockcnt means 9201 * active IO is still in progress via this segment. So we only 9202 * purge MAP_SHARED segments when their softlockcnt is 0. 9203 */ 9204 if (svd->type == MAP_PRIVATE) { 9205 if (svd->softlockcnt) { 9206 seg_ppurge(seg, NULL, 0); 9207 } 9208 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) { 9209 seg_ppurge(seg, svd->amp, 0); 9210 } 9211 } 9212 9213 /* 9214 * If async argument is not 0 we are called from pcache async thread and don't 9215 * hold AS lock.
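* See the comment in segvn_reclaim() below for the extra care the async case requires around freeing of the address space and unmap wakeups.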
9216 */ 9217 9218 /*ARGSUSED*/ 9219 static int 9220 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9221 enum seg_rw rw, int async) 9222 { 9223 struct seg *seg = (struct seg *)ptag; 9224 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9225 pgcnt_t np, npages; 9226 struct page **pl; 9227 9228 npages = np = btop(len); 9229 ASSERT(npages); 9230 9231 ASSERT(svd->vp == NULL && svd->amp != NULL); 9232 ASSERT(svd->softlockcnt >= npages); 9233 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9234 9235 pl = pplist; 9236 9237 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9238 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9239 9240 while (np > (uint_t)0) { 9241 if (rw == S_WRITE) { 9242 hat_setrefmod(*pplist); 9243 } else { 9244 hat_setref(*pplist); 9245 } 9246 page_unlock(*pplist); 9247 np--; 9248 pplist++; 9249 } 9250 9251 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9252 9253 /* 9254 * If we are pcache async thread we don't hold AS lock. This means if 9255 * softlockcnt drops to 0 after the decrement below address space may 9256 * get freed. We can't allow it since after softlock decrement to 0 we 9257 * still need to access as structure for possible wakeup of unmap 9258 * waiters. To prevent the disappearance of as we take this segment's 9259 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to 9260 * make sure this routine completes before segment is freed. 9261 * 9262 * The second complication we have to deal with in async case is a 9263 * possibility of missed wake up of unmap wait thread. When we don't 9264 * hold as lock here we may take a_contents lock before unmap wait 9265 * thread that was first to see softlockcnt was still not 0. As a 9266 * result we'll fail to wake up an unmap wait thread. To avoid this 9267 * race we set nounmapwait flag in as structure if we drop softlockcnt 9268 * to 0 when we were called by pcache async thread. unmapwait thread 9269 * will not block if this flag is set. 9270 */ 9271 if (async) { 9272 mutex_enter(&svd->segfree_syncmtx); 9273 } 9274 9275 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) { 9276 if (async || AS_ISUNMAPWAIT(seg->s_as)) { 9277 mutex_enter(&seg->s_as->a_contents); 9278 if (async) { 9279 AS_SETNOUNMAPWAIT(seg->s_as); 9280 } 9281 if (AS_ISUNMAPWAIT(seg->s_as)) { 9282 AS_CLRUNMAPWAIT(seg->s_as); 9283 cv_broadcast(&seg->s_as->a_cv); 9284 } 9285 mutex_exit(&seg->s_as->a_contents); 9286 } 9287 } 9288 9289 if (async) { 9290 mutex_exit(&svd->segfree_syncmtx); 9291 } 9292 return (0); 9293 } 9294 9295 /*ARGSUSED*/ 9296 static int 9297 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist, 9298 enum seg_rw rw, int async) 9299 { 9300 amp_t *amp = (amp_t *)ptag; 9301 pgcnt_t np, npages; 9302 struct page **pl; 9303 9304 npages = np = btop(len); 9305 ASSERT(npages); 9306 ASSERT(amp->a_softlockcnt >= npages); 9307 9308 pl = pplist; 9309 9310 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST); 9311 ASSERT(!async || pl[np] == PCACHE_SHWLIST); 9312 9313 while (np > (uint_t)0) { 9314 if (rw == S_WRITE) { 9315 hat_setrefmod(*pplist); 9316 } else { 9317 hat_setref(*pplist); 9318 } 9319 page_unlock(*pplist); 9320 np--; 9321 pplist++; 9322 } 9323 9324 kmem_free(pl, sizeof (page_t *) * (npages + 1)); 9325 9326 /* 9327 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt 9328 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0 9329 * and anonmap_purge() acquires a_purgemtx.
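* Taking a_purgemtx around the decrement and the broadcast below closes the window in which a sleeping purger could otherwise miss the wakeup.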
9330 */ 9331 mutex_enter(&amp->a_purgemtx); 9332 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) && 9333 amp->a_purgewait) { 9334 amp->a_purgewait = 0; 9335 cv_broadcast(&amp->a_purgecv); 9336 } 9337 mutex_exit(&amp->a_purgemtx); 9338 return (0); 9339 } 9340 9341 /* 9342 * get a memory ID for an addr in a given segment 9343 * 9344 * XXX only creates PAGESIZE pages if anon slots are not initialized. 9345 * At fault time they will be relocated into larger pages. 9346 */ 9347 static int 9348 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp) 9349 { 9350 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9351 struct anon *ap = NULL; 9352 ulong_t anon_index; 9353 struct anon_map *amp; 9354 anon_sync_obj_t cookie; 9355 9356 if (svd->type == MAP_PRIVATE) { 9357 memidp->val[0] = (uintptr_t)seg->s_as; 9358 memidp->val[1] = (uintptr_t)addr; 9359 return (0); 9360 } 9361 9362 if (svd->type == MAP_SHARED) { 9363 if (svd->vp) { 9364 memidp->val[0] = (uintptr_t)svd->vp; 9365 memidp->val[1] = (u_longlong_t)svd->offset + 9366 (uintptr_t)(addr - seg->s_base); 9367 return (0); 9368 } else { 9369 9370 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER); 9371 if ((amp = svd->amp) != NULL) { 9372 anon_index = svd->anon_index + 9373 seg_page(seg, addr); 9374 } 9375 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock); 9376 9377 ASSERT(amp != NULL); 9378 9379 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER); 9380 anon_array_enter(amp, anon_index, &cookie); 9381 ap = anon_get_ptr(amp->ahp, anon_index); 9382 if (ap == NULL) { 9383 page_t *pp; 9384 9385 pp = anon_zero(seg, addr, &ap, svd->cred); 9386 if (pp == NULL) { 9387 anon_array_exit(&cookie); 9388 ANON_LOCK_EXIT(&amp->a_rwlock); 9389 return (ENOMEM); 9390 } 9391 ASSERT(anon_get_ptr(amp->ahp, anon_index) 9392 == NULL); 9393 (void) anon_set_ptr(amp->ahp, anon_index, 9394 ap, ANON_SLEEP); 9395 page_unlock(pp); 9396 } 9397 9398 anon_array_exit(&cookie); 9399 ANON_LOCK_EXIT(&amp->a_rwlock); 9400 9401 memidp->val[0] = (uintptr_t)ap; 9402 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET; 9403 return (0); 9404 } 9405 } 9406 return (EINVAL); 9407 } 9408 9409 static int 9410 sameprot(struct seg *seg, caddr_t a, size_t len) 9411 { 9412 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9413 struct vpage *vpage; 9414 spgcnt_t pages = btop(len); 9415 uint_t prot; 9416 9417 if (svd->pageprot == 0) 9418 return (1); 9419 9420 ASSERT(svd->vpage != NULL); 9421 9422 vpage = &svd->vpage[seg_page(seg, a)]; 9423 prot = VPP_PROT(vpage); 9424 vpage++; 9425 pages--; 9426 while (pages-- > 0) { 9427 if (prot != VPP_PROT(vpage)) 9428 return (0); 9429 vpage++; 9430 } 9431 return (1); 9432 } 9433 9434 /* 9435 * Get memory allocation policy info for specified address in given segment 9436 */ 9437 static lgrp_mem_policy_info_t * 9438 segvn_getpolicy(struct seg *seg, caddr_t addr) 9439 { 9440 struct anon_map *amp; 9441 ulong_t anon_index; 9442 lgrp_mem_policy_info_t *policy_info; 9443 struct segvn_data *svn_data; 9444 u_offset_t vn_off; 9445 vnode_t *vp; 9446 9447 ASSERT(seg != NULL); 9448 9449 svn_data = (struct segvn_data *)seg->s_data; 9450 if (svn_data == NULL) 9451 return (NULL); 9452 9453 /* 9454 * Get policy info for private or shared memory 9455 */ 9456 if (svn_data->type != MAP_SHARED) { 9457 if (svn_data->tr_state != SEGVN_TR_ON) { 9458 policy_info = &svn_data->policy_info; 9459 } else { 9460 policy_info = &svn_data->tr_policy_info; 9461 ASSERT(policy_info->mem_policy == 9462 LGRP_MEM_POLICY_NEXT_SEG); 9463 } 9464 } else { 9465 amp = svn_data->amp; 9466 anon_index =
svn_data->anon_index + seg_page(seg, addr); 9467 vp = svn_data->vp; 9468 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base); 9469 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off); 9470 } 9471 9472 return (policy_info); 9473 } 9474 9475 /* 9476 * Bind text vnode segment to an amp. If we bind successfully mappings will be 9477 * established to per vnode mapping per lgroup amp pages instead of to vnode 9478 * pages. There's one amp per vnode text mapping per lgroup. Many processes 9479 * may share the same text replication amp. If a suitable amp doesn't already 9480 * exist in svntr hash table create a new one. We may fail to bind to amp if 9481 * segment is not eligible for text replication. Code below first checks for 9482 * these conditions. If binding is successful segment tr_state is set to on 9483 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and 9484 * svd->amp remains as NULL. 9485 */ 9486 static void 9487 segvn_textrepl(struct seg *seg) 9488 { 9489 struct segvn_data *svd = (struct segvn_data *)seg->s_data; 9490 vnode_t *vp = svd->vp; 9491 u_offset_t off = svd->offset; 9492 size_t size = seg->s_size; 9493 u_offset_t eoff = off + size; 9494 uint_t szc = seg->s_szc; 9495 ulong_t hash = SVNTR_HASH_FUNC(vp); 9496 svntr_t *svntrp; 9497 struct vattr va; 9498 proc_t *p = seg->s_as->a_proc; 9499 lgrp_id_t lgrp_id; 9500 lgrp_id_t olid; 9501 int first; 9502 struct anon_map *amp; 9503 9504 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock)); 9505 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock)); 9506 ASSERT(p != NULL); 9507 ASSERT(svd->tr_state == SEGVN_TR_INIT); 9508 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie)); 9509 ASSERT(svd->flags & MAP_TEXT); 9510 ASSERT(svd->type == MAP_PRIVATE); 9511 ASSERT(vp != NULL && svd->amp == NULL); 9512 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE)); 9513 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0); 9514 ASSERT(seg->s_as != &kas); 9515 ASSERT(off < eoff); 9516 ASSERT(svntr_hashtab != NULL); 9517 9518 /* 9519 * If numa optimizations are no longer desired bail out. 9520 */ 9521 if (!lgrp_optimizations()) { 9522 svd->tr_state = SEGVN_TR_OFF; 9523 return; 9524 } 9525 9526 /* 9527 * Avoid creating anon maps with size bigger than the file size. 9528 * If VOP_GETATTR() call fails bail out. 9529 */ 9530 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME; 9531 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) { 9532 svd->tr_state = SEGVN_TR_OFF; 9533 SEGVN_TR_ADDSTAT(gaerr); 9534 return; 9535 } 9536 if (btopr(va.va_size) < btopr(eoff)) { 9537 svd->tr_state = SEGVN_TR_OFF; 9538 SEGVN_TR_ADDSTAT(overmap); 9539 return; 9540 } 9541 9542 /* 9543 * VVMEXEC may not be set yet if exec() prefaults text segment. Set 9544 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED 9545 * mapping that checks if trcache for this vnode needs to be 9546 * invalidated can't miss us. 9547 */ 9548 if (!(vp->v_flag & VVMEXEC)) { 9549 mutex_enter(&vp->v_lock); 9550 vp->v_flag |= VVMEXEC; 9551 mutex_exit(&vp->v_lock); 9552 } 9553 mutex_enter(&svntr_hashtab[hash].tr_lock); 9554 /* 9555 * Bail out if potentially MAP_SHARED writable mappings exist to this 9556 * vnode. We don't want to use old file contents from existing 9557 * replicas if this mapping was established after the original file 9558 * was changed. 
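* Once VVMEXEC is set above, any later MAP_SHARED writable mapping of this vnode will invalidate these entries via segvn_inval_trcache().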
9559 */ 9560 if (vn_is_mapped(vp, V_WRITE)) { 9561 mutex_exit(&svntr_hashtab[hash].tr_lock); 9562 svd->tr_state = SEGVN_TR_OFF; 9563 SEGVN_TR_ADDSTAT(wrcnt); 9564 return; 9565 } 9566 svntrp = svntr_hashtab[hash].tr_head; 9567 for (; svntrp != NULL; svntrp = svntrp->tr_next) { 9568 ASSERT(svntrp->tr_refcnt != 0); 9569 if (svntrp->tr_vp != vp) { 9570 continue; 9571 } 9572 9573 /* 9574 * Bail out if the file or its attributes were changed after 9575 * this replication entry was created since we need to use the 9576 * latest file contents. Note that mtime test alone is not 9577 * sufficient because a user can explicitly change mtime via 9578 * utimes(2) interfaces back to the old value after modifying 9579 * the file contents. To detect this case we also have to test 9580 * ctime which among other things records the time of the last 9581 * mtime change by utimes(2). ctime is not changed when the file 9582 * is only read or executed so we expect that typically existing 9583 * replication amp's can be used most of the time. 9584 */ 9585 if (!svntrp->tr_valid || 9586 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec || 9587 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec || 9588 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec || 9589 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) { 9590 mutex_exit(&svntr_hashtab[hash].tr_lock); 9591 svd->tr_state = SEGVN_TR_OFF; 9592 SEGVN_TR_ADDSTAT(stale); 9593 return; 9594 } 9595 /* 9596 * if off, eoff and szc match current segment we found the 9597 * existing entry we can use. 9598 */ 9599 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff && 9600 svntrp->tr_szc == szc) { 9601 break; 9602 } 9603 /* 9604 * Don't create different but overlapping in file offsets 9605 * entries to avoid replication of the same file pages more 9606 * than once per lgroup. 9607 */ 9608 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) || 9609 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) { 9610 mutex_exit(&svntr_hashtab[hash].tr_lock); 9611 svd->tr_state = SEGVN_TR_OFF; 9612 SEGVN_TR_ADDSTAT(overlap); 9613 return; 9614 } 9615 } 9616 /* 9617 * If we didn't find existing entry create a new one. 9618 */ 9619 if (svntrp == NULL) { 9620 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP); 9621 if (svntrp == NULL) { 9622 mutex_exit(&svntr_hashtab[hash].tr_lock); 9623 svd->tr_state = SEGVN_TR_OFF; 9624 SEGVN_TR_ADDSTAT(nokmem); 9625 return; 9626 } 9627 #ifdef DEBUG 9628 { 9629 lgrp_id_t i; 9630 for (i = 0; i < NLGRPS_MAX; i++) { 9631 ASSERT(svntrp->tr_amp[i] == NULL); 9632 } 9633 } 9634 #endif /* DEBUG */ 9635 svntrp->tr_vp = vp; 9636 svntrp->tr_off = off; 9637 svntrp->tr_eoff = eoff; 9638 svntrp->tr_szc = szc; 9639 svntrp->tr_valid = 1; 9640 svntrp->tr_mtime = va.va_mtime; 9641 svntrp->tr_ctime = va.va_ctime; 9642 svntrp->tr_refcnt = 0; 9643 svntrp->tr_next = svntr_hashtab[hash].tr_head; 9644 svntr_hashtab[hash].tr_head = svntrp; 9645 } 9646 first = 1; 9647 again: 9648 /* 9649 * We want to pick a replica with pages on main thread's (t_tid = 1, 9650 * aka T1) lgrp. Currently text replication is only optimized for 9651 * workloads that either have all threads of a process on the same 9652 * lgrp or execute their large text primarily on main thread. 9653 */ 9654 lgrp_id = p->p_t1_lgrpid; 9655 if (lgrp_id == LGRP_NONE) { 9656 /* 9657 * In case exec() prefaults text on non main thread use 9658 * current thread lgrpid. It will become main thread anyway 9659 * soon. 9660 */ 9661 lgrp_id = lgrp_home_id(curthread); 9662 } 9663 /* 9664 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet.
Otherwise 9665 * just set it to NLGRPS_MAX if it's different from current process T1 9666 * home lgrp. p_tr_lgrpid is used to detect if process uses text 9667 * replication and T1 new home is different from lgrp used for text 9668 * replication. When this happens asynchronous segvn thread rechecks if 9669 * segments should change lgrps used for text replication. If we fail 9670 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX 9671 * without cas if it's not already NLGRPS_MAX and not equal lgrp_id 9672 * we want to use. We don't need to use cas in this case because 9673 * another thread that races in between our non atomic check and set 9674 * may only change p_tr_lgrpid to NLGRPS_MAX at this point. 9675 */ 9676 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX); 9677 olid = p->p_tr_lgrpid; 9678 if (lgrp_id != olid && olid != NLGRPS_MAX) { 9679 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX; 9680 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) != 9681 olid) { 9682 olid = p->p_tr_lgrpid; 9683 ASSERT(olid != LGRP_NONE); 9684 if (olid != lgrp_id && olid != NLGRPS_MAX) { 9685 p->p_tr_lgrpid = NLGRPS_MAX; 9686 } 9687 } 9688 ASSERT(p->p_tr_lgrpid != LGRP_NONE); 9689 membar_producer(); 9690 /* 9691 * lgrp_move_thread() won't schedule async recheck after 9692 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not 9693 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid 9694 * is not LGRP_NONE. 9695 */ 9696 if (first && p->p_t1_lgrpid != LGRP_NONE && 9697 p->p_t1_lgrpid != lgrp_id) { 9698 first = 0; 9699 goto again; 9700 } 9701 } 9702 /* 9703 * If no amp was created yet for lgrp_id create a new one as long as 9704 * we have enough memory to afford it. 9705 */ 9706 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) { 9707 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size); 9708 if (trmem > segvn_textrepl_max_bytes) { 9709 SEGVN_TR_ADDSTAT(normem); 9710 goto fail; 9711 } 9712 if (anon_try_resv_zone(size, NULL) == 0) { 9713 SEGVN_TR_ADDSTAT(noanon); 9714 goto fail; 9715 } 9716 amp = anonmap_alloc(size, size, ANON_NOSLEEP); 9717 if (amp == NULL) { 9718 anon_unresv_zone(size, NULL); 9719 SEGVN_TR_ADDSTAT(nokmem); 9720 goto fail; 9721 } 9722 ASSERT(amp->refcnt == 1); 9723 amp->a_szc = szc; 9724 svntrp->tr_amp[lgrp_id] = amp; 9725 SEGVN_TR_ADDSTAT(newamp); 9726 } 9727 svntrp->tr_refcnt++; 9728 ASSERT(svd->svn_trnext == NULL); 9729 ASSERT(svd->svn_trprev == NULL); 9730 svd->svn_trnext = svntrp->tr_svnhead; 9731 svd->svn_trprev = NULL; 9732 if (svntrp->tr_svnhead != NULL) { 9733 svntrp->tr_svnhead->svn_trprev = svd; 9734 } 9735 svntrp->tr_svnhead = svd; 9736 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size); 9737 ASSERT(amp->refcnt >= 1); 9738 svd->amp = amp; 9739 svd->anon_index = 0; 9740 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG; 9741 svd->tr_policy_info.mem_lgrpid = lgrp_id; 9742 svd->tr_state = SEGVN_TR_ON; 9743 mutex_exit(&svntr_hashtab[hash].tr_lock); 9744 SEGVN_TR_ADDSTAT(repl); 9745 return; 9746 fail: 9747 ASSERT(segvn_textrepl_bytes >= size); 9748 atomic_add_long(&segvn_textrepl_bytes, -size); 9749 ASSERT(svntrp != NULL); 9750 ASSERT(svntrp->tr_amp[lgrp_id] == NULL); 9751 if (svntrp->tr_refcnt == 0) { 9752 ASSERT(svntrp == svntr_hashtab[hash].tr_head); 9753 svntr_hashtab[hash].tr_head = svntrp->tr_next; 9754 mutex_exit(&svntr_hashtab[hash].tr_lock); 9755 kmem_cache_free(svntr_cache, svntrp); 9756 } else { 9757 mutex_exit(&svntr_hashtab[hash].tr_lock); 9758 } 9759 svd->tr_state = SEGVN_TR_OFF; 9760 } 9761 9762
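/* A recap of the p_tr_lgrpid protocol used above: it stays LGRP_NONE while the process has no replicated text segments, holds a valid lgroup id while all replicated text follows T1's home lgroup, and is set to NLGRPS_MAX once T1's home differs from the lgroup used by some replicated segment, at which point the asynchronous segvn thread rechecks whether segments should switch replicas. */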
/*
 * Convert the seg back to a regular vnode mapping seg by unbinding it from
 * its text replication amp. This routine is most typically called when the
 * segment is unmapped, but it can also be called when the segment no longer
 * qualifies for text replication (e.g. due to protection changes). If
 * unload_unmap is set, use the HAT_UNLOAD_UNMAP flag in
 * hat_unload_callback(). If we are the last user of the svntr entry, free
 * all of its anon maps and remove it from the hash table.
 */
static void
segvn_textunrepl(struct seg *seg, int unload_unmap)
{
	struct segvn_data *svd = (struct segvn_data *)seg->s_data;
	vnode_t *vp = svd->vp;
	u_offset_t off = svd->offset;
	size_t size = seg->s_size;
	u_offset_t eoff = off + size;
	uint_t szc = seg->s_szc;
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;
	svntr_t **prv_svntrp;
	lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
	lgrp_id_t i;

	ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
	ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
	    SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->amp->refcnt >= 1);
	ASSERT(svd->anon_index == 0);
	ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
	ASSERT(svntr_hashtab != NULL);

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	prv_svntrp = &svntr_hashtab[hash].tr_head;
	for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
		    svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
			break;
		}
	}
	if (svntrp == NULL) {
		panic("segvn_textunrepl: svntr record not found");
	}
	if (svntrp->tr_amp[lgrp_id] != svd->amp) {
		panic("segvn_textunrepl: amp mismatch");
	}
	svd->tr_state = SEGVN_TR_OFF;
	svd->amp = NULL;
	if (svd->svn_trprev == NULL) {
		ASSERT(svntrp->tr_svnhead == svd);
		svntrp->tr_svnhead = svd->svn_trnext;
		if (svntrp->tr_svnhead != NULL) {
			svntrp->tr_svnhead->svn_trprev = NULL;
		}
		svd->svn_trnext = NULL;
	} else {
		svd->svn_trprev->svn_trnext = svd->svn_trnext;
		if (svd->svn_trnext != NULL) {
			svd->svn_trnext->svn_trprev = svd->svn_trprev;
			svd->svn_trnext = NULL;
		}
		svd->svn_trprev = NULL;
	}
	if (--svntrp->tr_refcnt) {
		mutex_exit(&svntr_hashtab[hash].tr_lock);
		goto done;
	}
	*prv_svntrp = svntrp->tr_next;
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	for (i = 0; i < NLGRPS_MAX; i++) {
		struct anon_map *amp = svntrp->tr_amp[i];
		if (amp == NULL) {
			continue;
		}
		ASSERT(amp->refcnt == 1);
		ASSERT(amp->swresv == size);
		ASSERT(amp->size == size);
		ASSERT(amp->a_szc == szc);
		if (amp->a_szc != 0) {
			anon_free_pages(amp->ahp, 0, size, szc);
		} else {
			anon_free(amp->ahp, 0, size);
		}
		svntrp->tr_amp[i] = NULL;
		ASSERT(segvn_textrepl_bytes >= size);
		atomic_add_long(&segvn_textrepl_bytes, -size);
		anon_unresv_zone(amp->swresv, NULL);
		amp->refcnt = 0;
		anonmap_free(amp);
	}
	kmem_cache_free(svntr_cache, svntrp);
done:
	hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
	    unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
}

/*
 * This is called when a MAP_SHARED writable mapping is created to a vnode
 * that is currently used for execution (the VVMEXEC flag is set). In this
 * case we need to prevent further use of the existing replicas.
 */
static void
segvn_inval_trcache(vnode_t *vp)
{
	ulong_t hash = SVNTR_HASH_FUNC(vp);
	svntr_t *svntrp;

	ASSERT(vp->v_flag & VVMEXEC);

	if (svntr_hashtab == NULL) {
		return;
	}

	mutex_enter(&svntr_hashtab[hash].tr_lock);
	svntrp = svntr_hashtab[hash].tr_head;
	for (; svntrp != NULL; svntrp = svntrp->tr_next) {
		ASSERT(svntrp->tr_refcnt != 0);
		if (svntrp->tr_vp == vp && svntrp->tr_valid) {
			svntrp->tr_valid = 0;
		}
	}
	mutex_exit(&svntr_hashtab[hash].tr_lock);
}

static void
segvn_trasync_thread(void)
{
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;	/* just for CPR stuff */

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);

	CALLB_CPR_INIT(&cpr_info, &cpr_lock,
	    callb_generic_cpr, "segvn_async");

	if (segvn_update_textrepl_interval == 0) {
		segvn_update_textrepl_interval = segvn_update_tr_time * hz;
	} else {
		segvn_update_textrepl_interval *= hz;
	}
	(void) timeout(segvn_trupdate_wakeup, NULL,
	    segvn_update_textrepl_interval);

	for (;;) {
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_BEGIN(&cpr_info);
		mutex_exit(&cpr_lock);
		sema_p(&segvn_trasync_sem);
		mutex_enter(&cpr_lock);
		CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
		mutex_exit(&cpr_lock);
		segvn_trupdate();
	}
}

static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;

static void
segvn_trupdate_wakeup(void *dummy)
{
	uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();

	if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
		segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
		sema_v(&segvn_trasync_sem);
	}

	if (!segvn_disable_textrepl_update &&
	    segvn_update_textrepl_interval != 0) {
		(void) timeout(segvn_trupdate_wakeup, dummy,
		    segvn_update_textrepl_interval);
	}
}

static void
segvn_trupdate(void)
{
	ulong_t hash;
	svntr_t *svntrp;
	segvn_data_t *svd;

	ASSERT(svntr_hashtab != NULL);

	for (hash = 0; hash < svntr_hashtab_sz; hash++) {
		mutex_enter(&svntr_hashtab[hash].tr_lock);
		svntrp = svntr_hashtab[hash].tr_head;
		for (; svntrp != NULL; svntrp = svntrp->tr_next) {
			ASSERT(svntrp->tr_refcnt != 0);
			svd = svntrp->tr_svnhead;
			for (; svd != NULL; svd = svd->svn_trnext) {
				segvn_trupdate_seg(svd->seg, svd, svntrp,
				    hash);
			}
		}
		mutex_exit(&svntr_hashtab[hash].tr_lock);
	}
}

static void
segvn_trupdate_seg(struct seg *seg, segvn_data_t *svd, svntr_t *svntrp,
    ulong_t hash)
{
	proc_t *p;
	lgrp_id_t lgrp_id;
	struct as *as;
	size_t size;
	struct anon_map *amp;

	ASSERT(svd->vp != NULL);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->offset == svntrp->tr_off);
	ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
	ASSERT(seg != NULL);
	ASSERT(svd->seg == seg);
	ASSERT(seg->s_data == (void *)svd);
	ASSERT(seg->s_szc == svntrp->tr_szc);
	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
	ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
	ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));

	as = seg->s_as;
	ASSERT(as != NULL && as != &kas);
	p = as->a_proc;
	ASSERT(p != NULL);
	ASSERT(p->p_tr_lgrpid != LGRP_NONE);
	lgrp_id = p->p_t1_lgrpid;
	if (lgrp_id == LGRP_NONE) {
		return;
	}
	ASSERT(lgrp_id < NLGRPS_MAX);
	if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
		return;
	}

	/*
	 * Use tryenter locking since we take the as/seg locks and the svntr
	 * hash lock in the reverse order from the synchronous thread.
	 */
	if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		AS_LOCK_EXIT(as, &as->a_lock);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return;
	}
	size = seg->s_size;
	if (svntrp->tr_amp[lgrp_id] == NULL) {
		size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
		if (trmem > segvn_textrepl_max_bytes) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(normem);
			return;
		}
		if (anon_try_resv_zone(size, NULL) == 0) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			SEGVN_TR_ADDSTAT(noanon);
			return;
		}
		amp = anonmap_alloc(size, size, KM_NOSLEEP);
		if (amp == NULL) {
			SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
			AS_LOCK_EXIT(as, &as->a_lock);
			atomic_add_long(&segvn_textrepl_bytes, -size);
			anon_unresv_zone(size, NULL);
			SEGVN_TR_ADDSTAT(nokmem);
			return;
		}
		ASSERT(amp->refcnt == 1);
		amp->a_szc = seg->s_szc;
		svntrp->tr_amp[lgrp_id] = amp;
	}
	/*
	 * We don't need to drop the bucket lock but here we give other
	 * threads a chance. svntr and svd can't be unlinked as long as the
	 * segment lock is held as a writer and the AS lock is held as well.
	 * After we retake the bucket lock we'll continue from where we left
	 * off. We'll be able to reach the end of either list since new
	 * entries are always added to the beginning of the lists.
	 */
	mutex_exit(&svntr_hashtab[hash].tr_lock);
	hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
	mutex_enter(&svntr_hashtab[hash].tr_lock);

	ASSERT(svd->tr_state == SEGVN_TR_ON);
	ASSERT(svd->amp != NULL);
	ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
	ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
	ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);

	svd->tr_policy_info.mem_lgrpid = lgrp_id;
	svd->amp = svntrp->tr_amp[lgrp_id];
	p->p_tr_lgrpid = NLGRPS_MAX;
	SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
	AS_LOCK_EXIT(as, &as->a_lock);

	ASSERT(svntrp->tr_refcnt != 0);
	ASSERT(svd->vp == svntrp->tr_vp);
	ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
	ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
	ASSERT(svd->seg == seg);
	ASSERT(svd->tr_state == SEGVN_TR_ON);

	SEGVN_TR_ADDSTAT(asyncrepl);
}
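
/*
 * Illustrative sketch, not part of the original code or the build: the
 * trylock pattern segvn_trupdate_seg() uses above.  Because the asynchronous
 * thread already holds the svntr hash bucket lock and only then takes the as
 * and segment locks, i.e. the reverse of the order used by the synchronous
 * path, it must use tryenter and simply give up on contention rather than
 * risk a deadlock.  The function name and the SEGVN_TR_EXAMPLE guard are
 * hypothetical; the bail-out mirrors the code above, including clearing
 * segvn_lgrp_trthr_migrs_snpsht so the next wakeup schedules another pass.
 */
#ifdef	SEGVN_TR_EXAMPLE
static int
segvn_tr_trylock_example(struct as *as, struct seg *seg, segvn_data_t *svd)
{
	if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
		/* as lock contended: record it and let the caller bail. */
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return (0);
	}
	if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
		/* Segment lock contended: drop the as lock and bail. */
		AS_LOCK_EXIT(as, &as->a_lock);
		SEGVN_TR_ADDSTAT(nolock);
		if (segvn_lgrp_trthr_migrs_snpsht) {
			segvn_lgrp_trthr_migrs_snpsht = 0;
		}
		return (0);
	}
	return (1);	/* caller now holds both locks */
}
#endif	/* SEGVN_TR_EXAMPLE */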