use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle a NULL capable segop function pointer as shorthand for
"no capabilities supported".
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2015, Joyent, Inc. All rights reserved.
24 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - shared or copy-on-write from a vnode/anonymous memory.
42 42 */
43 43
44 44 #include <sys/types.h>
45 45 #include <sys/param.h>
46 46 #include <sys/t_lock.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/debug.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/vmsystm.h>
53 53 #include <sys/tuneable.h>
54 54 #include <sys/bitmap.h>
55 55 #include <sys/swap.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/sysmacros.h>
58 58 #include <sys/vtrace.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/callb.h>
61 61 #include <sys/vm.h>
62 62 #include <sys/dumphdr.h>
63 63 #include <sys/lgrp.h>
64 64
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_vn.h>
69 69 #include <vm/pvn.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/vpage.h>
73 73 #include <sys/proc.h>
74 74 #include <sys/task.h>
75 75 #include <sys/project.h>
76 76 #include <sys/zone.h>
77 77 #include <sys/shm_impl.h>
78 78
79 79 /*
80 80 * segvn_fault needs a temporary page list array. To avoid calling kmem all
81 81 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
82 82 * it can. In the rare case when this page list is not large enough, it
83 83 * goes and gets a large enough array from kmem.
84 84 *
85 85 * This small page list array covers either 8 pages or 64kB worth of pages -
86 86 * whichever is smaller.
87 87 */
88 88 #define PVN_MAX_GETPAGE_SZ 0x10000
89 89 #define PVN_MAX_GETPAGE_NUM 0x8
90 90
91 91 #if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
92 92 #define PVN_GETPAGE_SZ ptob(PVN_MAX_GETPAGE_NUM)
93 93 #define PVN_GETPAGE_NUM PVN_MAX_GETPAGE_NUM
94 94 #else
95 95 #define PVN_GETPAGE_SZ PVN_MAX_GETPAGE_SZ
96 96 #define PVN_GETPAGE_NUM btop(PVN_MAX_GETPAGE_SZ)
97 97 #endif
98 98
99 99 /*
100 100 * Private seg op routines.
101 101 */
102 102 static int segvn_dup(struct seg *seg, struct seg *newseg);
103 103 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
104 104 static void segvn_free(struct seg *seg);
105 105 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
106 106 caddr_t addr, size_t len, enum fault_type type,
107 107 enum seg_rw rw);
108 108 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
109 109 static int segvn_setprot(struct seg *seg, caddr_t addr,
110 110 size_t len, uint_t prot);
111 111 static int segvn_checkprot(struct seg *seg, caddr_t addr,
112 112 size_t len, uint_t prot);
113 113 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
114 114 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
115 115 int attr, uint_t flags);
116 116 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
117 117 char *vec);
118 118 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
119 119 int attr, int op, ulong_t *lockmap, size_t pos);
120 120 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
121 121 uint_t *protv);
122 122 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
123 123 static int segvn_gettype(struct seg *seg, caddr_t addr);
124 124 static int segvn_getvp(struct seg *seg, caddr_t addr,
125 125 struct vnode **vpp);
[ 125 lines elided ]
126 126 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
127 127 uint_t behav);
128 128 static void segvn_dump(struct seg *seg);
129 129 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
130 130 struct page ***ppp, enum lock_type type, enum seg_rw rw);
131 131 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
132 132 uint_t szc);
133 133 static int segvn_getmemid(struct seg *seg, caddr_t addr,
134 134 memid_t *memidp);
135 135 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
136 -static int segvn_capable(struct seg *seg, segcapability_t capable);
137 136 static int segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
138 137
139 138 struct seg_ops segvn_ops = {
140 139 .dup = segvn_dup,
141 140 .unmap = segvn_unmap,
142 141 .free = segvn_free,
143 142 .fault = segvn_fault,
144 143 .faulta = segvn_faulta,
145 144 .setprot = segvn_setprot,
146 145 .checkprot = segvn_checkprot,
147 146 .kluster = segvn_kluster,
148 147 .sync = segvn_sync,
149 148 .incore = segvn_incore,
150 149 .lockop = segvn_lockop,
[ 4 lines elided ]
151 150 .getprot = segvn_getprot,
152 151 .getoffset = segvn_getoffset,
153 152 .gettype = segvn_gettype,
154 153 .getvp = segvn_getvp,
155 154 .advise = segvn_advise,
156 155 .dump = segvn_dump,
157 156 .pagelock = segvn_pagelock,
158 157 .setpagesize = segvn_setpagesize,
159 158 .getmemid = segvn_getmemid,
160 159 .getpolicy = segvn_getpolicy,
161 - .capable = segvn_capable,
162 160 .inherit = segvn_inherit,
163 161 };
164 162
165 163 /*
166 164 * Common zfod structures, provided as a shorthand for others to use.
167 165 */
168 166 static segvn_crargs_t zfod_segvn_crargs =
169 167 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
170 168 static segvn_crargs_t kzfod_segvn_crargs =
171 169 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
172 170 PROT_ALL & ~PROT_USER);
173 171 static segvn_crargs_t stack_noexec_crargs =
174 172 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
175 173
176 174 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
177 175 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
178 176 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
179 177 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
180 178
181 179 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
182 180
183 181 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
184 182
185 183 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
186 184 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
187 185 uint_t segvn_pglock_comb_bshift;
188 186 size_t segvn_pglock_comb_palign;
189 187
190 188 static int segvn_concat(struct seg *, struct seg *, int);
191 189 static int segvn_extend_prev(struct seg *, struct seg *,
192 190 struct segvn_crargs *, size_t);
193 191 static int segvn_extend_next(struct seg *, struct seg *,
194 192 struct segvn_crargs *, size_t);
195 193 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
196 194 static void segvn_pagelist_rele(page_t **);
197 195 static void segvn_setvnode_mpss(vnode_t *);
198 196 static void segvn_relocate_pages(page_t **, page_t *);
199 197 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
200 198 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
201 199 uint_t, page_t **, page_t **, uint_t *, int *);
202 200 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
203 201 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
204 202 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
205 203 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
206 204 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
207 205 u_offset_t, struct vpage *, page_t **, uint_t,
208 206 enum fault_type, enum seg_rw, int);
209 207 static void segvn_vpage(struct seg *);
210 208 static size_t segvn_count_swap_by_vpages(struct seg *);
211 209
212 210 static void segvn_purge(struct seg *seg);
213 211 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
214 212 enum seg_rw, int);
215 213 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
216 214 enum seg_rw, int);
217 215
218 216 static int sameprot(struct seg *, caddr_t, size_t);
219 217
220 218 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
221 219 static int segvn_clrszc(struct seg *);
222 220 static struct seg *segvn_split_seg(struct seg *, caddr_t);
223 221 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
224 222 ulong_t, uint_t);
225 223
226 224 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
227 225 size_t, void *, u_offset_t);
228 226
229 227 static struct kmem_cache *segvn_cache;
230 228 static struct kmem_cache **segvn_szc_cache;
231 229
232 230 #ifdef VM_STATS
233 231 static struct segvnvmstats_str {
234 232 ulong_t fill_vp_pages[31];
235 233 ulong_t fltvnpages[49];
236 234 ulong_t fullszcpages[10];
237 235 ulong_t relocatepages[3];
238 236 ulong_t fltanpages[17];
239 237 ulong_t pagelock[2];
240 238 ulong_t demoterange[3];
241 239 } segvnvmstats;
242 240 #endif /* VM_STATS */
243 241
244 242 #define SDR_RANGE 1 /* demote entire range */
245 243 #define SDR_END 2 /* demote non aligned ends only */
246 244
247 245 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
248 246 if ((len) != 0) { \
249 247 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
250 248 ASSERT(lpgaddr >= (seg)->s_base); \
251 249 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
252 250 (len)), pgsz); \
253 251 ASSERT(lpgeaddr > lpgaddr); \
254 252 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
255 253 } else { \
256 254 lpgeaddr = lpgaddr = (addr); \
257 255 } \
258 256 }
259 257
260 258 /*ARGSUSED*/
261 259 static int
262 260 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
263 261 {
264 262 struct segvn_data *svd = buf;
265 263
266 264 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
267 265 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
268 266 svd->svn_trnext = svd->svn_trprev = NULL;
269 267 return (0);
270 268 }
271 269
272 270 /*ARGSUSED1*/
273 271 static void
274 272 segvn_cache_destructor(void *buf, void *cdrarg)
275 273 {
276 274 struct segvn_data *svd = buf;
277 275
278 276 rw_destroy(&svd->lock);
279 277 mutex_destroy(&svd->segfree_syncmtx);
280 278 }
281 279
282 280 /*ARGSUSED*/
283 281 static int
284 282 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
285 283 {
286 284 bzero(buf, sizeof (svntr_t));
287 285 return (0);
288 286 }
289 287
290 288 /*
291 289 * Patching this variable to non-zero allows the system to run with
292 290 * stacks marked as "not executable". It's a bit of a kludge, but is
293 291 * provided as a tweakable for platforms that export those ABIs
294 292 * (e.g. sparc V8) that have executable stacks enabled by default.
295 293 * There are also some restrictions for platforms that don't actually
296 294 * implement 'noexec' protections.
297 295 *
298 296 * Once enabled, the system is (therefore) unable to provide a fully
299 297 * ABI-compliant execution environment, though practically speaking,
300 298 * most everything works. The exceptions are generally some interpreters
301 299 * and debuggers that create executable code on the stack and jump
302 300 * into it (without explicitly mprotecting the address range to include
303 301 * PROT_EXEC).
304 302 *
305 303 * One important class of applications that are disabled are those
306 304 * that have been transformed into malicious agents using one of the
307 305 * numerous "buffer overflow" attacks. See 4007890.
308 306 */
309 307 int noexec_user_stack = 0;
310 308 int noexec_user_stack_log = 1;
311 309
312 310 int segvn_lpg_disable = 0;
313 311 uint_t segvn_maxpgszc = 0;
314 312
315 313 ulong_t segvn_vmpss_clrszc_cnt;
316 314 ulong_t segvn_vmpss_clrszc_err;
317 315 ulong_t segvn_fltvnpages_clrszc_cnt;
318 316 ulong_t segvn_fltvnpages_clrszc_err;
319 317 ulong_t segvn_setpgsz_align_err;
320 318 ulong_t segvn_setpgsz_anon_align_err;
321 319 ulong_t segvn_setpgsz_getattr_err;
322 320 ulong_t segvn_setpgsz_eof_err;
323 321 ulong_t segvn_faultvnmpss_align_err1;
324 322 ulong_t segvn_faultvnmpss_align_err2;
325 323 ulong_t segvn_faultvnmpss_align_err3;
326 324 ulong_t segvn_faultvnmpss_align_err4;
327 325 ulong_t segvn_faultvnmpss_align_err5;
328 326 ulong_t segvn_vmpss_pageio_deadlk_err;
329 327
330 328 int segvn_use_regions = 1;
331 329
332 330 /*
333 331 * Segvn supports text replication optimization for NUMA platforms. Text
334 332 * replica's are represented by anon maps (amp). There's one amp per text file
335 333 * region per lgroup. A process chooses the amp for each of its text mappings
336 334 * based on the lgroup assignment of its main thread (t_tid = 1). All
337 335 * processes that want a replica on a particular lgroup for the same text file
338 336 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
339 337 * with vp,off,size,szc used as a key. Text replication segments are read only
340 338 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
341 339 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
342 340 * pages. Replication amp is assigned to a segment when it gets its first
343 341 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
344 342 * rechecks periodically if the process still maps an amp local to the main
345 343 * thread. If not async thread forces process to remap to an amp in the new
346 344 * home lgroup of the main thread. Current text replication implementation
347 345 * only provides the benefit to workloads that do most of their work in the
348 346 * main thread of a process or all the threads of a process run in the same
349 347 * lgroup. To extend text replication benefit to different types of
350 348 * multithreaded workloads further work would be needed in the hat layer to
351 349 * allow the same virtual address in the same hat to simultaneously map
352 350 * different physical addresses (i.e. page table replication would be needed
353 351 * for x86).
354 352 *
355 353 * amp pages are used instead of vnode pages as long as segment has a very
356 354 * simple life cycle. It's created via segvn_create(), handles S_EXEC
357 355 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
358 356 * happens such as protection is changed, real COW fault happens, pagesize is
359 357 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
360 358 * text replication by converting the segment back to vnode only segment
361 359 * (unmap segment's address range and set svd->amp to NULL).
362 360 *
363 361 * The original file can be changed after amp is inserted into
364 362 * svntr_hashtab. Processes that are launched after the file is already
365 363 * changed can't use the replica's created prior to the file change. To
366 364 * implement this functionality hash entries are timestamped. Replica's can
367 365 * only be used if current file modification time is the same as the timestamp
368 366 * saved when hash entry was created. However just timestamps alone are not
369 367 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
370 368 * deal with file changes via MAP_SHARED mappings differently. When writable
371 369 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
372 370 * existing replica's for this vnode as not usable for future text
373 371 * mappings. And we don't create new replica's for files that currently have
374 372 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
375 373 * true).
376 374 */
377 375
378 376 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
379 377 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
380 378
381 379 static ulong_t svntr_hashtab_sz = 512;
382 380 static svntr_bucket_t *svntr_hashtab = NULL;
383 381 static struct kmem_cache *svntr_cache;
384 382 static svntr_stats_t *segvn_textrepl_stats;
385 383 static ksema_t segvn_trasync_sem;
386 384
387 385 int segvn_disable_textrepl = 1;
388 386 size_t textrepl_size_thresh = (size_t)-1;
389 387 size_t segvn_textrepl_bytes = 0;
390 388 size_t segvn_textrepl_max_bytes = 0;
391 389 clock_t segvn_update_textrepl_interval = 0;
392 390 int segvn_update_tr_time = 10;
393 391 int segvn_disable_textrepl_update = 0;
394 392
395 393 static void segvn_textrepl(struct seg *);
396 394 static void segvn_textunrepl(struct seg *, int);
397 395 static void segvn_inval_trcache(vnode_t *);
398 396 static void segvn_trasync_thread(void);
399 397 static void segvn_trupdate_wakeup(void *);
400 398 static void segvn_trupdate(void);
401 399 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
402 400 ulong_t);
403 401
404 402 /*
405 403 * Initialize segvn data structures
406 404 */
407 405 void
408 406 segvn_init(void)
409 407 {
410 408 uint_t maxszc;
411 409 uint_t szc;
412 410 size_t pgsz;
413 411
414 412 segvn_cache = kmem_cache_create("segvn_cache",
415 413 sizeof (struct segvn_data), 0,
416 414 segvn_cache_constructor, segvn_cache_destructor, NULL,
417 415 NULL, NULL, 0);
418 416
419 417 if (segvn_lpg_disable == 0) {
420 418 szc = maxszc = page_num_pagesizes() - 1;
421 419 if (szc == 0) {
422 420 segvn_lpg_disable = 1;
423 421 }
424 422 if (page_get_pagesize(0) != PAGESIZE) {
425 423 panic("segvn_init: bad szc 0");
426 424 /*NOTREACHED*/
427 425 }
428 426 while (szc != 0) {
429 427 pgsz = page_get_pagesize(szc);
430 428 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
431 429 panic("segvn_init: bad szc %d", szc);
432 430 /*NOTREACHED*/
433 431 }
434 432 szc--;
435 433 }
436 434 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
437 435 segvn_maxpgszc = maxszc;
438 436 }
439 437
440 438 if (segvn_maxpgszc) {
441 439 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
442 440 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
443 441 KM_SLEEP);
444 442 }
445 443
446 444 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
447 445 char str[32];
448 446
449 447 (void) sprintf(str, "segvn_szc_cache%d", szc);
450 448 segvn_szc_cache[szc] = kmem_cache_create(str,
451 449 page_get_pagecnt(szc) * sizeof (page_t *), 0,
452 450 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
453 451 }
454 452
455 453
456 454 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
457 455 segvn_use_regions = 0;
458 456
459 457 /*
460 458 * For now shared regions and text replication segvn support
461 459 * are mutually exclusive. This is acceptable because
462 460 * currently significant benefit from text replication was
463 461 * only observed on AMD64 NUMA platforms (due to relatively
464 462 * small L2$ size) and currently we don't support shared
465 463 * regions on x86.
466 464 */
467 465 if (segvn_use_regions && !segvn_disable_textrepl) {
468 466 segvn_disable_textrepl = 1;
469 467 }
470 468
471 469 #if defined(_LP64)
472 470 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
473 471 !segvn_disable_textrepl) {
474 472 ulong_t i;
475 473 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
476 474
477 475 svntr_cache = kmem_cache_create("svntr_cache",
478 476 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
479 477 NULL, NULL, NULL, 0);
480 478 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
481 479 for (i = 0; i < svntr_hashtab_sz; i++) {
482 480 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
483 481 MUTEX_DEFAULT, NULL);
484 482 }
485 483 segvn_textrepl_max_bytes = ptob(physmem) /
486 484 segvn_textrepl_max_bytes_factor;
487 485 segvn_textrepl_stats = kmem_zalloc(NCPU *
488 486 sizeof (svntr_stats_t), KM_SLEEP);
489 487 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
490 488 (void) thread_create(NULL, 0, segvn_trasync_thread,
491 489 NULL, 0, &p0, TS_RUN, minclsyspri);
492 490 }
493 491 #endif
494 492
495 493 if (!ISP2(segvn_pglock_comb_balign) ||
496 494 segvn_pglock_comb_balign < PAGESIZE) {
497 495 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
498 496 }
499 497 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
500 498 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
501 499 }
502 500
503 501 #define SEGVN_PAGEIO ((void *)0x1)
504 502 #define SEGVN_NOPAGEIO ((void *)0x2)
505 503
506 504 static void
507 505 segvn_setvnode_mpss(vnode_t *vp)
508 506 {
509 507 int err;
510 508
511 509 ASSERT(vp->v_mpssdata == NULL ||
512 510 vp->v_mpssdata == SEGVN_PAGEIO ||
513 511 vp->v_mpssdata == SEGVN_NOPAGEIO);
514 512
515 513 if (vp->v_mpssdata == NULL) {
516 514 if (vn_vmpss_usepageio(vp)) {
517 515 err = VOP_PAGEIO(vp, (page_t *)NULL,
518 516 (u_offset_t)0, 0, 0, CRED(), NULL);
519 517 } else {
520 518 err = ENOSYS;
521 519 }
522 520 /*
523 521 * set v_mpssdata just once per vnode life
524 522 * so that it never changes.
525 523 */
526 524 mutex_enter(&vp->v_lock);
527 525 if (vp->v_mpssdata == NULL) {
528 526 if (err == EINVAL) {
529 527 vp->v_mpssdata = SEGVN_PAGEIO;
530 528 } else {
531 529 vp->v_mpssdata = SEGVN_NOPAGEIO;
532 530 }
533 531 }
534 532 mutex_exit(&vp->v_lock);
535 533 }
536 534 }
537 535
538 536 int
539 537 segvn_create(struct seg *seg, void *argsp)
540 538 {
541 539 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
542 540 struct segvn_data *svd;
543 541 size_t swresv = 0;
544 542 struct cred *cred;
545 543 struct anon_map *amp;
546 544 int error = 0;
547 545 size_t pgsz;
548 546 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
549 547 int use_rgn = 0;
550 548 int trok = 0;
551 549
552 550 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
553 551
554 552 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
555 553 panic("segvn_create type");
556 554 /*NOTREACHED*/
557 555 }
558 556
559 557 /*
560 558 * Check arguments. If a shared anon structure is given then
561 559 * it is illegal to also specify a vp.
562 560 */
563 561 if (a->amp != NULL && a->vp != NULL) {
564 562 panic("segvn_create anon_map");
565 563 /*NOTREACHED*/
566 564 }
567 565
568 566 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
569 567 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
570 568 segvn_use_regions) {
571 569 use_rgn = 1;
572 570 }
573 571
574 572 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
575 573 if (a->type == MAP_SHARED)
576 574 a->flags &= ~MAP_NORESERVE;
577 575
578 576 if (a->szc != 0) {
579 577 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
580 578 (a->amp != NULL && a->type == MAP_PRIVATE) ||
581 579 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
582 580 a->szc = 0;
583 581 } else {
584 582 if (a->szc > segvn_maxpgszc)
585 583 a->szc = segvn_maxpgszc;
586 584 pgsz = page_get_pagesize(a->szc);
587 585 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
588 586 !IS_P2ALIGNED(seg->s_size, pgsz)) {
589 587 a->szc = 0;
590 588 } else if (a->vp != NULL) {
591 589 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
592 590 /*
593 591 * paranoid check.
594 592 * hat_page_demote() is not supported
595 593 * on swapfs pages.
596 594 */
597 595 a->szc = 0;
598 596 } else if (map_addr_vacalign_check(seg->s_base,
599 597 a->offset & PAGEMASK)) {
600 598 a->szc = 0;
601 599 }
602 600 } else if (a->amp != NULL) {
603 601 pgcnt_t anum = btopr(a->offset);
604 602 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
605 603 if (!IS_P2ALIGNED(anum, pgcnt)) {
606 604 a->szc = 0;
607 605 }
608 606 }
609 607 }
610 608 }
611 609
612 610 /*
613 611 * If segment may need private pages, reserve them now.
614 612 */
615 613 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
616 614 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
617 615 if (anon_resv_zone(seg->s_size,
618 616 seg->s_as->a_proc->p_zone) == 0)
619 617 return (EAGAIN);
620 618 swresv = seg->s_size;
621 619 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
622 620 seg, swresv, 1);
623 621 }
624 622
625 623 /*
626 624 * Reserve any mapping structures that may be required.
627 625 *
628 626 * Don't do it for segments that may use regions. It's currently a
629 627 * noop in the hat implementations anyway.
630 628 */
631 629 if (!use_rgn) {
632 630 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
633 631 }
634 632
635 633 if (a->cred) {
636 634 cred = a->cred;
637 635 crhold(cred);
638 636 } else {
639 637 crhold(cred = CRED());
640 638 }
641 639
642 640 /* Inform the vnode of the new mapping */
643 641 if (a->vp != NULL) {
644 642 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
645 643 seg->s_as, seg->s_base, seg->s_size, a->prot,
646 644 a->maxprot, a->type, cred, NULL);
647 645 if (error) {
648 646 if (swresv != 0) {
649 647 anon_unresv_zone(swresv,
650 648 seg->s_as->a_proc->p_zone);
651 649 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
652 650 "anon proc:%p %lu %u", seg, swresv, 0);
653 651 }
654 652 crfree(cred);
655 653 if (!use_rgn) {
656 654 hat_unload(seg->s_as->a_hat, seg->s_base,
657 655 seg->s_size, HAT_UNLOAD_UNMAP);
658 656 }
659 657 return (error);
660 658 }
661 659 /*
662 660 * svntr_hashtab will be NULL if we support shared regions.
663 661 */
664 662 trok = ((a->flags & MAP_TEXT) &&
665 663 (seg->s_size > textrepl_size_thresh ||
666 664 (a->flags & _MAP_TEXTREPL)) &&
667 665 lgrp_optimizations() && svntr_hashtab != NULL &&
668 666 a->type == MAP_PRIVATE && swresv == 0 &&
669 667 !(a->flags & MAP_NORESERVE) &&
670 668 seg->s_as != &kas && a->vp->v_type == VREG);
671 669
672 670 ASSERT(!trok || !use_rgn);
673 671 }
674 672
675 673 /*
676 674 * MAP_NORESERVE mappings don't count towards the VSZ of a process
677 675 * until we fault the pages in.
678 676 */
679 677 if ((a->vp == NULL || a->vp->v_type != VREG) &&
680 678 a->flags & MAP_NORESERVE) {
681 679 seg->s_as->a_resvsize -= seg->s_size;
682 680 }
683 681
684 682 /*
685 683 * If more than one segment in the address space, and they're adjacent
686 684 * virtually, try to concatenate them. Don't concatenate if an
687 685 * explicit anon_map structure was supplied (e.g., SystemV shared
688 686 * memory) or if we'll use text replication for this segment.
689 687 */
690 688 if (a->amp == NULL && !use_rgn && !trok) {
691 689 struct seg *pseg, *nseg;
692 690 struct segvn_data *psvd, *nsvd;
693 691 lgrp_mem_policy_t ppolicy, npolicy;
694 692 uint_t lgrp_mem_policy_flags = 0;
695 693 extern lgrp_mem_policy_t lgrp_mem_default_policy;
696 694
697 695 /*
698 696 * Memory policy flags (lgrp_mem_policy_flags) is valid when
699 697 * extending stack/heap segments.
700 698 */
701 699 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
702 700 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
703 701 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
704 702 } else {
705 703 /*
706 704 * Get policy when not extending it from another segment
707 705 */
708 706 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
709 707 }
710 708
711 709 /*
712 710 * First, try to concatenate the previous and new segments
713 711 */
714 712 pseg = AS_SEGPREV(seg->s_as, seg);
715 713 if (pseg != NULL &&
716 714 pseg->s_base + pseg->s_size == seg->s_base &&
717 715 pseg->s_ops == &segvn_ops) {
718 716 /*
719 717 * Get memory allocation policy from previous segment.
720 718 * When extension is specified (e.g. for heap) apply
721 719 * this policy to the new segment regardless of the
722 720 * outcome of segment concatenation. Extension occurs
723 721 * for non-default policy otherwise default policy is
724 722 * used and is based on extended segment size.
725 723 */
726 724 psvd = (struct segvn_data *)pseg->s_data;
727 725 ppolicy = psvd->policy_info.mem_policy;
728 726 if (lgrp_mem_policy_flags ==
729 727 LGRP_MP_FLAG_EXTEND_UP) {
730 728 if (ppolicy != lgrp_mem_default_policy) {
731 729 mpolicy = ppolicy;
732 730 } else {
733 731 mpolicy = lgrp_mem_policy_default(
734 732 pseg->s_size + seg->s_size,
735 733 a->type);
736 734 }
737 735 }
738 736
739 737 if (mpolicy == ppolicy &&
740 738 (pseg->s_size + seg->s_size <=
741 739 segvn_comb_thrshld || psvd->amp == NULL) &&
742 740 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
743 741 /*
744 742 * success! now try to concatenate
745 743 * with following seg
746 744 */
747 745 crfree(cred);
748 746 nseg = AS_SEGNEXT(pseg->s_as, pseg);
749 747 if (nseg != NULL &&
750 748 nseg != pseg &&
751 749 nseg->s_ops == &segvn_ops &&
752 750 pseg->s_base + pseg->s_size ==
753 751 nseg->s_base)
754 752 (void) segvn_concat(pseg, nseg, 0);
755 753 ASSERT(pseg->s_szc == 0 ||
756 754 (a->szc == pseg->s_szc &&
757 755 IS_P2ALIGNED(pseg->s_base, pgsz) &&
758 756 IS_P2ALIGNED(pseg->s_size, pgsz)));
759 757 return (0);
760 758 }
761 759 }
762 760
763 761 /*
764 762 * Failed, so try to concatenate with following seg
765 763 */
766 764 nseg = AS_SEGNEXT(seg->s_as, seg);
767 765 if (nseg != NULL &&
768 766 seg->s_base + seg->s_size == nseg->s_base &&
769 767 nseg->s_ops == &segvn_ops) {
770 768 /*
771 769 * Get memory allocation policy from next segment.
772 770 * When extension is specified (e.g. for stack) apply
773 771 * this policy to the new segment regardless of the
774 772 * outcome of segment concatenation. Extension occurs
775 773 * for non-default policy otherwise default policy is
776 774 * used and is based on extended segment size.
777 775 */
778 776 nsvd = (struct segvn_data *)nseg->s_data;
779 777 npolicy = nsvd->policy_info.mem_policy;
780 778 if (lgrp_mem_policy_flags ==
781 779 LGRP_MP_FLAG_EXTEND_DOWN) {
782 780 if (npolicy != lgrp_mem_default_policy) {
783 781 mpolicy = npolicy;
784 782 } else {
785 783 mpolicy = lgrp_mem_policy_default(
786 784 nseg->s_size + seg->s_size,
787 785 a->type);
788 786 }
789 787 }
790 788
791 789 if (mpolicy == npolicy &&
792 790 segvn_extend_next(seg, nseg, a, swresv) == 0) {
793 791 crfree(cred);
794 792 ASSERT(nseg->s_szc == 0 ||
795 793 (a->szc == nseg->s_szc &&
796 794 IS_P2ALIGNED(nseg->s_base, pgsz) &&
797 795 IS_P2ALIGNED(nseg->s_size, pgsz)));
798 796 return (0);
799 797 }
800 798 }
801 799 }
802 800
803 801 if (a->vp != NULL) {
804 802 VN_HOLD(a->vp);
805 803 if (a->type == MAP_SHARED)
806 804 lgrp_shm_policy_init(NULL, a->vp);
807 805 }
808 806 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
809 807
810 808 seg->s_ops = &segvn_ops;
811 809 seg->s_data = (void *)svd;
812 810 seg->s_szc = a->szc;
813 811
814 812 svd->seg = seg;
815 813 svd->vp = a->vp;
816 814 /*
817 815 * Anonymous mappings have no backing file so the offset is meaningless.
818 816 */
819 817 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
820 818 svd->prot = a->prot;
821 819 svd->maxprot = a->maxprot;
822 820 svd->pageprot = 0;
823 821 svd->type = a->type;
824 822 svd->vpage = NULL;
825 823 svd->cred = cred;
826 824 svd->advice = MADV_NORMAL;
827 825 svd->pageadvice = 0;
828 826 svd->flags = (ushort_t)a->flags;
829 827 svd->softlockcnt = 0;
830 828 svd->softlockcnt_sbase = 0;
831 829 svd->softlockcnt_send = 0;
832 830 svd->svn_inz = 0;
833 831 svd->rcookie = HAT_INVALID_REGION_COOKIE;
834 832 svd->pageswap = 0;
835 833
836 834 if (a->szc != 0 && a->vp != NULL) {
837 835 segvn_setvnode_mpss(a->vp);
838 836 }
839 837 if (svd->type == MAP_SHARED && svd->vp != NULL &&
840 838 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
841 839 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
842 840 segvn_inval_trcache(svd->vp);
843 841 }
844 842
845 843 amp = a->amp;
846 844 if ((svd->amp = amp) == NULL) {
847 845 svd->anon_index = 0;
848 846 if (svd->type == MAP_SHARED) {
849 847 svd->swresv = 0;
850 848 /*
851 849 * Shared mappings to a vp need no other setup.
852 850 * If we have a shared mapping to an anon_map object
853 851 * which hasn't been allocated yet, allocate the
854 852 * struct now so that it will be properly shared
855 853 * by remembering the swap reservation there.
856 854 */
857 855 if (a->vp == NULL) {
858 856 svd->amp = anonmap_alloc(seg->s_size, swresv,
859 857 ANON_SLEEP);
860 858 svd->amp->a_szc = seg->s_szc;
861 859 }
862 860 } else {
863 861 /*
864 862 * Private mapping (with or without a vp).
865 863 * Allocate anon_map when needed.
866 864 */
867 865 svd->swresv = swresv;
868 866 }
869 867 } else {
870 868 pgcnt_t anon_num;
871 869
872 870 /*
873 871 * Mapping to an existing anon_map structure without a vp.
874 872 * For now we will insure that the segment size isn't larger
875 873 * than the size - offset gives us. Later on we may wish to
876 874 * have the anon array dynamically allocated itself so that
877 875 * we don't always have to allocate all the anon pointer slots.
878 876 * This of course involves adding extra code to check that we
879 877 * aren't trying to use an anon pointer slot beyond the end
880 878 * of the currently allocated anon array.
881 879 */
882 880 if ((amp->size - a->offset) < seg->s_size) {
883 881 panic("segvn_create anon_map size");
884 882 /*NOTREACHED*/
885 883 }
886 884
887 885 anon_num = btopr(a->offset);
888 886
889 887 if (a->type == MAP_SHARED) {
890 888 /*
891 889 * SHARED mapping to a given anon_map.
892 890 */
893 891 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
894 892 amp->refcnt++;
895 893 if (a->szc > amp->a_szc) {
896 894 amp->a_szc = a->szc;
897 895 }
898 896 ANON_LOCK_EXIT(&amp->a_rwlock);
899 897 svd->anon_index = anon_num;
900 898 svd->swresv = 0;
901 899 } else {
902 900 /*
903 901 * PRIVATE mapping to a given anon_map.
904 902 * Make sure that all the needed anon
905 903 * structures are created (so that we will
906 904 * share the underlying pages if nothing
907 905 * is written by this mapping) and then
908 906 * duplicate the anon array as is done
909 907 * when a privately mapped segment is dup'ed.
910 908 */
911 909 struct anon *ap;
912 910 caddr_t addr;
913 911 caddr_t eaddr;
914 912 ulong_t anon_idx;
915 913 int hat_flag = HAT_LOAD;
916 914
917 915 if (svd->flags & MAP_TEXT) {
918 916 hat_flag |= HAT_LOAD_TEXT;
919 917 }
920 918
921 919 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
922 920 svd->amp->a_szc = seg->s_szc;
923 921 svd->anon_index = 0;
924 922 svd->swresv = swresv;
925 923
926 924 /*
927 925 * Prevent 2 threads from allocating anon
928 926 * slots simultaneously.
929 927 */
930 928 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
931 929 eaddr = seg->s_base + seg->s_size;
932 930
933 931 for (anon_idx = anon_num, addr = seg->s_base;
934 932 addr < eaddr; addr += PAGESIZE, anon_idx++) {
935 933 page_t *pp;
936 934
937 935 if ((ap = anon_get_ptr(amp->ahp,
938 936 anon_idx)) != NULL)
939 937 continue;
940 938
941 939 /*
942 940 * Allocate the anon struct now.
943 941 * Might as well load up translation
944 942 * to the page while we're at it...
945 943 */
946 944 pp = anon_zero(seg, addr, &ap, cred);
947 945 if (ap == NULL || pp == NULL) {
948 946 panic("segvn_create anon_zero");
949 947 /*NOTREACHED*/
950 948 }
951 949
952 950 /*
953 951 * Re-acquire the anon_map lock and
954 952 * initialize the anon array entry.
955 953 */
956 954 ASSERT(anon_get_ptr(amp->ahp,
957 955 anon_idx) == NULL);
958 956 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
959 957 ANON_SLEEP);
960 958
961 959 ASSERT(seg->s_szc == 0);
962 960 ASSERT(!IS_VMODSORT(pp->p_vnode));
963 961
964 962 ASSERT(use_rgn == 0);
965 963 hat_memload(seg->s_as->a_hat, addr, pp,
966 964 svd->prot & ~PROT_WRITE, hat_flag);
967 965
968 966 page_unlock(pp);
969 967 }
970 968 ASSERT(seg->s_szc == 0);
971 969 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
972 970 0, seg->s_size);
973 971 ANON_LOCK_EXIT(&amp->a_rwlock);
974 972 }
975 973 }
976 974
977 975 /*
978 976 * Set default memory allocation policy for segment
979 977 *
980 978 * Always set policy for private memory at least for initialization
981 979 * even if this is a shared memory segment
982 980 */
983 981 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
984 982
985 983 if (svd->type == MAP_SHARED)
986 984 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
987 985 svd->vp, svd->offset, seg->s_size);
988 986
989 987 if (use_rgn) {
990 988 ASSERT(!trok);
991 989 ASSERT(svd->amp == NULL);
992 990 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
993 991 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
994 992 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
995 993 HAT_REGION_TEXT);
996 994 }
997 995
998 996 ASSERT(!trok || !(svd->prot & PROT_WRITE));
999 997 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
1000 998
1001 999 return (0);
1002 1000 }
1003 1001
1004 1002 /*
1005 1003 * Concatenate two existing segments, if possible.
1006 1004 * Return 0 on success, -1 if two segments are not compatible
1007 1005 * or -2 on memory allocation failure.
1008 1006 * If amp_cat == 1 then try and concat segments with anon maps
1009 1007 */
1010 1008 static int
1011 1009 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
1012 1010 {
1013 1011 struct segvn_data *svd1 = seg1->s_data;
1014 1012 struct segvn_data *svd2 = seg2->s_data;
1015 1013 struct anon_map *amp1 = svd1->amp;
1016 1014 struct anon_map *amp2 = svd2->amp;
1017 1015 struct vpage *vpage1 = svd1->vpage;
1018 1016 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
1019 1017 size_t size, nvpsize;
1020 1018 pgcnt_t npages1, npages2;
1021 1019
1022 1020 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
1023 1021 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1024 1022 ASSERT(seg1->s_ops == seg2->s_ops);
1025 1023
1026 1024 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1027 1025 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1028 1026 return (-1);
1029 1027 }
1030 1028
1031 1029 /* both segments exist, try to merge them */
1032 1030 #define incompat(x) (svd1->x != svd2->x)
1033 1031 if (incompat(vp) || incompat(maxprot) ||
1034 1032 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1035 1033 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1036 1034 incompat(type) || incompat(cred) || incompat(flags) ||
1037 1035 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1038 1036 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1039 1037 return (-1);
1040 1038 #undef incompat
1041 1039
1042 1040 /*
1043 1041 * vp == NULL implies zfod, offset doesn't matter
1044 1042 */
1045 1043 if (svd1->vp != NULL &&
1046 1044 svd1->offset + seg1->s_size != svd2->offset) {
1047 1045 return (-1);
1048 1046 }
1049 1047
1050 1048 /*
1051 1049 * Don't concatenate if either segment uses text replication.
1052 1050 */
1053 1051 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1054 1052 return (-1);
1055 1053 }
1056 1054
1057 1055 /*
1058 1056 * Fail early if we're not supposed to concatenate
1059 1057 * segments with non NULL amp.
1060 1058 */
1061 1059 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1062 1060 return (-1);
1063 1061 }
1064 1062
1065 1063 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1066 1064 if (amp1 != amp2) {
1067 1065 return (-1);
1068 1066 }
1069 1067 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1070 1068 svd2->anon_index) {
1071 1069 return (-1);
1072 1070 }
1073 1071 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1074 1072 }
1075 1073
1076 1074 /*
1077 1075 * If either seg has vpages, create a new merged vpage array.
1078 1076 */
1079 1077 if (vpage1 != NULL || vpage2 != NULL) {
1080 1078 struct vpage *vp, *evp;
1081 1079
1082 1080 npages1 = seg_pages(seg1);
1083 1081 npages2 = seg_pages(seg2);
1084 1082 nvpsize = vpgtob(npages1 + npages2);
1085 1083
1086 1084 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1087 1085 return (-2);
1088 1086 }
1089 1087
1090 1088 if (vpage1 != NULL) {
1091 1089 bcopy(vpage1, nvpage, vpgtob(npages1));
1092 1090 } else {
1093 1091 evp = nvpage + npages1;
1094 1092 for (vp = nvpage; vp < evp; vp++) {
1095 1093 VPP_SETPROT(vp, svd1->prot);
1096 1094 VPP_SETADVICE(vp, svd1->advice);
1097 1095 }
1098 1096 }
1099 1097
1100 1098 if (vpage2 != NULL) {
1101 1099 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1102 1100 } else {
1103 1101 evp = nvpage + npages1 + npages2;
1104 1102 for (vp = nvpage + npages1; vp < evp; vp++) {
1105 1103 VPP_SETPROT(vp, svd2->prot);
1106 1104 VPP_SETADVICE(vp, svd2->advice);
1107 1105 }
1108 1106 }
1109 1107
1110 1108 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1111 1109 ASSERT(svd1->swresv == seg1->s_size);
1112 1110 ASSERT(!(svd1->flags & MAP_NORESERVE));
1113 1111 ASSERT(!(svd2->flags & MAP_NORESERVE));
1114 1112 evp = nvpage + npages1;
1115 1113 for (vp = nvpage; vp < evp; vp++) {
1116 1114 VPP_SETSWAPRES(vp);
1117 1115 }
1118 1116 }
1119 1117
1120 1118 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1121 1119 ASSERT(svd2->swresv == seg2->s_size);
1122 1120 ASSERT(!(svd1->flags & MAP_NORESERVE));
1123 1121 ASSERT(!(svd2->flags & MAP_NORESERVE));
1124 1122 vp = nvpage + npages1;
1125 1123 evp = vp + npages2;
1126 1124 for (; vp < evp; vp++) {
1127 1125 VPP_SETSWAPRES(vp);
1128 1126 }
1129 1127 }
1130 1128 }
1131 1129 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1132 1130 (svd1->pageswap == 0 && svd2->pageswap == 0));
1133 1131
1134 1132 /*
1135 1133 * If either segment has private pages, create a new merged anon
1136 1134 * array. If merging shared anon segments just decrement anon map's
1137 1135 * refcnt.
1138 1136 */
1139 1137 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1140 1138 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1141 1139 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1142 1140 ASSERT(amp1->refcnt >= 2);
1143 1141 amp1->refcnt--;
1144 1142 ANON_LOCK_EXIT(&amp1->a_rwlock);
1145 1143 svd2->amp = NULL;
1146 1144 } else if (amp1 != NULL || amp2 != NULL) {
1147 1145 struct anon_hdr *nahp;
1148 1146 struct anon_map *namp = NULL;
1149 1147 size_t asize;
1150 1148
1151 1149 ASSERT(svd1->type == MAP_PRIVATE);
1152 1150
1153 1151 asize = seg1->s_size + seg2->s_size;
1154 1152 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1155 1153 if (nvpage != NULL) {
1156 1154 kmem_free(nvpage, nvpsize);
1157 1155 }
1158 1156 return (-2);
1159 1157 }
1160 1158 if (amp1 != NULL) {
1161 1159 /*
1162 1160 * XXX anon rwlock is not really needed because
1163 1161 * this is a private segment and we are writers.
1164 1162 */
1165 1163 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1166 1164 ASSERT(amp1->refcnt == 1);
1167 1165 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1168 1166 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1169 1167 anon_release(nahp, btop(asize));
1170 1168 ANON_LOCK_EXIT(&amp1->a_rwlock);
1171 1169 if (nvpage != NULL) {
1172 1170 kmem_free(nvpage, nvpsize);
1173 1171 }
1174 1172 return (-2);
1175 1173 }
1176 1174 }
1177 1175 if (amp2 != NULL) {
1178 1176 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1179 1177 ASSERT(amp2->refcnt == 1);
1180 1178 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1181 1179 nahp, btop(seg1->s_size), btop(seg2->s_size),
1182 1180 ANON_NOSLEEP)) {
1183 1181 anon_release(nahp, btop(asize));
1184 1182 ANON_LOCK_EXIT(&amp2->a_rwlock);
1185 1183 if (amp1 != NULL) {
1186 1184 ANON_LOCK_EXIT(&amp1->a_rwlock);
1187 1185 }
1188 1186 if (nvpage != NULL) {
1189 1187 kmem_free(nvpage, nvpsize);
1190 1188 }
1191 1189 return (-2);
1192 1190 }
1193 1191 }
1194 1192 if (amp1 != NULL) {
1195 1193 namp = amp1;
1196 1194 anon_release(amp1->ahp, btop(amp1->size));
1197 1195 }
1198 1196 if (amp2 != NULL) {
1199 1197 if (namp == NULL) {
1200 1198 ASSERT(amp1 == NULL);
1201 1199 namp = amp2;
1202 1200 anon_release(amp2->ahp, btop(amp2->size));
1203 1201 } else {
1204 1202 amp2->refcnt--;
1205 1203 ANON_LOCK_EXIT(&amp2->a_rwlock);
1206 1204 anonmap_free(amp2);
1207 1205 }
1208 1206 svd2->amp = NULL; /* needed for seg_free */
1209 1207 }
1210 1208 namp->ahp = nahp;
1211 1209 namp->size = asize;
1212 1210 svd1->amp = namp;
1213 1211 svd1->anon_index = 0;
1214 1212 ANON_LOCK_EXIT(&namp->a_rwlock);
1215 1213 }
1216 1214 /*
1217 1215 * Now free the old vpage structures.
1218 1216 */
1219 1217 if (nvpage != NULL) {
1220 1218 if (vpage1 != NULL) {
1221 1219 kmem_free(vpage1, vpgtob(npages1));
1222 1220 }
1223 1221 if (vpage2 != NULL) {
1224 1222 svd2->vpage = NULL;
1225 1223 kmem_free(vpage2, vpgtob(npages2));
1226 1224 }
1227 1225 if (svd2->pageprot) {
1228 1226 svd1->pageprot = 1;
1229 1227 }
1230 1228 if (svd2->pageadvice) {
1231 1229 svd1->pageadvice = 1;
1232 1230 }
1233 1231 if (svd2->pageswap) {
1234 1232 svd1->pageswap = 1;
1235 1233 }
1236 1234 svd1->vpage = nvpage;
1237 1235 }
1238 1236
1239 1237 /* all looks ok, merge segments */
1240 1238 svd1->swresv += svd2->swresv;
1241 1239 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1242 1240 size = seg2->s_size;
1243 1241 seg_free(seg2);
1244 1242 seg1->s_size += size;
1245 1243 return (0);
1246 1244 }
1247 1245
1248 1246 /*
1249 1247 * Extend the previous segment (seg1) to include the
1250 1248 * new segment (seg2 + a), if possible.
1251 1249 * Return 0 on success.
1252 1250 */
1253 1251 static int
1254 1252 segvn_extend_prev(seg1, seg2, a, swresv)
1255 1253 struct seg *seg1, *seg2;
1256 1254 struct segvn_crargs *a;
1257 1255 size_t swresv;
1258 1256 {
1259 1257 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1260 1258 size_t size;
1261 1259 struct anon_map *amp1;
1262 1260 struct vpage *new_vpage;
1263 1261
1264 1262 /*
1265 1263 * We don't need any segment level locks for "segvn" data
1266 1264 * since the address space is "write" locked.
1267 1265 */
1268 1266 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1269 1267
1270 1268 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1271 1269 return (-1);
1272 1270 }
1273 1271
1274 1272 /* second segment is new, try to extend first */
1275 1273 /* XXX - should also check cred */
1276 1274 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1277 1275 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1278 1276 svd1->type != a->type || svd1->flags != a->flags ||
1279 1277 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1280 1278 return (-1);
1281 1279
1282 1280 /* vp == NULL implies zfod, offset doesn't matter */
1283 1281 if (svd1->vp != NULL &&
1284 1282 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1285 1283 return (-1);
1286 1284
1287 1285 if (svd1->tr_state != SEGVN_TR_OFF) {
1288 1286 return (-1);
1289 1287 }
1290 1288
1291 1289 amp1 = svd1->amp;
1292 1290 if (amp1) {
1293 1291 pgcnt_t newpgs;
1294 1292
1295 1293 /*
1296 1294 * Segment has private pages, can data structures
1297 1295 * be expanded?
1298 1296 *
1299 1297 * Acquire the anon_map lock to prevent it from changing,
1300 1298 * if it is shared. This ensures that the anon_map
1301 1299 * will not change while a thread which has a read/write
1302 1300 * lock on an address space references it.
1303 1301 * XXX - Don't need the anon_map lock at all if "refcnt"
1304 1302 * is 1.
1305 1303 *
1306 1304 * Can't grow a MAP_SHARED segment with an anonmap because
1307 1305 * there may be existing anon slots where we want to extend
1308 1306 * the segment and we wouldn't know what to do with them
1309 1307 * (e.g., for tmpfs right thing is to just leave them there,
1310 1308 * for /dev/zero they should be cleared out).
1311 1309 */
1312 1310 if (svd1->type == MAP_SHARED)
1313 1311 return (-1);
1314 1312
1315 1313 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1316 1314 if (amp1->refcnt > 1) {
1317 1315 ANON_LOCK_EXIT(&amp1->a_rwlock);
1318 1316 return (-1);
1319 1317 }
1320 1318 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1321 1319 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1322 1320
1323 1321 if (newpgs == 0) {
1324 1322 ANON_LOCK_EXIT(&amp1->a_rwlock);
1325 1323 return (-1);
1326 1324 }
1327 1325 amp1->size = ptob(newpgs);
1328 1326 ANON_LOCK_EXIT(&amp1->a_rwlock);
1329 1327 }
1330 1328 if (svd1->vpage != NULL) {
1331 1329 struct vpage *vp, *evp;
1332 1330 new_vpage =
1333 1331 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1334 1332 KM_NOSLEEP);
1335 1333 if (new_vpage == NULL)
1336 1334 return (-1);
1337 1335 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1338 1336 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1339 1337 svd1->vpage = new_vpage;
1340 1338
1341 1339 vp = new_vpage + seg_pages(seg1);
1342 1340 evp = vp + seg_pages(seg2);
1343 1341 for (; vp < evp; vp++)
1344 1342 VPP_SETPROT(vp, a->prot);
1345 1343 if (svd1->pageswap && swresv) {
1346 1344 ASSERT(!(svd1->flags & MAP_NORESERVE));
1347 1345 ASSERT(swresv == seg2->s_size);
1348 1346 vp = new_vpage + seg_pages(seg1);
1349 1347 for (; vp < evp; vp++) {
1350 1348 VPP_SETSWAPRES(vp);
1351 1349 }
1352 1350 }
1353 1351 }
1354 1352 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1355 1353 size = seg2->s_size;
1356 1354 seg_free(seg2);
1357 1355 seg1->s_size += size;
1358 1356 svd1->swresv += swresv;
1359 1357 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1360 1358 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1361 1359 (svd1->vp->v_flag & VVMEXEC)) {
1362 1360 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1363 1361 segvn_inval_trcache(svd1->vp);
1364 1362 }
1365 1363 return (0);
1366 1364 }
1367 1365
1368 1366 /*
1369 1367 * Extend the next segment (seg2) to include the
1370 1368 * new segment (seg1 + a), if possible.
1371 1369 * Return 0 on success.
1372 1370 */
1373 1371 static int
1374 1372 segvn_extend_next(
1375 1373 struct seg *seg1,
1376 1374 struct seg *seg2,
1377 1375 struct segvn_crargs *a,
1378 1376 size_t swresv)
1379 1377 {
1380 1378 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1381 1379 size_t size;
1382 1380 struct anon_map *amp2;
1383 1381 struct vpage *new_vpage;
1384 1382
1385 1383 /*
1386 1384 * We don't need any segment level locks for "segvn" data
1387 1385 * since the address space is "write" locked.
1388 1386 */
1389 1387 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1390 1388
1391 1389 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1392 1390 return (-1);
1393 1391 }
1394 1392
1395 1393 /* first segment is new, try to extend second */
1396 1394 /* XXX - should also check cred */
1397 1395 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1398 1396 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1399 1397 svd2->type != a->type || svd2->flags != a->flags ||
1400 1398 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1401 1399 return (-1);
1402 1400 /* vp == NULL implies zfod, offset doesn't matter */
1403 1401 if (svd2->vp != NULL &&
1404 1402 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1405 1403 return (-1);
1406 1404
1407 1405 if (svd2->tr_state != SEGVN_TR_OFF) {
1408 1406 return (-1);
1409 1407 }
1410 1408
1411 1409 amp2 = svd2->amp;
1412 1410 if (amp2) {
1413 1411 pgcnt_t newpgs;
1414 1412
1415 1413 /*
1416 1414 * Segment has private pages, can data structures
1417 1415 * be expanded?
1418 1416 *
1419 1417 * Acquire the anon_map lock to prevent it from changing,
1420 1418 * if it is shared. This ensures that the anon_map
1421 1419 * will not change while a thread which has a read/write
1422 1420 * lock on an address space references it.
1423 1421 *
1424 1422 * XXX - Don't need the anon_map lock at all if "refcnt"
1425 1423 * is 1.
1426 1424 */
1427 1425 if (svd2->type == MAP_SHARED)
1428 1426 return (-1);
1429 1427
1430 1428 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1431 1429 if (amp2->refcnt > 1) {
1432 1430 ANON_LOCK_EXIT(&amp2->a_rwlock);
1433 1431 return (-1);
1434 1432 }
1435 1433 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1436 1434 btop(seg2->s_size), btop(seg1->s_size),
1437 1435 ANON_NOSLEEP | ANON_GROWDOWN);
1438 1436
1439 1437 if (newpgs == 0) {
1440 1438 ANON_LOCK_EXIT(&amp2->a_rwlock);
1441 1439 return (-1);
1442 1440 }
1443 1441 amp2->size = ptob(newpgs);
1444 1442 ANON_LOCK_EXIT(&amp2->a_rwlock);
1445 1443 }
1446 1444 if (svd2->vpage != NULL) {
1447 1445 struct vpage *vp, *evp;
1448 1446 new_vpage =
1449 1447 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1450 1448 KM_NOSLEEP);
1451 1449 if (new_vpage == NULL) {
1452 1450 /* Not merging segments so adjust anon_index back */
1453 1451 if (amp2)
1454 1452 svd2->anon_index += seg_pages(seg1);
1455 1453 return (-1);
1456 1454 }
1457 1455 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1458 1456 vpgtob(seg_pages(seg2)));
1459 1457 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1460 1458 svd2->vpage = new_vpage;
1461 1459
1462 1460 vp = new_vpage;
1463 1461 evp = vp + seg_pages(seg1);
1464 1462 for (; vp < evp; vp++)
1465 1463 VPP_SETPROT(vp, a->prot);
1466 1464 if (svd2->pageswap && swresv) {
1467 1465 ASSERT(!(svd2->flags & MAP_NORESERVE));
1468 1466 ASSERT(swresv == seg1->s_size);
1469 1467 vp = new_vpage;
1470 1468 for (; vp < evp; vp++) {
1471 1469 VPP_SETSWAPRES(vp);
1472 1470 }
1473 1471 }
1474 1472 }
1475 1473 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1476 1474 size = seg1->s_size;
1477 1475 seg_free(seg1);
1478 1476 seg2->s_size += size;
1479 1477 seg2->s_base -= size;
1480 1478 svd2->offset -= size;
1481 1479 svd2->swresv += swresv;
1482 1480 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1483 1481 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1484 1482 (svd2->vp->v_flag & VVMEXEC)) {
1485 1483 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1486 1484 segvn_inval_trcache(svd2->vp);
1487 1485 }
1488 1486 return (0);
1489 1487 }
1490 1488
1491 1489 /*
1492 1490 * Duplicate all the pages in the segment. This may break COW sharing for a
1493 1491 * given page. If the page is marked with inherit zero set, then instead of
1494 1492 * duplicating the page, we zero the page.
1495 1493 */
1496 1494 static int
1497 1495 segvn_dup_pages(struct seg *seg, struct seg *newseg)
1498 1496 {
1499 1497 int error;
1500 1498 uint_t prot;
1501 1499 page_t *pp;
1502 1500 struct anon *ap, *newap;
1503 1501 size_t i;
1504 1502 caddr_t addr;
1505 1503
1506 1504 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1507 1505 struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
1508 1506 ulong_t old_idx = svd->anon_index;
1509 1507 ulong_t new_idx = 0;
1510 1508
1511 1509 i = btopr(seg->s_size);
1512 1510 addr = seg->s_base;
1513 1511
1514 1512 /*
1515 1513 * XXX break cow sharing using PAGESIZE
1516 1514 * pages. They will be relocated into larger
1517 1515 * pages at fault time.
1518 1516 */
1519 1517 while (i-- > 0) {
1520 1518 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
1521 1519 struct vpage *vpp;
1522 1520
1523 1521 vpp = &svd->vpage[seg_page(seg, addr)];
1524 1522
1525 1523 /*
1526 1524 * prot need not be computed below 'cause anon_private
1527 1525 * is going to ignore it anyway as child doesn't inherit
1528 1526 * pagelock from parent.
1529 1527 */
1530 1528 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;
1531 1529
1532 1530 /*
1533 1531 * Check whether we should zero this or dup it.
1534 1532 */
1535 1533 if (svd->svn_inz == SEGVN_INZ_ALL ||
1536 1534 (svd->svn_inz == SEGVN_INZ_VPP &&
1537 1535 VPP_ISINHZERO(vpp))) {
1538 1536 pp = anon_zero(newseg, addr, &newap,
1539 1537 newsvd->cred);
1540 1538 } else {
1541 1539 page_t *anon_pl[1+1];
1542 1540 uint_t vpprot;
1543 1541 error = anon_getpage(&ap, &vpprot, anon_pl,
1544 1542 PAGESIZE, seg, addr, S_READ, svd->cred);
1545 1543 if (error != 0)
1546 1544 return (error);
1547 1545
1548 1546 pp = anon_private(&newap, newseg, addr, prot,
1549 1547 anon_pl[0], 0, newsvd->cred);
1550 1548 }
1551 1549 if (pp == NULL) {
1552 1550 return (ENOMEM);
1553 1551 }
1554 1552 (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
1555 1553 ANON_SLEEP);
1556 1554 page_unlock(pp);
1557 1555 }
1558 1556 addr += PAGESIZE;
1559 1557 old_idx++;
1560 1558 new_idx++;
1561 1559 }
1562 1560
1563 1561 return (0);
1564 1562 }
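
/*
 * Illustrative walk of segvn_dup_pages() above, under hypothetical slot
 * states: with svn_inz == SEGVN_INZ_VPP, an occupied anon slot whose
 * vpage entry has the inherit-zero bit set is replaced in the child by
 * a fresh zeroed page:
 *
 *	pp = anon_zero(newseg, addr, &newap, newsvd->cred);
 *
 * while every other occupied slot is resolved and privatized, breaking
 * COW sharing up front:
 *
 *	error = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE, seg,
 *	    addr, S_READ, svd->cred);
 *	pp = anon_private(&newap, newseg, addr, prot, anon_pl[0], 0,
 *	    newsvd->cred);
 *
 * Empty slots are skipped entirely; they fault in later on demand.
 */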
1565 1563
1566 1564 static int
1567 1565 segvn_dup(struct seg *seg, struct seg *newseg)
1568 1566 {
1569 1567 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1570 1568 struct segvn_data *newsvd;
1571 1569 pgcnt_t npages = seg_pages(seg);
1572 1570 int error = 0;
1573 1571 size_t len;
1574 1572 struct anon_map *amp;
1575 1573
1576 1574 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1577 1575 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1578 1576
1579 1577 /*
1580 1578 * If segment has anon reserved, reserve more for the new seg.
1581 1579 * For a MAP_NORESERVE segment swresv will be a count of all the
1582 1580 * allocated anon slots; thus we reserve for the child as many slots
1583 1581 * as the parent has allocated. This semantic prevents the child or
1584 1582	 * parent from dying during a copy-on-write fault caused by trying
1585 1583 * to write a shared pre-existing anon page.
1586 1584 */
1587 1585 if ((len = svd->swresv) != 0) {
1588 1586 if (anon_resv(svd->swresv) == 0)
1589 1587 return (ENOMEM);
1590 1588
1591 1589 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1592 1590 seg, len, 0);
1593 1591 }
1594 1592
1595 1593 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1596 1594
1597 1595 newseg->s_ops = &segvn_ops;
1598 1596 newseg->s_data = (void *)newsvd;
1599 1597 newseg->s_szc = seg->s_szc;
1600 1598
1601 1599 newsvd->seg = newseg;
1602 1600 if ((newsvd->vp = svd->vp) != NULL) {
1603 1601 VN_HOLD(svd->vp);
1604 1602 if (svd->type == MAP_SHARED)
1605 1603 lgrp_shm_policy_init(NULL, svd->vp);
1606 1604 }
1607 1605 newsvd->offset = svd->offset;
1608 1606 newsvd->prot = svd->prot;
1609 1607 newsvd->maxprot = svd->maxprot;
1610 1608 newsvd->pageprot = svd->pageprot;
1611 1609 newsvd->type = svd->type;
1612 1610 newsvd->cred = svd->cred;
1613 1611 crhold(newsvd->cred);
1614 1612 newsvd->advice = svd->advice;
1615 1613 newsvd->pageadvice = svd->pageadvice;
1616 1614 newsvd->svn_inz = svd->svn_inz;
1617 1615 newsvd->swresv = svd->swresv;
1618 1616 newsvd->pageswap = svd->pageswap;
1619 1617 newsvd->flags = svd->flags;
1620 1618 newsvd->softlockcnt = 0;
1621 1619 newsvd->softlockcnt_sbase = 0;
1622 1620 newsvd->softlockcnt_send = 0;
1623 1621 newsvd->policy_info = svd->policy_info;
1624 1622 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1625 1623
1626 1624 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1627 1625 /*
1628 1626 * Not attaching to a shared anon object.
1629 1627 */
1630 1628 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1631 1629 svd->tr_state == SEGVN_TR_OFF);
1632 1630 if (svd->tr_state == SEGVN_TR_ON) {
1633 1631 ASSERT(newsvd->vp != NULL && amp != NULL);
1634 1632 newsvd->tr_state = SEGVN_TR_INIT;
1635 1633 } else {
1636 1634 newsvd->tr_state = svd->tr_state;
1637 1635 }
1638 1636 newsvd->amp = NULL;
1639 1637 newsvd->anon_index = 0;
1640 1638 } else {
1641 1639 /* regions for now are only used on pure vnode segments */
1642 1640 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1643 1641 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1644 1642 newsvd->tr_state = SEGVN_TR_OFF;
1645 1643 if (svd->type == MAP_SHARED) {
1646 1644 ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
1647 1645 newsvd->amp = amp;
1648 1646			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1649 1647 amp->refcnt++;
1650 1648			ANON_LOCK_EXIT(&amp->a_rwlock);
1651 1649 newsvd->anon_index = svd->anon_index;
1652 1650 } else {
1653 1651 int reclaim = 1;
1654 1652
1655 1653 /*
1656 1654 * Allocate and initialize new anon_map structure.
1657 1655 */
1658 1656 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1659 1657 ANON_SLEEP);
1660 1658 newsvd->amp->a_szc = newseg->s_szc;
1661 1659 newsvd->anon_index = 0;
1662 1660 ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
1663 1661 svd->svn_inz == SEGVN_INZ_ALL ||
1664 1662 svd->svn_inz == SEGVN_INZ_VPP);
1665 1663
1666 1664 /*
1667 1665 * We don't have to acquire the anon_map lock
1668 1666 * for the new segment (since it belongs to an
1669 1667 * address space that is still not associated
1670 1668 * with any process), or the segment in the old
1671 1669 * address space (since all threads in it
1672 1670 * are stopped while duplicating the address space).
1673 1671 */
1674 1672
1675 1673 /*
1676 1674 * The goal of the following code is to make sure that
1677 1675 * softlocked pages do not end up as copy on write
1678 1676 * pages. This would cause problems where one
1679 1677 * thread writes to a page that is COW and a different
1680 1678 * thread in the same process has softlocked it. The
1681 1679 * softlock lock would move away from this process
1682 1680 * because the write would cause this process to get
1683 1681 * a copy (without the softlock).
1684 1682 *
1685 1683 * The strategy here is to just break the
1686 1684 * sharing on pages that could possibly be
1687 1685 * softlocked.
1688 1686 *
1689 1687 * In addition, if any pages have been marked that they
1690 1688 * should be inherited as zero, then we immediately go
1691 1689 * ahead and break COW and zero them. In the case of a
1692 1690 * softlocked page that should be inherited zero, we
1693 1691 * break COW and just get a zero page.
1694 1692 */
1695 1693 retry:
1696 1694 if (svd->softlockcnt ||
1697 1695 svd->svn_inz != SEGVN_INZ_NONE) {
1698 1696 /*
1699 1697				 * The softlock count might be non-zero
1700 1698 * because some pages are still stuck in the
1701 1699 * cache for lazy reclaim. Flush the cache
1702 1700 * now. This should drop the count to zero.
1703 1701 * [or there is really I/O going on to these
1704 1702 * pages]. Note, we have the writers lock so
1705 1703 * nothing gets inserted during the flush.
1706 1704 */
1707 1705 if (svd->softlockcnt && reclaim == 1) {
1708 1706 segvn_purge(seg);
1709 1707 reclaim = 0;
1710 1708 goto retry;
1711 1709 }
1712 1710
1713 1711 error = segvn_dup_pages(seg, newseg);
1714 1712 if (error != 0) {
1715 1713 newsvd->vpage = NULL;
1716 1714 goto out;
1717 1715 }
1718 1716 } else { /* common case */
1719 1717 if (seg->s_szc != 0) {
1720 1718 /*
1721 1719 * If at least one of anon slots of a
1722 1720 * large page exists then make sure
1723 1721 * all anon slots of a large page
1724 1722 * exist to avoid partial cow sharing
1725 1723 * of a large page in the future.
1726 1724 */
1727 1725 anon_dup_fill_holes(amp->ahp,
1728 1726 svd->anon_index, newsvd->amp->ahp,
1729 1727 0, seg->s_size, seg->s_szc,
1730 1728 svd->vp != NULL);
1731 1729 } else {
1732 1730 anon_dup(amp->ahp, svd->anon_index,
1733 1731 newsvd->amp->ahp, 0, seg->s_size);
1734 1732 }
1735 1733
1736 1734 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1737 1735 seg->s_size, PROT_WRITE);
1738 1736 }
1739 1737 }
1740 1738 }
1741 1739 /*
1742 1740 * If necessary, create a vpage structure for the new segment.
1743 1741 * Do not copy any page lock indications.
1744 1742 */
1745 1743 if (svd->vpage != NULL) {
1746 1744 uint_t i;
1747 1745 struct vpage *ovp = svd->vpage;
1748 1746 struct vpage *nvp;
1749 1747
1750 1748 nvp = newsvd->vpage =
1751 1749 kmem_alloc(vpgtob(npages), KM_SLEEP);
1752 1750 for (i = 0; i < npages; i++) {
1753 1751 *nvp = *ovp++;
1754 1752 VPP_CLRPPLOCK(nvp++);
1755 1753 }
1756 1754 } else
1757 1755 newsvd->vpage = NULL;
1758 1756
1759 1757 /* Inform the vnode of the new mapping */
1760 1758 if (newsvd->vp != NULL) {
1761 1759 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1762 1760 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1763 1761 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1764 1762 }
1765 1763 out:
1766 1764 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1767 1765 ASSERT(newsvd->amp == NULL);
1768 1766 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1769 1767 newsvd->rcookie = svd->rcookie;
1770 1768 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1771 1769 }
1772 1770 return (error);
1773 1771 }
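
/*
 * Worked example of the reservation rule above, with hypothetical
 * sizes: a MAP_NORESERVE parent segment of 100 pages that has faulted
 * in 5 anon pages carries svd->swresv == ptob(5), so segvn_dup()
 * reserves only 5 pages of swap for the child rather than all 100 -
 * just enough that neither process can die on a later copy-on-write
 * fault against those pre-existing anon pages.
 */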
1774 1772
1775 1773
1776 1774 /*
1777 1775 * callback function to invoke free_vp_pages() for only those pages actually
1778 1776 * processed by the HAT when a shared region is destroyed.
1779 1777 */
1780 1778 extern int free_pages;
1781 1779
1782 1780 static void
1783 1781 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1784 1782 size_t r_size, void *r_obj, u_offset_t r_objoff)
1785 1783 {
1786 1784 u_offset_t off;
1787 1785 size_t len;
1788 1786 vnode_t *vp = (vnode_t *)r_obj;
1789 1787
1790 1788 ASSERT(eaddr > saddr);
1791 1789 ASSERT(saddr >= r_saddr);
1792 1790 ASSERT(saddr < r_saddr + r_size);
1793 1791 ASSERT(eaddr > r_saddr);
1794 1792 ASSERT(eaddr <= r_saddr + r_size);
1795 1793 ASSERT(vp != NULL);
1796 1794
1797 1795 if (!free_pages) {
1798 1796 return;
1799 1797 }
1800 1798
1801 1799 len = eaddr - saddr;
1802 1800 off = (saddr - r_saddr) + r_objoff;
1803 1801 free_vp_pages(vp, off, len);
1804 1802 }
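
/*
 * Illustrative arithmetic for the region callback above (hypothetical
 * values): with r_saddr == 0x10000 and r_objoff == 0x2000, a HAT
 * callback covering saddr == 0x12000 .. eaddr == 0x14000 frees
 * len == 0x2000 bytes of vnode pages starting at object offset
 * off == (0x12000 - 0x10000) + 0x2000 == 0x4000.
 */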
1805 1803
1806 1804 /*
1807 1805 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1808 1806 * those pages actually processed by the HAT
1809 1807 */
1810 1808 static void
1811 1809 segvn_hat_unload_callback(hat_callback_t *cb)
1812 1810 {
1813 1811 struct seg *seg = cb->hcb_data;
1814 1812 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1815 1813 size_t len;
1816 1814 u_offset_t off;
1817 1815
1818 1816 ASSERT(svd->vp != NULL);
1819 1817 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1820 1818 ASSERT(cb->hcb_start_addr >= seg->s_base);
1821 1819
1822 1820 len = cb->hcb_end_addr - cb->hcb_start_addr;
1823 1821 off = cb->hcb_start_addr - seg->s_base;
1824 1822 free_vp_pages(svd->vp, svd->offset + off, len);
1825 1823 }
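
/*
 * The same arithmetic, segment-relative (hypothetical values): with
 * seg->s_base == 0x20000 and svd->offset == 0x1000, a callback range
 * of 0x24000 .. 0x26000 frees len == 0x2000 bytes of vnode pages at
 * file offset 0x1000 + (0x24000 - 0x20000) == 0x5000.
 */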
1826 1824
1827 1825 /*
1828 1826 * This function determines the number of bytes of swap reserved by
1829 1827 * a segment for which per-page accounting is present. It is used to
1830 1828 * calculate the correct value of a segvn_data's swresv.
1831 1829 */
1832 1830 static size_t
1833 1831 segvn_count_swap_by_vpages(struct seg *seg)
1834 1832 {
1835 1833 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1836 1834 struct vpage *vp, *evp;
1837 1835 size_t nswappages = 0;
1838 1836
1839 1837 ASSERT(svd->pageswap);
1840 1838 ASSERT(svd->vpage != NULL);
1841 1839
1842 1840 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1843 1841
1844 1842 for (vp = svd->vpage; vp < evp; vp++) {
1845 1843 if (VPP_ISSWAPRES(vp))
1846 1844 nswappages++;
1847 1845 }
1848 1846
1849 1847 return (nswappages << PAGESHIFT);
1850 1848 }
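
/*
 * For illustration, assuming 4K pages (PAGESHIFT == 12): a segment
 * whose vpage array has 3 entries with VPP_ISSWAPRES set yields
 * 3 << 12 == 12288 bytes - exactly 3 pages of per-page swap
 * reservation to be counted in svd->swresv.
 */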
1851 1849
1852 1850 static int
1853 1851 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1854 1852 {
1855 1853 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1856 1854 struct segvn_data *nsvd;
1857 1855 struct seg *nseg;
1858 1856 struct anon_map *amp;
1859 1857 pgcnt_t opages; /* old segment size in pages */
1860 1858 pgcnt_t npages; /* new segment size in pages */
1861 1859 pgcnt_t dpages; /* pages being deleted (unmapped) */
1862 1860 hat_callback_t callback; /* used for free_vp_pages() */
1863 1861 hat_callback_t *cbp = NULL;
1864 1862 caddr_t nbase;
1865 1863 size_t nsize;
1866 1864 size_t oswresv;
1867 1865 int reclaim = 1;
1868 1866
1869 1867 /*
1870 1868 * We don't need any segment level locks for "segvn" data
1871 1869 * since the address space is "write" locked.
1872 1870 */
1873 1871 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1874 1872
1875 1873 /*
1876 1874 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1877 1875 * softlockcnt is protected from change by the as write lock.
1878 1876 */
1879 1877 retry:
1880 1878 if (svd->softlockcnt > 0) {
1881 1879 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1882 1880
1883 1881 /*
1884 1882		 * If this is a shared segment, a non-zero softlockcnt
1885 1883		 * means locked pages are still in use.
1886 1884 */
1887 1885 if (svd->type == MAP_SHARED) {
1888 1886 return (EAGAIN);
1889 1887 }
1890 1888
1891 1889		 * Since we do have the writers lock, nobody can fill
1892 1890 * since we do have the writers lock nobody can fill
1893 1891 * the cache during the purge. The flush either succeeds
1894 1892 * or we still have pending I/Os.
1895 1893 */
1896 1894 if (reclaim == 1) {
1897 1895 segvn_purge(seg);
1898 1896 reclaim = 0;
1899 1897 goto retry;
1900 1898 }
1901 1899 return (EAGAIN);
1902 1900 }
1903 1901
1904 1902 /*
1905 1903 * Check for bad sizes
1906 1904 */
1907 1905 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1908 1906 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1909 1907 panic("segvn_unmap");
1910 1908 /*NOTREACHED*/
1911 1909 }
1912 1910
1913 1911 if (seg->s_szc != 0) {
1914 1912 size_t pgsz = page_get_pagesize(seg->s_szc);
1915 1913 int err;
1916 1914 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1917 1915 ASSERT(seg->s_base != addr || seg->s_size != len);
1918 1916 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1919 1917 ASSERT(svd->amp == NULL);
1920 1918 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1921 1919 hat_leave_region(seg->s_as->a_hat,
1922 1920 svd->rcookie, HAT_REGION_TEXT);
1923 1921 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1924 1922 /*
1925 1923 * could pass a flag to segvn_demote_range()
1926 1924 * below to tell it not to do any unloads but
1927 1925 * this case is rare enough to not bother for
1928 1926 * now.
1929 1927 */
1930 1928 } else if (svd->tr_state == SEGVN_TR_INIT) {
1931 1929 svd->tr_state = SEGVN_TR_OFF;
1932 1930 } else if (svd->tr_state == SEGVN_TR_ON) {
1933 1931 ASSERT(svd->amp != NULL);
1934 1932 segvn_textunrepl(seg, 1);
1935 1933 ASSERT(svd->amp == NULL);
1936 1934 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1937 1935 }
1938 1936 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1939 1937 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1940 1938 if (err == 0) {
1941 1939 return (IE_RETRY);
1942 1940 }
1943 1941 return (err);
1944 1942 }
1945 1943 }
1946 1944
1947 1945 /* Inform the vnode of the unmapping. */
1948 1946 if (svd->vp) {
1949 1947 int error;
1950 1948
1951 1949 error = VOP_DELMAP(svd->vp,
1952 1950 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1953 1951 seg->s_as, addr, len, svd->prot, svd->maxprot,
1954 1952 svd->type, svd->cred, NULL);
1955 1953
1956 1954 if (error == EAGAIN)
1957 1955 return (error);
1958 1956 }
1959 1957
1960 1958 /*
1961 1959 * Remove any page locks set through this mapping.
1962 1960 * If text replication is not off no page locks could have been
1963 1961 * established via this mapping.
1964 1962 */
1965 1963 if (svd->tr_state == SEGVN_TR_OFF) {
1966 1964 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1967 1965 }
1968 1966
1969 1967 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1970 1968 ASSERT(svd->amp == NULL);
1971 1969 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1972 1970 ASSERT(svd->type == MAP_PRIVATE);
1973 1971 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1974 1972 HAT_REGION_TEXT);
1975 1973 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1976 1974 } else if (svd->tr_state == SEGVN_TR_ON) {
1977 1975 ASSERT(svd->amp != NULL);
1978 1976 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1979 1977 segvn_textunrepl(seg, 1);
1980 1978 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1981 1979 } else {
1982 1980 if (svd->tr_state != SEGVN_TR_OFF) {
1983 1981 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1984 1982 svd->tr_state = SEGVN_TR_OFF;
1985 1983 }
1986 1984 /*
1987 1985 * Unload any hardware translations in the range to be taken
1988 1986 * out. Use a callback to invoke free_vp_pages() effectively.
1989 1987 */
1990 1988 if (svd->vp != NULL && free_pages != 0) {
1991 1989 callback.hcb_data = seg;
1992 1990 callback.hcb_function = segvn_hat_unload_callback;
1993 1991 cbp = &callback;
1994 1992 }
1995 1993 hat_unload_callback(seg->s_as->a_hat, addr, len,
1996 1994 HAT_UNLOAD_UNMAP, cbp);
1997 1995
1998 1996 if (svd->type == MAP_SHARED && svd->vp != NULL &&
1999 1997 (svd->vp->v_flag & VVMEXEC) &&
2000 1998 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
2001 1999 segvn_inval_trcache(svd->vp);
2002 2000 }
2003 2001 }
2004 2002
2005 2003 /*
2006 2004 * Check for entire segment
2007 2005 */
2008 2006 if (addr == seg->s_base && len == seg->s_size) {
2009 2007 seg_free(seg);
2010 2008 return (0);
2011 2009 }
2012 2010
2013 2011 opages = seg_pages(seg);
2014 2012 dpages = btop(len);
2015 2013 npages = opages - dpages;
2016 2014 amp = svd->amp;
2017 2015 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
2018 2016
2019 2017 /*
2020 2018 * Check for beginning of segment
2021 2019 */
2022 2020 if (addr == seg->s_base) {
2023 2021 if (svd->vpage != NULL) {
2024 2022 size_t nbytes;
2025 2023 struct vpage *ovpage;
2026 2024
2027 2025 ovpage = svd->vpage; /* keep pointer to vpage */
2028 2026
2029 2027 nbytes = vpgtob(npages);
2030 2028 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2031 2029 bcopy(&ovpage[dpages], svd->vpage, nbytes);
2032 2030
2033 2031 /* free up old vpage */
2034 2032 kmem_free(ovpage, vpgtob(opages));
2035 2033 }
2036 2034 if (amp != NULL) {
2037 2035			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2038 2036 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2039 2037 /*
2040 2038 * Shared anon map is no longer in use. Before
2041 2039 * freeing its pages purge all entries from
2042 2040 * pcache that belong to this amp.
2043 2041 */
2044 2042 if (svd->type == MAP_SHARED) {
2045 2043 ASSERT(amp->refcnt == 1);
2046 2044 ASSERT(svd->softlockcnt == 0);
2047 2045 anonmap_purge(amp);
2048 2046 }
2049 2047 /*
2050 2048 * Free up now unused parts of anon_map array.
2051 2049 */
2052 2050 if (amp->a_szc == seg->s_szc) {
2053 2051 if (seg->s_szc != 0) {
2054 2052 anon_free_pages(amp->ahp,
2055 2053 svd->anon_index, len,
2056 2054 seg->s_szc);
2057 2055 } else {
2058 2056 anon_free(amp->ahp,
2059 2057 svd->anon_index,
2060 2058 len);
2061 2059 }
2062 2060 } else {
2063 2061 ASSERT(svd->type == MAP_SHARED);
2064 2062 ASSERT(amp->a_szc > seg->s_szc);
2065 2063 anon_shmap_free_pages(amp,
2066 2064 svd->anon_index, len);
2067 2065 }
2068 2066
2069 2067 /*
2070 2068 * Unreserve swap space for the
2071 2069 * unmapped chunk of this segment in
2072 2070 * case it's MAP_SHARED
2073 2071 */
2074 2072 if (svd->type == MAP_SHARED) {
2075 2073 anon_unresv_zone(len,
2076 2074 seg->s_as->a_proc->p_zone);
2077 2075 amp->swresv -= len;
2078 2076 }
2079 2077 }
2080 2078			ANON_LOCK_EXIT(&amp->a_rwlock);
2081 2079 svd->anon_index += dpages;
2082 2080 }
2083 2081 if (svd->vp != NULL)
2084 2082 svd->offset += len;
2085 2083
2086 2084 seg->s_base += len;
2087 2085 seg->s_size -= len;
2088 2086
2089 2087 if (svd->swresv) {
2090 2088 if (svd->flags & MAP_NORESERVE) {
2091 2089 ASSERT(amp);
2092 2090 oswresv = svd->swresv;
2093 2091
2094 2092 svd->swresv = ptob(anon_pages(amp->ahp,
2095 2093 svd->anon_index, npages));
2096 2094 anon_unresv_zone(oswresv - svd->swresv,
2097 2095 seg->s_as->a_proc->p_zone);
2098 2096 if (SEG_IS_PARTIAL_RESV(seg))
2099 2097 seg->s_as->a_resvsize -= oswresv -
2100 2098 svd->swresv;
2101 2099 } else {
2102 2100 size_t unlen;
2103 2101
2104 2102 if (svd->pageswap) {
2105 2103 oswresv = svd->swresv;
2106 2104 svd->swresv =
2107 2105 segvn_count_swap_by_vpages(seg);
2108 2106 ASSERT(oswresv >= svd->swresv);
2109 2107 unlen = oswresv - svd->swresv;
2110 2108 } else {
2111 2109 svd->swresv -= len;
2112 2110 ASSERT(svd->swresv == seg->s_size);
2113 2111 unlen = len;
2114 2112 }
2115 2113 anon_unresv_zone(unlen,
2116 2114 seg->s_as->a_proc->p_zone);
2117 2115 }
2118 2116 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2119 2117 seg, len, 0);
2120 2118 }
2121 2119
2122 2120 return (0);
2123 2121 }
2124 2122
2125 2123 /*
2126 2124 * Check for end of segment
2127 2125 */
2128 2126 if (addr + len == seg->s_base + seg->s_size) {
2129 2127 if (svd->vpage != NULL) {
2130 2128 size_t nbytes;
2131 2129 struct vpage *ovpage;
2132 2130
2133 2131 ovpage = svd->vpage; /* keep pointer to vpage */
2134 2132
2135 2133 nbytes = vpgtob(npages);
2136 2134 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2137 2135 bcopy(ovpage, svd->vpage, nbytes);
2138 2136
2139 2137 /* free up old vpage */
2140 2138 kmem_free(ovpage, vpgtob(opages));
2141 2139
2142 2140 }
2143 2141 if (amp != NULL) {
2144 2142			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2145 2143 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2146 2144 /*
2147 2145 * Free up now unused parts of anon_map array.
2148 2146 */
2149 2147 ulong_t an_idx = svd->anon_index + npages;
2150 2148
2151 2149 /*
2152 2150 * Shared anon map is no longer in use. Before
2153 2151 * freeing its pages purge all entries from
2154 2152 * pcache that belong to this amp.
2155 2153 */
2156 2154 if (svd->type == MAP_SHARED) {
2157 2155 ASSERT(amp->refcnt == 1);
2158 2156 ASSERT(svd->softlockcnt == 0);
2159 2157 anonmap_purge(amp);
2160 2158 }
2161 2159
2162 2160 if (amp->a_szc == seg->s_szc) {
2163 2161 if (seg->s_szc != 0) {
2164 2162 anon_free_pages(amp->ahp,
2165 2163 an_idx, len,
2166 2164 seg->s_szc);
2167 2165 } else {
2168 2166 anon_free(amp->ahp, an_idx,
2169 2167 len);
2170 2168 }
2171 2169 } else {
2172 2170 ASSERT(svd->type == MAP_SHARED);
2173 2171 ASSERT(amp->a_szc > seg->s_szc);
2174 2172 anon_shmap_free_pages(amp,
2175 2173 an_idx, len);
2176 2174 }
2177 2175
2178 2176 /*
2179 2177 * Unreserve swap space for the
2180 2178 * unmapped chunk of this segment in
2181 2179 * case it's MAP_SHARED
2182 2180 */
2183 2181 if (svd->type == MAP_SHARED) {
2184 2182 anon_unresv_zone(len,
2185 2183 seg->s_as->a_proc->p_zone);
2186 2184 amp->swresv -= len;
2187 2185 }
2188 2186 }
2189 2187			ANON_LOCK_EXIT(&amp->a_rwlock);
2190 2188 }
2191 2189
2192 2190 seg->s_size -= len;
2193 2191
2194 2192 if (svd->swresv) {
2195 2193 if (svd->flags & MAP_NORESERVE) {
2196 2194 ASSERT(amp);
2197 2195 oswresv = svd->swresv;
2198 2196 svd->swresv = ptob(anon_pages(amp->ahp,
2199 2197 svd->anon_index, npages));
2200 2198 anon_unresv_zone(oswresv - svd->swresv,
2201 2199 seg->s_as->a_proc->p_zone);
2202 2200 if (SEG_IS_PARTIAL_RESV(seg))
2203 2201 seg->s_as->a_resvsize -= oswresv -
2204 2202 svd->swresv;
2205 2203 } else {
2206 2204 size_t unlen;
2207 2205
2208 2206 if (svd->pageswap) {
2209 2207 oswresv = svd->swresv;
2210 2208 svd->swresv =
2211 2209 segvn_count_swap_by_vpages(seg);
2212 2210 ASSERT(oswresv >= svd->swresv);
2213 2211 unlen = oswresv - svd->swresv;
2214 2212 } else {
2215 2213 svd->swresv -= len;
2216 2214 ASSERT(svd->swresv == seg->s_size);
2217 2215 unlen = len;
2218 2216 }
2219 2217 anon_unresv_zone(unlen,
2220 2218 seg->s_as->a_proc->p_zone);
2221 2219 }
2222 2220 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2223 2221 "anon proc:%p %lu %u", seg, len, 0);
2224 2222 }
2225 2223
2226 2224 return (0);
2227 2225 }
2228 2226
2229 2227 /*
2230 2228	 * The section to go is in the middle of the segment,
2231 2229	 * so we have to split it into two segments.  nseg is made
2232 2230	 * for the high end while seg is cut down at the low end.
2233 2231 */
2234 2232 nbase = addr + len; /* new seg base */
2235 2233 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2236 2234 seg->s_size = addr - seg->s_base; /* shrink old seg */
2237 2235 nseg = seg_alloc(seg->s_as, nbase, nsize);
2238 2236 if (nseg == NULL) {
2239 2237 panic("segvn_unmap seg_alloc");
2240 2238 /*NOTREACHED*/
2241 2239 }
2242 2240 nseg->s_ops = seg->s_ops;
2243 2241 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2244 2242 nseg->s_data = (void *)nsvd;
2245 2243 nseg->s_szc = seg->s_szc;
2246 2244 *nsvd = *svd;
2247 2245 nsvd->seg = nseg;
2248 2246 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2249 2247 nsvd->swresv = 0;
2250 2248 nsvd->softlockcnt = 0;
2251 2249 nsvd->softlockcnt_sbase = 0;
2252 2250 nsvd->softlockcnt_send = 0;
2253 2251 nsvd->svn_inz = svd->svn_inz;
2254 2252 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2255 2253
2256 2254 if (svd->vp != NULL) {
2257 2255 VN_HOLD(nsvd->vp);
2258 2256 if (nsvd->type == MAP_SHARED)
2259 2257 lgrp_shm_policy_init(NULL, nsvd->vp);
2260 2258 }
2261 2259 crhold(svd->cred);
2262 2260
2263 2261 if (svd->vpage == NULL) {
2264 2262 nsvd->vpage = NULL;
2265 2263 } else {
2266 2264 /* need to split vpage into two arrays */
2267 2265 size_t nbytes;
2268 2266 struct vpage *ovpage;
2269 2267
2270 2268 ovpage = svd->vpage; /* keep pointer to vpage */
2271 2269
2272 2270 npages = seg_pages(seg); /* seg has shrunk */
2273 2271 nbytes = vpgtob(npages);
2274 2272 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2275 2273
2276 2274 bcopy(ovpage, svd->vpage, nbytes);
2277 2275
2278 2276 npages = seg_pages(nseg);
2279 2277 nbytes = vpgtob(npages);
2280 2278 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2281 2279
2282 2280 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2283 2281
2284 2282 /* free up old vpage */
2285 2283 kmem_free(ovpage, vpgtob(opages));
2286 2284 }
2287 2285
2288 2286 if (amp == NULL) {
2289 2287 nsvd->amp = NULL;
2290 2288 nsvd->anon_index = 0;
2291 2289 } else {
2292 2290 /*
2293 2291 * Need to create a new anon map for the new segment.
2294 2292 * We'll also allocate a new smaller array for the old
2295 2293 * smaller segment to save space.
2296 2294 */
2297 2295 opages = btop((uintptr_t)(addr - seg->s_base));
2298 2296		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2299 2297 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2300 2298 /*
2301 2299 * Free up now unused parts of anon_map array.
2302 2300 */
2303 2301 ulong_t an_idx = svd->anon_index + opages;
2304 2302
2305 2303 /*
2306 2304 * Shared anon map is no longer in use. Before
2307 2305 * freeing its pages purge all entries from
2308 2306 * pcache that belong to this amp.
2309 2307 */
2310 2308 if (svd->type == MAP_SHARED) {
2311 2309 ASSERT(amp->refcnt == 1);
2312 2310 ASSERT(svd->softlockcnt == 0);
2313 2311 anonmap_purge(amp);
2314 2312 }
2315 2313
2316 2314 if (amp->a_szc == seg->s_szc) {
2317 2315 if (seg->s_szc != 0) {
2318 2316 anon_free_pages(amp->ahp, an_idx, len,
2319 2317 seg->s_szc);
2320 2318 } else {
2321 2319 anon_free(amp->ahp, an_idx,
2322 2320 len);
2323 2321 }
2324 2322 } else {
2325 2323 ASSERT(svd->type == MAP_SHARED);
2326 2324 ASSERT(amp->a_szc > seg->s_szc);
2327 2325 anon_shmap_free_pages(amp, an_idx, len);
2328 2326 }
2329 2327
2330 2328 /*
2331 2329 * Unreserve swap space for the
2332 2330 * unmapped chunk of this segment in
2333 2331 * case it's MAP_SHARED
2334 2332 */
2335 2333 if (svd->type == MAP_SHARED) {
2336 2334 anon_unresv_zone(len,
2337 2335 seg->s_as->a_proc->p_zone);
2338 2336 amp->swresv -= len;
2339 2337 }
2340 2338 }
2341 2339 nsvd->anon_index = svd->anon_index +
2342 2340 btop((uintptr_t)(nseg->s_base - seg->s_base));
2343 2341 if (svd->type == MAP_SHARED) {
2344 2342 amp->refcnt++;
2345 2343 nsvd->amp = amp;
2346 2344 } else {
2347 2345 struct anon_map *namp;
2348 2346 struct anon_hdr *nahp;
2349 2347
2350 2348 ASSERT(svd->type == MAP_PRIVATE);
2351 2349 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2352 2350 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2353 2351 namp->a_szc = seg->s_szc;
2354 2352 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2355 2353 0, btop(seg->s_size), ANON_SLEEP);
2356 2354 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2357 2355 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2358 2356 anon_release(amp->ahp, btop(amp->size));
2359 2357 svd->anon_index = 0;
2360 2358 nsvd->anon_index = 0;
2361 2359 amp->ahp = nahp;
2362 2360 amp->size = seg->s_size;
2363 2361 nsvd->amp = namp;
2364 2362 }
2365 2363		ANON_LOCK_EXIT(&amp->a_rwlock);
2366 2364 }
2367 2365 if (svd->swresv) {
2368 2366 if (svd->flags & MAP_NORESERVE) {
2369 2367 ASSERT(amp);
2370 2368 oswresv = svd->swresv;
2371 2369 svd->swresv = ptob(anon_pages(amp->ahp,
2372 2370 svd->anon_index, btop(seg->s_size)));
2373 2371 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2374 2372 nsvd->anon_index, btop(nseg->s_size)));
2375 2373 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2376 2374 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2377 2375 seg->s_as->a_proc->p_zone);
2378 2376 if (SEG_IS_PARTIAL_RESV(seg))
2379 2377 seg->s_as->a_resvsize -= oswresv -
2380 2378 (svd->swresv + nsvd->swresv);
2381 2379 } else {
2382 2380 size_t unlen;
2383 2381
2384 2382 if (svd->pageswap) {
2385 2383 oswresv = svd->swresv;
2386 2384 svd->swresv = segvn_count_swap_by_vpages(seg);
2387 2385 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2388 2386 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2389 2387 unlen = oswresv - (svd->swresv + nsvd->swresv);
2390 2388 } else {
2391 2389 if (seg->s_size + nseg->s_size + len !=
2392 2390 svd->swresv) {
2393 2391 panic("segvn_unmap: cannot split "
2394 2392 "swap reservation");
2395 2393 /*NOTREACHED*/
2396 2394 }
2397 2395 svd->swresv = seg->s_size;
2398 2396 nsvd->swresv = nseg->s_size;
2399 2397 unlen = len;
2400 2398 }
2401 2399 anon_unresv_zone(unlen,
2402 2400 seg->s_as->a_proc->p_zone);
2403 2401 }
2404 2402 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2405 2403 seg, len, 0);
2406 2404 }
2407 2405
2408 2406 return (0); /* I'm glad that's all over with! */
2409 2407 }
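
/*
 * Middle-split geometry for the final case of segvn_unmap() above,
 * with hypothetical values: if seg covers [base, base + 0x30000) and
 * the caller unmaps addr == base + 0x10000, len == 0x8000, then
 *
 *	nbase        = addr + len               = base + 0x18000
 *	nsize        = (base + 0x30000) - nbase = 0x18000
 *	seg->s_size  = addr - base              = 0x10000
 *	nsvd->offset = svd->offset + 0x18000
 *
 * leaving seg as [base, base + 0x10000) and nseg as
 * [base + 0x18000, base + 0x30000), with nseg's vnode offset advanced
 * to match.
 */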
2410 2408
2411 2409 static void
2412 2410 segvn_free(struct seg *seg)
2413 2411 {
2414 2412 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2415 2413 pgcnt_t npages = seg_pages(seg);
2416 2414 struct anon_map *amp;
2417 2415 size_t len;
2418 2416
2419 2417 /*
2420 2418 * We don't need any segment level locks for "segvn" data
2421 2419 * since the address space is "write" locked.
2422 2420 */
2423 2421 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2424 2422 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2425 2423
2426 2424 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2427 2425
2428 2426 /*
2429 2427 * Be sure to unlock pages. XXX Why do things get free'ed instead
2430 2428 * of unmapped? XXX
2431 2429 */
2432 2430 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2433 2431 0, MC_UNLOCK, NULL, 0);
2434 2432
2435 2433 /*
2436 2434 * Deallocate the vpage and anon pointers if necessary and possible.
2437 2435 */
2438 2436 if (svd->vpage != NULL) {
2439 2437 kmem_free(svd->vpage, vpgtob(npages));
2440 2438 svd->vpage = NULL;
2441 2439 }
2442 2440 if ((amp = svd->amp) != NULL) {
2443 2441 /*
2444 2442 * If there are no more references to this anon_map
2445 2443 * structure, then deallocate the structure after freeing
2446 2444 * up all the anon slot pointers that we can.
2447 2445 */
2448 2446		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2449 2447 ASSERT(amp->a_szc >= seg->s_szc);
2450 2448 if (--amp->refcnt == 0) {
2451 2449 if (svd->type == MAP_PRIVATE) {
2452 2450 /*
2453 2451 * Private - we only need to anon_free
2454 2452 * the part that this segment refers to.
2455 2453 */
2456 2454 if (seg->s_szc != 0) {
2457 2455 anon_free_pages(amp->ahp,
2458 2456 svd->anon_index, seg->s_size,
2459 2457 seg->s_szc);
2460 2458 } else {
2461 2459 anon_free(amp->ahp, svd->anon_index,
2462 2460 seg->s_size);
2463 2461 }
2464 2462 } else {
2465 2463
2466 2464 /*
2467 2465 * Shared anon map is no longer in use. Before
2468 2466 * freeing its pages purge all entries from
2469 2467 * pcache that belong to this amp.
2470 2468 */
2471 2469 ASSERT(svd->softlockcnt == 0);
2472 2470 anonmap_purge(amp);
2473 2471
2474 2472 /*
2475 2473 * Shared - anon_free the entire
2476 2474 * anon_map's worth of stuff and
2477 2475 * release any swap reservation.
2478 2476 */
2479 2477 if (amp->a_szc != 0) {
2480 2478 anon_shmap_free_pages(amp, 0,
2481 2479 amp->size);
2482 2480 } else {
2483 2481 anon_free(amp->ahp, 0, amp->size);
2484 2482 }
2485 2483 if ((len = amp->swresv) != 0) {
2486 2484 anon_unresv_zone(len,
2487 2485 seg->s_as->a_proc->p_zone);
2488 2486 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2489 2487 "anon proc:%p %lu %u", seg, len, 0);
2490 2488 }
2491 2489 }
2492 2490 svd->amp = NULL;
2493 2491			ANON_LOCK_EXIT(&amp->a_rwlock);
2494 2492 anonmap_free(amp);
2495 2493 } else if (svd->type == MAP_PRIVATE) {
2496 2494 /*
2497 2495 * We had a private mapping which still has
2498 2496 * a held anon_map so just free up all the
2499 2497 * anon slot pointers that we were using.
2500 2498 */
2501 2499 if (seg->s_szc != 0) {
2502 2500 anon_free_pages(amp->ahp, svd->anon_index,
2503 2501 seg->s_size, seg->s_szc);
2504 2502 } else {
2505 2503 anon_free(amp->ahp, svd->anon_index,
2506 2504 seg->s_size);
2507 2505 }
2508 2506			ANON_LOCK_EXIT(&amp->a_rwlock);
2509 2507 } else {
2510 2508			ANON_LOCK_EXIT(&amp->a_rwlock);
2511 2509 }
2512 2510 }
2513 2511
2514 2512 /*
2515 2513 * Release swap reservation.
2516 2514 */
2517 2515 if ((len = svd->swresv) != 0) {
2518 2516 anon_unresv_zone(svd->swresv,
2519 2517 seg->s_as->a_proc->p_zone);
2520 2518 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2521 2519 seg, len, 0);
2522 2520 if (SEG_IS_PARTIAL_RESV(seg))
2523 2521 seg->s_as->a_resvsize -= svd->swresv;
2524 2522 svd->swresv = 0;
2525 2523 }
2526 2524 /*
2527 2525 * Release claim on vnode, credentials, and finally free the
2528 2526 * private data.
2529 2527 */
2530 2528 if (svd->vp != NULL) {
2531 2529 if (svd->type == MAP_SHARED)
2532 2530 lgrp_shm_policy_fini(NULL, svd->vp);
2533 2531 VN_RELE(svd->vp);
2534 2532 svd->vp = NULL;
2535 2533 }
2536 2534 crfree(svd->cred);
2537 2535 svd->pageprot = 0;
2538 2536 svd->pageadvice = 0;
2539 2537 svd->pageswap = 0;
2540 2538 svd->cred = NULL;
2541 2539
2542 2540 /*
2543 2541 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2544 2542 * still working with this segment without holding as lock (in case
2545 2543 * it's called by pcache async thread).
2546 2544 */
2547 2545 ASSERT(svd->softlockcnt == 0);
2548 2546 mutex_enter(&svd->segfree_syncmtx);
2549 2547 mutex_exit(&svd->segfree_syncmtx);
2550 2548
2551 2549 seg->s_data = NULL;
2552 2550 kmem_cache_free(segvn_cache, svd);
2553 2551 }
2554 2552
2555 2553 /*
2556 2554  * Do an F_SOFTUNLOCK call over the range requested.  The range must have
2557 2555 * already been F_SOFTLOCK'ed.
2558 2556 * Caller must always match addr and len of a softunlock with a previous
2559 2557 * softlock with exactly the same addr and len.
2560 2558 */
2561 2559 static void
2562 2560 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2563 2561 {
2564 2562 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2565 2563 page_t *pp;
2566 2564 caddr_t adr;
2567 2565 struct vnode *vp;
2568 2566 u_offset_t offset;
2569 2567 ulong_t anon_index;
2570 2568 struct anon_map *amp;
2571 2569 struct anon *ap = NULL;
2572 2570
2573 2571 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2574 2572 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2575 2573
2576 2574 if ((amp = svd->amp) != NULL)
2577 2575 anon_index = svd->anon_index + seg_page(seg, addr);
2578 2576
2579 2577 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2580 2578 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2581 2579 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2582 2580 } else {
2583 2581 hat_unlock(seg->s_as->a_hat, addr, len);
2584 2582 }
2585 2583 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2586 2584 if (amp != NULL) {
2587 2585			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2588 2586 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2589 2587 != NULL) {
2590 2588 swap_xlate(ap, &vp, &offset);
2591 2589 } else {
2592 2590 vp = svd->vp;
2593 2591 offset = svd->offset +
2594 2592 (uintptr_t)(adr - seg->s_base);
2595 2593 }
2596 2594			ANON_LOCK_EXIT(&amp->a_rwlock);
2597 2595 } else {
2598 2596 vp = svd->vp;
2599 2597 offset = svd->offset +
2600 2598 (uintptr_t)(adr - seg->s_base);
2601 2599 }
2602 2600
2603 2601 /*
2604 2602 * Use page_find() instead of page_lookup() to
2605 2603 * find the page since we know that it is locked.
2606 2604 */
2607 2605 pp = page_find(vp, offset);
2608 2606 if (pp == NULL) {
2609 2607 panic(
2610 2608 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2611 2609 (void *)adr, (void *)ap, (void *)vp, offset);
2612 2610 /*NOTREACHED*/
2613 2611 }
2614 2612
2615 2613 if (rw == S_WRITE) {
2616 2614 hat_setrefmod(pp);
2617 2615 if (seg->s_as->a_vbits)
2618 2616 hat_setstat(seg->s_as, adr, PAGESIZE,
2619 2617 P_REF | P_MOD);
2620 2618 } else if (rw != S_OTHER) {
2621 2619 hat_setref(pp);
2622 2620 if (seg->s_as->a_vbits)
2623 2621 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2624 2622 }
2625 2623 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2626 2624 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2627 2625 page_unlock(pp);
2628 2626 }
2629 2627 ASSERT(svd->softlockcnt >= btop(len));
2630 2628 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2631 2629 /*
2632 2630 * All SOFTLOCKS are gone. Wakeup any waiting
2633 2631 * unmappers so they can try again to unmap.
2634 2632 * Check for waiters first without the mutex
2635 2633 * held so we don't always grab the mutex on
2636 2634 * softunlocks.
2637 2635 */
2638 2636 if (AS_ISUNMAPWAIT(seg->s_as)) {
2639 2637 mutex_enter(&seg->s_as->a_contents);
2640 2638 if (AS_ISUNMAPWAIT(seg->s_as)) {
2641 2639 AS_CLRUNMAPWAIT(seg->s_as);
2642 2640 cv_broadcast(&seg->s_as->a_cv);
2643 2641 }
2644 2642 mutex_exit(&seg->s_as->a_contents);
2645 2643 }
2646 2644 }
2647 2645 }
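
/*
 * For illustration, assuming 4K pages: a softunlock of len == 0x4000
 * drops svd->softlockcnt by btop(0x4000) == 4, and the unmap-waiter
 * broadcast fires only when the count reaches zero - that is, once
 * every prior F_SOFTLOCK on this segment has been matched by an
 * F_SOFTUNLOCK with the same addr and len.
 */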
2648 2646
2649 2647 #define PAGE_HANDLED ((page_t *)-1)
2650 2648
2651 2649 /*
2652 2650 * Release all the pages in the NULL terminated ppp list
2653 2651 * which haven't already been converted to PAGE_HANDLED.
2654 2652 */
2655 2653 static void
2656 2654 segvn_pagelist_rele(page_t **ppp)
2657 2655 {
2658 2656 for (; *ppp != NULL; ppp++) {
2659 2657 if (*ppp != PAGE_HANDLED)
2660 2658 page_unlock(*ppp);
2661 2659 }
2662 2660 }
2663 2661
2664 2662 static int stealcow = 1;
2665 2663
2666 2664 /*
2667 2665 * Workaround for viking chip bug. See bug id 1220902.
2668 2666  * To fix this down in pagefault() would require importing so much
2669 2667  * of the as (address space) and segvn code as to be unmaintainable.
2670 2668 */
2671 2669 int enable_mbit_wa = 0;
2672 2670
2673 2671 /*
2674 2672 * Handles all the dirty work of getting the right
2675 2673 * anonymous pages and loading up the translations.
2676 2674 * This routine is called only from segvn_fault()
2677 2675 * when looping over the range of addresses requested.
2678 2676 *
2679 2677 * The basic algorithm here is:
2680 2678 * If this is an anon_zero case
2681 2679 * Call anon_zero to allocate page
2682 2680 * Load up translation
2683 2681 * Return
2684 2682 * endif
2685 2683 * If this is an anon page
2686 2684 * Use anon_getpage to get the page
2687 2685 * else
2688 2686 * Find page in pl[] list passed in
2689 2687 * endif
2690 2688 * If not a cow
2691 2689 * Load up the translation to the page
2692 2690 * return
2693 2691 * endif
2694 2692 * Call anon_private to handle cow
2695 2693 * Load up (writable) translation to new page
2696 2694 */
2697 2695 static faultcode_t
2698 2696 segvn_faultpage(
2699 2697 struct hat *hat, /* the hat to use for mapping */
2700 2698 struct seg *seg, /* seg_vn of interest */
2701 2699 caddr_t addr, /* address in as */
2702 2700 u_offset_t off, /* offset in vp */
2703 2701 struct vpage *vpage, /* pointer to vpage for vp, off */
2704 2702 page_t *pl[], /* object source page pointer */
2705 2703 uint_t vpprot, /* access allowed to object pages */
2706 2704 enum fault_type type, /* type of fault */
2707 2705 enum seg_rw rw, /* type of access at fault */
2708 2706 int brkcow) /* we may need to break cow */
2709 2707 {
2710 2708 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2711 2709 page_t *pp, **ppp;
2712 2710 uint_t pageflags = 0;
2713 2711 page_t *anon_pl[1 + 1];
2714 2712 page_t *opp = NULL; /* original page */
2715 2713 uint_t prot;
2716 2714 int err;
2717 2715 int cow;
2718 2716 int claim;
2719 2717 int steal = 0;
2720 2718 ulong_t anon_index;
2721 2719 struct anon *ap, *oldap;
2722 2720 struct anon_map *amp;
2723 2721 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2724 2722 int anon_lock = 0;
2725 2723 anon_sync_obj_t cookie;
2726 2724
2727 2725 if (svd->flags & MAP_TEXT) {
2728 2726 hat_flag |= HAT_LOAD_TEXT;
2729 2727 }
2730 2728
2731 2729 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2732 2730 ASSERT(seg->s_szc == 0);
2733 2731 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2734 2732
2735 2733 /*
2736 2734 * Initialize protection value for this page.
2737 2735 * If we have per page protection values check it now.
2738 2736 */
2739 2737 if (svd->pageprot) {
2740 2738 uint_t protchk;
2741 2739
2742 2740 switch (rw) {
2743 2741 case S_READ:
2744 2742 protchk = PROT_READ;
2745 2743 break;
2746 2744 case S_WRITE:
2747 2745 protchk = PROT_WRITE;
2748 2746 break;
2749 2747 case S_EXEC:
2750 2748 protchk = PROT_EXEC;
2751 2749 break;
2752 2750 case S_OTHER:
2753 2751 default:
2754 2752 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2755 2753 break;
2756 2754 }
2757 2755
2758 2756 prot = VPP_PROT(vpage);
2759 2757 if ((prot & protchk) == 0)
2760 2758 return (FC_PROT); /* illegal access type */
2761 2759 } else {
2762 2760 prot = svd->prot;
2763 2761 }
2764 2762
2765 2763 if (type == F_SOFTLOCK) {
2766 2764 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2767 2765 }
2768 2766
2769 2767 /*
2770 2768 * Always acquire the anon array lock to prevent 2 threads from
2771 2769 * allocating separate anon slots for the same "addr".
2772 2770 */
2773 2771
2774 2772 if ((amp = svd->amp) != NULL) {
2775 2773		ASSERT(RW_READ_HELD(&amp->a_rwlock));
2776 2774 anon_index = svd->anon_index + seg_page(seg, addr);
2777 2775 anon_array_enter(amp, anon_index, &cookie);
2778 2776 anon_lock = 1;
2779 2777 }
2780 2778
2781 2779 if (svd->vp == NULL && amp != NULL) {
2782 2780 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2783 2781 /*
2784 2782 * Allocate a (normally) writable anonymous page of
2785 2783 * zeroes. If no advance reservations, reserve now.
2786 2784 */
2787 2785 if (svd->flags & MAP_NORESERVE) {
2788 2786 if (anon_resv_zone(ptob(1),
2789 2787 seg->s_as->a_proc->p_zone)) {
2790 2788 atomic_add_long(&svd->swresv, ptob(1));
2791 2789 atomic_add_long(&seg->s_as->a_resvsize,
2792 2790 ptob(1));
2793 2791 } else {
2794 2792 err = ENOMEM;
2795 2793 goto out;
2796 2794 }
2797 2795 }
2798 2796 if ((pp = anon_zero(seg, addr, &ap,
2799 2797 svd->cred)) == NULL) {
2800 2798 err = ENOMEM;
2801 2799 goto out; /* out of swap space */
2802 2800 }
2803 2801 /*
2804 2802 * Re-acquire the anon_map lock and
2805 2803 * initialize the anon array entry.
2806 2804 */
2807 2805 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2808 2806 ANON_SLEEP);
2809 2807
2810 2808 ASSERT(pp->p_szc == 0);
2811 2809
2812 2810 /*
2813 2811 * Handle pages that have been marked for migration
2814 2812 */
2815 2813 if (lgrp_optimizations())
2816 2814 page_migrate(seg, addr, &pp, 1);
2817 2815
2818 2816 if (enable_mbit_wa) {
2819 2817 if (rw == S_WRITE)
2820 2818 hat_setmod(pp);
2821 2819 else if (!hat_ismod(pp))
2822 2820 prot &= ~PROT_WRITE;
2823 2821 }
2824 2822 /*
2825 2823 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2826 2824 * with MC_LOCKAS, MCL_FUTURE) and this is a
2827 2825 * MAP_NORESERVE segment, we may need to
2828 2826 * permanently lock the page as it is being faulted
2829 2827 * for the first time. The following text applies
2830 2828 * only to MAP_NORESERVE segments:
2831 2829 *
2832 2830 * As per memcntl(2), if this segment was created
2833 2831 * after MCL_FUTURE was applied (a "future"
2834 2832 * segment), its pages must be locked. If this
2835 2833 * segment existed at MCL_FUTURE application (a
2836 2834 * "past" segment), the interface is unclear.
2837 2835 *
2838 2836 * We decide to lock only if vpage is present:
2839 2837 *
2840 2838 * - "future" segments will have a vpage array (see
2841 2839 * as_map), and so will be locked as required
2842 2840 *
2843 2841 * - "past" segments may not have a vpage array,
2844 2842 * depending on whether events (such as
2845 2843 * mprotect) have occurred. Locking if vpage
2846 2844 * exists will preserve legacy behavior. Not
2847 2845 * locking if vpage is absent, will not break
2848 2846 * the interface or legacy behavior. Note that
2849 2847 * allocating vpage here if it's absent requires
2850 2848 * upgrading the segvn reader lock, the cost of
2851 2849 * which does not seem worthwhile.
2852 2850 *
2853 2851 * Usually testing and setting VPP_ISPPLOCK and
2854 2852 * VPP_SETPPLOCK requires holding the segvn lock as
2855 2853 * writer, but in this case all readers are
2856 2854 * serializing on the anon array lock.
2857 2855 */
2858 2856 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2859 2857 (svd->flags & MAP_NORESERVE) &&
2860 2858 !VPP_ISPPLOCK(vpage)) {
2861 2859 proc_t *p = seg->s_as->a_proc;
2862 2860 ASSERT(svd->type == MAP_PRIVATE);
2863 2861 mutex_enter(&p->p_lock);
2864 2862 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2865 2863 1) == 0) {
2866 2864 claim = VPP_PROT(vpage) & PROT_WRITE;
2867 2865 if (page_pp_lock(pp, claim, 0)) {
2868 2866 VPP_SETPPLOCK(vpage);
2869 2867 } else {
2870 2868 rctl_decr_locked_mem(p, NULL,
2871 2869 PAGESIZE, 1);
2872 2870 }
2873 2871 }
2874 2872 mutex_exit(&p->p_lock);
2875 2873 }
2876 2874
2877 2875 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2878 2876 hat_memload(hat, addr, pp, prot, hat_flag);
2879 2877
2880 2878 if (!(hat_flag & HAT_LOAD_LOCK))
2881 2879 page_unlock(pp);
2882 2880
2883 2881 anon_array_exit(&cookie);
2884 2882 return (0);
2885 2883 }
2886 2884 }
2887 2885
2888 2886 /*
2889 2887 * Obtain the page structure via anon_getpage() if it is
2890 2888 * a private copy of an object (the result of a previous
2891 2889 * copy-on-write).
2892 2890 */
2893 2891 if (amp != NULL) {
2894 2892 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2895 2893 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2896 2894 seg, addr, rw, svd->cred);
2897 2895 if (err)
2898 2896 goto out;
2899 2897
2900 2898 if (svd->type == MAP_SHARED) {
2901 2899 /*
2902 2900 * If this is a shared mapping to an
2903 2901 * anon_map, then ignore the write
2904 2902 * permissions returned by anon_getpage().
2905 2903 * They apply to the private mappings
2906 2904 * of this anon_map.
2907 2905 */
2908 2906 vpprot |= PROT_WRITE;
2909 2907 }
2910 2908 opp = anon_pl[0];
2911 2909 }
2912 2910 }
2913 2911
2914 2912 /*
2915 2913 * Search the pl[] list passed in if it is from the
2916 2914 * original object (i.e., not a private copy).
2917 2915 */
2918 2916 if (opp == NULL) {
2919 2917 /*
2920 2918 * Find original page. We must be bringing it in
2921 2919 * from the list in pl[].
2922 2920 */
2923 2921 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2924 2922 if (opp == PAGE_HANDLED)
2925 2923 continue;
2926 2924 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2927 2925 if (opp->p_offset == off)
2928 2926 break;
2929 2927 }
2930 2928 if (opp == NULL) {
2931 2929 panic("segvn_faultpage not found");
2932 2930 /*NOTREACHED*/
2933 2931 }
2934 2932 *ppp = PAGE_HANDLED;
2935 2933
2936 2934 }
2937 2935
2938 2936 ASSERT(PAGE_LOCKED(opp));
2939 2937
2940 2938 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2941 2939 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2942 2940
2943 2941 /*
2944 2942 * The fault is treated as a copy-on-write fault if a
2945 2943 * write occurs on a private segment and the object
2946 2944 * page (i.e., mapping) is write protected. We assume
2947 2945 * that fatal protection checks have already been made.
2948 2946 */
2949 2947
2950 2948 if (brkcow) {
2951 2949 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2952 2950 cow = !(vpprot & PROT_WRITE);
2953 2951 } else if (svd->tr_state == SEGVN_TR_ON) {
2954 2952 /*
2955 2953 * If we are doing text replication COW on first touch.
2956 2954 */
2957 2955 ASSERT(amp != NULL);
2958 2956 ASSERT(svd->vp != NULL);
2959 2957 ASSERT(rw != S_WRITE);
2960 2958 cow = (ap == NULL);
2961 2959 } else {
2962 2960 cow = 0;
2963 2961 }
2964 2962
2965 2963 /*
2966 2964 * If not a copy-on-write case load the translation
2967 2965 * and return.
2968 2966 */
2969 2967 if (cow == 0) {
2970 2968
2971 2969 /*
2972 2970 * Handle pages that have been marked for migration
2973 2971 */
2974 2972 if (lgrp_optimizations())
2975 2973 page_migrate(seg, addr, &opp, 1);
2976 2974
2977 2975 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2978 2976 if (rw == S_WRITE)
2979 2977 hat_setmod(opp);
2980 2978 else if (rw != S_OTHER && !hat_ismod(opp))
2981 2979 prot &= ~PROT_WRITE;
2982 2980 }
2983 2981
2984 2982 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2985 2983 (!svd->pageprot && svd->prot == (prot & vpprot)));
2986 2984 ASSERT(amp == NULL ||
2987 2985 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2988 2986 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2989 2987 svd->rcookie);
2990 2988
2991 2989 if (!(hat_flag & HAT_LOAD_LOCK))
2992 2990 page_unlock(opp);
2993 2991
2994 2992 if (anon_lock) {
2995 2993 anon_array_exit(&cookie);
2996 2994 }
2997 2995 return (0);
2998 2996 }
2999 2997
3000 2998 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3001 2999
3002 3000 hat_setref(opp);
3003 3001
3004 3002 ASSERT(amp != NULL && anon_lock);
3005 3003
3006 3004 /*
3007 3005 * Steal the page only if it isn't a private page
3008 3006 * since stealing a private page is not worth the effort.
3009 3007 */
3010 3008 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
3011 3009 steal = 1;
3012 3010
3013 3011 /*
3014 3012 * Steal the original page if the following conditions are true:
3015 3013 *
3016 3014 * We are low on memory, the page is not private, page is not large,
3017 3015 * not shared, not modified, not `locked' or if we have it `locked'
3018 3016 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3019 3017 * that the page is not shared) and if it doesn't have any
3020 3018 * translations. page_struct_lock isn't needed to look at p_cowcnt
3021 3019 * and p_lckcnt because we first get exclusive lock on page.
3022 3020 */
3023 3021 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
3024 3022
3025 3023 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
3026 3024 page_tryupgrade(opp) && !hat_ismod(opp) &&
3027 3025 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
3028 3026 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
3029 3027 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
3030 3028 /*
3031 3029 * Check if this page has other translations
3032 3030 * after unloading our translation.
3033 3031 */
3034 3032 if (hat_page_is_mapped(opp)) {
3035 3033 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3036 3034 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
3037 3035 HAT_UNLOAD);
3038 3036 }
3039 3037
3040 3038 /*
3041 3039 * hat_unload() might sync back someone else's recent
3042 3040 * modification, so check again.
3043 3041 */
3044 3042 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
3045 3043 pageflags |= STEAL_PAGE;
3046 3044 }
3047 3045
3048 3046 /*
3049 3047 * If we have a vpage pointer, see if it indicates that we have
3050 3048 * ``locked'' the page we map -- if so, tell anon_private to
3051 3049 * transfer the locking resource to the new page.
3052 3050 *
3053 3051 * See Statement at the beginning of segvn_lockop regarding
3054 3052 * the way lockcnts/cowcnts are handled during COW.
3055 3053 *
3056 3054 */
3057 3055 if (vpage != NULL && VPP_ISPPLOCK(vpage))
3058 3056 pageflags |= LOCK_PAGE;
3059 3057
3060 3058 /*
3061 3059 * Allocate a private page and perform the copy.
3062 3060 * For MAP_NORESERVE reserve swap space now, unless this
3063 3061 * is a cow fault on an existing anon page in which case
3064 3062 * MAP_NORESERVE will have made advance reservations.
3065 3063 */
3066 3064 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3067 3065 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3068 3066 atomic_add_long(&svd->swresv, ptob(1));
3069 3067 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3070 3068 } else {
3071 3069 page_unlock(opp);
3072 3070 err = ENOMEM;
3073 3071 goto out;
3074 3072 }
3075 3073 }
3076 3074 oldap = ap;
3077 3075 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3078 3076 if (pp == NULL) {
3079 3077 err = ENOMEM; /* out of swap space */
3080 3078 goto out;
3081 3079 }
3082 3080
3083 3081 /*
3084 3082 * If we copied away from an anonymous page, then
3085 3083 * we are one step closer to freeing up an anon slot.
3086 3084 *
3087 3085 * NOTE: The original anon slot must be released while
3088 3086 * holding the "anon_map" lock. This is necessary to prevent
3089 3087 * other threads from obtaining a pointer to the anon slot
3090 3088 * which may be freed if its "refcnt" is 1.
3091 3089 */
3092 3090 if (oldap != NULL)
3093 3091 anon_decref(oldap);
3094 3092
3095 3093 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3096 3094
3097 3095 /*
3098 3096 * Handle pages that have been marked for migration
3099 3097 */
3100 3098 if (lgrp_optimizations())
3101 3099 page_migrate(seg, addr, &pp, 1);
3102 3100
3103 3101 ASSERT(pp->p_szc == 0);
3104 3102
3105 3103 ASSERT(!IS_VMODSORT(pp->p_vnode));
3106 3104 if (enable_mbit_wa) {
3107 3105 if (rw == S_WRITE)
3108 3106 hat_setmod(pp);
3109 3107 else if (!hat_ismod(pp))
3110 3108 prot &= ~PROT_WRITE;
3111 3109 }
3112 3110
3113 3111 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3114 3112 hat_memload(hat, addr, pp, prot, hat_flag);
3115 3113
3116 3114 if (!(hat_flag & HAT_LOAD_LOCK))
3117 3115 page_unlock(pp);
3118 3116
3119 3117 ASSERT(anon_lock);
3120 3118 anon_array_exit(&cookie);
3121 3119 return (0);
3122 3120 out:
3123 3121 if (anon_lock)
3124 3122 anon_array_exit(&cookie);
3125 3123
3126 3124 if (type == F_SOFTLOCK) {
3127 3125 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3128 3126 }
3129 3127 return (FC_MAKE_ERR(err));
3130 3128 }
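
/*
 * The STEAL_PAGE heuristic above, restated with a concrete case
 * (hypothetical): under memory pressure (freemem < minfree), a
 * PAGESIZE vnode page with no anon slot, no modifications, no other
 * mappings, and p_lckcnt == 0 / p_cowcnt == 0 is handed to
 * anon_private() with STEAL_PAGE set, so the COW copy reuses the
 * original page instead of allocating and copying a new one.
 */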
3131 3129
3132 3130 /*
3133 3131  * Relocate a bunch of smaller targ pages into one large repl page. All targ
3134 3132  * pages must be complete pages smaller than replacement pages.
3135 3133  * It's assumed that no page's szc can change since they are all PAGESIZE or
3136 3134  * complete large pages locked SHARED.
3137 3135 */
3138 3136 static void
3139 3137 segvn_relocate_pages(page_t **targ, page_t *replacement)
3140 3138 {
3141 3139 page_t *pp;
3142 3140 pgcnt_t repl_npgs, curnpgs;
3143 3141 pgcnt_t i;
3144 3142 uint_t repl_szc = replacement->p_szc;
3145 3143 page_t *first_repl = replacement;
3146 3144 page_t *repl;
3147 3145 spgcnt_t npgs;
3148 3146
3149 3147 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3150 3148
3151 3149 ASSERT(repl_szc != 0);
3152 3150 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3153 3151
3154 3152 i = 0;
3155 3153 while (repl_npgs) {
3156 3154 spgcnt_t nreloc;
3157 3155 int err;
3158 3156 ASSERT(replacement != NULL);
3159 3157 pp = targ[i];
3160 3158 ASSERT(pp->p_szc < repl_szc);
3161 3159 ASSERT(PAGE_EXCL(pp));
3162 3160 ASSERT(!PP_ISFREE(pp));
3163 3161 curnpgs = page_get_pagecnt(pp->p_szc);
3164 3162 if (curnpgs == 1) {
3165 3163 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3166 3164 repl = replacement;
3167 3165 page_sub(&replacement, repl);
3168 3166 ASSERT(PAGE_EXCL(repl));
3169 3167 ASSERT(!PP_ISFREE(repl));
3170 3168 ASSERT(repl->p_szc == repl_szc);
3171 3169 } else {
3172 3170 page_t *repl_savepp;
3173 3171 int j;
3174 3172 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3175 3173 repl_savepp = replacement;
3176 3174 for (j = 0; j < curnpgs; j++) {
3177 3175 repl = replacement;
3178 3176 page_sub(&replacement, repl);
3179 3177 ASSERT(PAGE_EXCL(repl));
3180 3178 ASSERT(!PP_ISFREE(repl));
3181 3179 ASSERT(repl->p_szc == repl_szc);
3182 3180 ASSERT(page_pptonum(targ[i + j]) ==
3183 3181 page_pptonum(targ[i]) + j);
3184 3182 }
3185 3183 repl = repl_savepp;
3186 3184 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3187 3185 }
3188 3186 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3189 3187 if (err || nreloc != curnpgs) {
3190 3188 panic("segvn_relocate_pages: "
3191 3189 "page_relocate failed err=%d curnpgs=%ld "
3192 3190 "nreloc=%ld", err, curnpgs, nreloc);
3193 3191 }
3194 3192 ASSERT(curnpgs <= repl_npgs);
3195 3193 repl_npgs -= curnpgs;
3196 3194 i += curnpgs;
3197 3195 }
3198 3196 ASSERT(replacement == NULL);
3199 3197
3200 3198 repl = first_repl;
3201 3199 repl_npgs = npgs;
3202 3200 for (i = 0; i < repl_npgs; i++) {
3203 3201 ASSERT(PAGE_EXCL(repl));
3204 3202 ASSERT(!PP_ISFREE(repl));
3205 3203 targ[i] = repl;
3206 3204 page_downgrade(targ[i]);
3207 3205 repl++;
3208 3206 }
3209 3207 }
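
/*
 * Sizing example for the relocation above, on a hypothetical platform
 * with an 8K PAGESIZE and 64K szc-1 pages: a 64K replacement page
 * supplies repl_npgs == 8 constituent pages and absorbs eight
 * complete PAGESIZE targ pages; a 64K targ page would trip the
 * pp->p_szc < repl_szc assertion. After page_relocate(), all eight
 * targ[] slots point into the replacement page and are downgraded
 * back to SHARED locks.
 */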
3210 3208
3211 3209 /*
3212 3210  * Check if all pages in the ppa array are complete pages smaller than szc
3213 3211  * pages and their roots will still be aligned relative to their current
3214 3212  * size if the entire ppa array is relocated into one szc page. If these
3215 3213  * conditions are not met return 0.
3216 3214 *
3217 3215  * If all pages are properly aligned, attempt to upgrade their locks
3218 3216  * to exclusive mode. If that fails, set *upgrdfail to 1 and return 0.
3219 3217  * upgrdfail was set to 0 by the caller.
3220 3218 *
3221 3219 * Return 1 if all pages are aligned and locked exclusively.
3222 3220 *
3223 3221 * If all pages in ppa array happen to be physically contiguous to make one
3224 3222 * szc page and all exclusive locks are successfully obtained promote the page
3225 3223 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3226 3224 */
3227 3225 static int
3228 3226 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3229 3227 {
3230 3228 page_t *pp;
3231 3229 pfn_t pfn;
3232 3230 pgcnt_t totnpgs = page_get_pagecnt(szc);
3233 3231 pfn_t first_pfn;
3234 3232 int contig = 1;
3235 3233 pgcnt_t i;
3236 3234 pgcnt_t j;
3237 3235 uint_t curszc;
3238 3236 pgcnt_t curnpgs;
3239 3237 int root = 0;
3240 3238
3241 3239 ASSERT(szc > 0);
3242 3240
3243 3241 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3244 3242
3245 3243 for (i = 0; i < totnpgs; i++) {
3246 3244 pp = ppa[i];
3247 3245 ASSERT(PAGE_SHARED(pp));
3248 3246 ASSERT(!PP_ISFREE(pp));
3249 3247 pfn = page_pptonum(pp);
3250 3248 if (i == 0) {
3251 3249 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3252 3250 contig = 0;
3253 3251 } else {
3254 3252 first_pfn = pfn;
3255 3253 }
3256 3254 } else if (contig && pfn != first_pfn + i) {
3257 3255 contig = 0;
3258 3256 }
3259 3257 if (pp->p_szc == 0) {
3260 3258 if (root) {
3261 3259 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3262 3260 return (0);
3263 3261 }
3264 3262 } else if (!root) {
3265 3263 if ((curszc = pp->p_szc) >= szc) {
3266 3264 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3267 3265 return (0);
3268 3266 }
3269 3267 if (curszc == 0) {
3270 3268 /*
3271 3269 * p_szc having changed means we don't have all the pages
3272 3270 * locked. Return failure.
3273 3271 */
3274 3272 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3275 3273 return (0);
3276 3274 }
3277 3275 curnpgs = page_get_pagecnt(curszc);
3278 3276 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3279 3277 !IS_P2ALIGNED(i, curnpgs)) {
3280 3278 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3281 3279 return (0);
3282 3280 }
3283 3281 root = 1;
3284 3282 } else {
3285 3283 ASSERT(i > 0);
3286 3284 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3287 3285 if (pp->p_szc != curszc) {
3288 3286 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3289 3287 return (0);
3290 3288 }
3291 3289 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3292 3290 panic("segvn_full_szcpages: "
3293 3291 "large page not physically contiguous");
3294 3292 }
3295 3293 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3296 3294 root = 0;
3297 3295 }
3298 3296 }
3299 3297 }
3300 3298
3301 3299 for (i = 0; i < totnpgs; i++) {
3302 3300 ASSERT(ppa[i]->p_szc < szc);
3303 3301 if (!page_tryupgrade(ppa[i])) {
3304 3302 for (j = 0; j < i; j++) {
3305 3303 page_downgrade(ppa[j]);
3306 3304 }
3307 3305 *pszc = ppa[i]->p_szc;
3308 3306 *upgrdfail = 1;
3309 3307 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3310 3308 return (0);
3311 3309 }
3312 3310 }
3313 3311
3314 3312 /*
3315 3313 * When a page is put on a free cachelist its szc is set to 0. If the
3316 3314 * file system reclaimed pages from the cachelist, the targ pages will
3317 3315 * be physically contiguous with 0 p_szc. In this case just upgrade
3318 3316 * the szc of the targ pages without any relocations.
3319 3317 * To avoid any hat issues with previous small mappings
3320 3318 * hat_pageunload() the target pages first.
3321 3319 */
3322 3320 if (contig) {
3323 3321 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3324 3322 for (i = 0; i < totnpgs; i++) {
3325 3323 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3326 3324 }
3327 3325 for (i = 0; i < totnpgs; i++) {
3328 3326 ppa[i]->p_szc = szc;
3329 3327 }
3330 3328 for (i = 0; i < totnpgs; i++) {
3331 3329 ASSERT(PAGE_EXCL(ppa[i]));
3332 3330 page_downgrade(ppa[i]);
3333 3331 }
3334 3332 if (pszc != NULL) {
3335 3333 *pszc = szc;
3336 3334 }
3337 3335 }
3338 3336 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3339 3337 return (1);
3340 3338 }
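
/*
 * A simplified sketch of the alignment and root-tracking tests above,
 * collapsed into one userland predicate: it answers whether totnpgs
 * physically contiguous pfns, some of them constituents of embedded
 * large pages, could be promoted to a single large page. The real
 * routine also handles the lock upgrade and the in-place promotion;
 * this predicate and all names below are illustrative only.
 * IS_P2ALIGNED and P2PHASE mirror their <sys/sysmacros.h> definitions
 * and assume a power-of-2 alignment.
 */
#include <stdio.h>

#define	IS_P2ALIGNED(v, a)	((((unsigned long)(v)) & ((a) - 1)) == 0)
#define	P2PHASE(x, a)		((x) & ((a) - 1))

static int
full_szc_check(const unsigned long *pfn, const unsigned int *szc,
    unsigned long totnpgs)
{
	unsigned long i, curnpgs = 0;
	unsigned int curszc = 0;
	int root = 0;

	if (!IS_P2ALIGNED(pfn[0], totnpgs))
		return (0);		/* first pfn must be a large page root */
	for (i = 0; i < totnpgs; i++) {
		if (i != 0 && pfn[i] != pfn[0] + i)
			return (0);	/* must be physically contiguous */
		if (szc[i] == 0) {
			if (root)
				return (0);	/* hole inside embedded page */
			continue;
		}
		if (!root) {
			curszc = szc[i];
			curnpgs = 1UL << (3 * curszc); /* page_get_pagecnt() */
			if (!IS_P2ALIGNED(pfn[i], curnpgs) ||
			    !IS_P2ALIGNED(i, curnpgs))
				return (0);	/* misaligned embedded root */
			root = 1;
		} else if (szc[i] != curszc) {
			return (0);	/* szc changed mid large page */
		}
		if (P2PHASE(pfn[i], curnpgs) == curnpgs - 1)
			root = 0;	/* last constituent of embedded page */
	}
	return (1);
}

int
main(void)
{
	unsigned long pfn[16];
	unsigned int szc[16] = { 1, 1, 1, 1, 1, 1, 1, 1 };	/* rest 0 */
	unsigned long i;

	for (i = 0; i < 16; i++)
		pfn[i] = 512 + i;	/* contiguous, aligned run */
	printf("promotable: %d\n", full_szc_check(pfn, szc, 16));
	return (0);
}
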
3341 3339
3342 3340 /*
3343 3341 * Create physically contiguous pages for [vp, off] - [vp, off +
3344 3342 * page_size(szc)) range and for a private segment return them in ppa array.
3345 3343 * Pages are created either via IO or relocations.
3346 3344 *
3347 3345 * Return 1 on success and 0 on failure.
3348 3346 *
3349 3347 * If physically contiguous pages already exist for this range return 1 without
3350 3348 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3351 3349 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
3352 3350 */
3353 3351
3354 3352 static int
3355 3353 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3356 3354 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3357 3355 int *downsize)
3358 3356
3359 3357 {
3360 3358 page_t *pplist = *ppplist;
3361 3359 size_t pgsz = page_get_pagesize(szc);
3362 3360 pgcnt_t pages = btop(pgsz);
3363 3361 ulong_t start_off = off;
3364 3362 u_offset_t eoff = off + pgsz;
3365 3363 spgcnt_t nreloc;
3366 3364 u_offset_t io_off = off;
3367 3365 size_t io_len;
3368 3366 page_t *io_pplist = NULL;
3369 3367 page_t *done_pplist = NULL;
3370 3368 pgcnt_t pgidx = 0;
3371 3369 page_t *pp;
3372 3370 page_t *newpp;
3373 3371 page_t *targpp;
3374 3372 int io_err = 0;
3375 3373 int i;
3376 3374 pfn_t pfn;
3377 3375 ulong_t ppages;
3378 3376 page_t *targ_pplist = NULL;
3379 3377 page_t *repl_pplist = NULL;
3380 3378 page_t *tmp_pplist;
3381 3379 int nios = 0;
3382 3380 uint_t pszc;
3383 3381 struct vattr va;
3384 3382
3385 3383 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3386 3384
3387 3385 ASSERT(szc != 0);
3388 3386 ASSERT(pplist->p_szc == szc);
3389 3387
3390 3388 /*
3391 3389 * downsize will be set to 1 only if we fail to lock pages. this will
3392 3390 * allow subsequent faults to try to relocate the page again. If we
3393 3391 * fail due to misalignment don't downsize and let the caller map the
3394 3392 * whole region with small mappings to avoid more faults into the area
3395 3393 * where we can't get large pages anyway.
3396 3394 */
3397 3395 *downsize = 0;
3398 3396
3399 3397 while (off < eoff) {
3400 3398 newpp = pplist;
3401 3399 ASSERT(newpp != NULL);
3402 3400 ASSERT(PAGE_EXCL(newpp));
3403 3401 ASSERT(!PP_ISFREE(newpp));
3404 3402 /*
3405 3403 * we pass NULL for nrelocp to page_lookup_create()
3406 3404 * so that it doesn't relocate. We relocate here
3407 3405 * later only after we make sure we can lock all
3408 3406 * pages in the range we handle and they are all
3409 3407 * aligned.
3410 3408 */
3411 3409 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3412 3410 ASSERT(pp != NULL);
3413 3411 ASSERT(!PP_ISFREE(pp));
3414 3412 ASSERT(pp->p_vnode == vp);
3415 3413 ASSERT(pp->p_offset == off);
3416 3414 if (pp == newpp) {
3417 3415 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3418 3416 page_sub(&pplist, pp);
3419 3417 ASSERT(PAGE_EXCL(pp));
3420 3418 ASSERT(page_iolock_assert(pp));
3421 3419 page_list_concat(&io_pplist, &pp);
3422 3420 off += PAGESIZE;
3423 3421 continue;
3424 3422 }
3425 3423 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3426 3424 pfn = page_pptonum(pp);
3427 3425 pszc = pp->p_szc;
3428 3426 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3429 3427 IS_P2ALIGNED(pfn, pages)) {
3430 3428 ASSERT(repl_pplist == NULL);
3431 3429 ASSERT(done_pplist == NULL);
3432 3430 ASSERT(pplist == *ppplist);
3433 3431 page_unlock(pp);
3434 3432 page_free_replacement_page(pplist);
3435 3433 page_create_putback(pages);
3436 3434 *ppplist = NULL;
3437 3435 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3438 3436 return (1);
3439 3437 }
3440 3438 if (pszc >= szc) {
3441 3439 page_unlock(pp);
3442 3440 segvn_faultvnmpss_align_err1++;
3443 3441 goto out;
3444 3442 }
3445 3443 ppages = page_get_pagecnt(pszc);
3446 3444 if (!IS_P2ALIGNED(pfn, ppages)) {
3447 3445 ASSERT(pszc > 0);
3448 3446 /*
3449 3447 * sizing down to pszc won't help.
3450 3448 */
3451 3449 page_unlock(pp);
3452 3450 segvn_faultvnmpss_align_err2++;
3453 3451 goto out;
3454 3452 }
3455 3453 pfn = page_pptonum(newpp);
3456 3454 if (!IS_P2ALIGNED(pfn, ppages)) {
3457 3455 ASSERT(pszc > 0);
3458 3456 /*
3459 3457 * sizing down to pszc won't help.
3460 3458 */
3461 3459 page_unlock(pp);
3462 3460 segvn_faultvnmpss_align_err3++;
3463 3461 goto out;
3464 3462 }
3465 3463 if (!PAGE_EXCL(pp)) {
3466 3464 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3467 3465 page_unlock(pp);
3468 3466 *downsize = 1;
3469 3467 *ret_pszc = pp->p_szc;
3470 3468 goto out;
3471 3469 }
3472 3470 targpp = pp;
3473 3471 if (io_pplist != NULL) {
3474 3472 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3475 3473 io_len = off - io_off;
3476 3474 /*
3477 3475 * Some file systems like NFS don't check EOF
3478 3476 * conditions in VOP_PAGEIO(). Check it here
3479 3477 * now that pages are locked SE_EXCL. Any file
3480 3478 * truncation will wait until the pages are
3481 3479 * unlocked so no need to worry that file will
3482 3480 * be truncated after we check its size here.
3483 3481 * XXX fix NFS to remove this check.
3484 3482 */
3485 3483 va.va_mask = AT_SIZE;
3486 3484 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3487 3485 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3488 3486 page_unlock(targpp);
3489 3487 goto out;
3490 3488 }
3491 3489 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3492 3490 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3493 3491 *downsize = 1;
3494 3492 *ret_pszc = 0;
3495 3493 page_unlock(targpp);
3496 3494 goto out;
3497 3495 }
3498 3496 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3499 3497 B_READ, svd->cred, NULL);
3500 3498 if (io_err) {
3501 3499 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3502 3500 page_unlock(targpp);
3503 3501 if (io_err == EDEADLK) {
3504 3502 segvn_vmpss_pageio_deadlk_err++;
3505 3503 }
3506 3504 goto out;
3507 3505 }
3508 3506 nios++;
3509 3507 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3510 3508 while (io_pplist != NULL) {
3511 3509 pp = io_pplist;
3512 3510 page_sub(&io_pplist, pp);
3513 3511 ASSERT(page_iolock_assert(pp));
3514 3512 page_io_unlock(pp);
3515 3513 pgidx = (pp->p_offset - start_off) >>
3516 3514 PAGESHIFT;
3517 3515 ASSERT(pgidx < pages);
3518 3516 ppa[pgidx] = pp;
3519 3517 page_list_concat(&done_pplist, &pp);
3520 3518 }
3521 3519 }
3522 3520 pp = targpp;
3523 3521 ASSERT(PAGE_EXCL(pp));
3524 3522 ASSERT(pp->p_szc <= pszc);
3525 3523 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3526 3524 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3527 3525 page_unlock(pp);
3528 3526 *downsize = 1;
3529 3527 *ret_pszc = pp->p_szc;
3530 3528 goto out;
3531 3529 }
3532 3530 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3533 3531 /*
3534 3532 * page szc could have changed before the entire group was
3535 3533 * locked. Reread the page szc.
3536 3534 */
3537 3535 pszc = pp->p_szc;
3538 3536 ppages = page_get_pagecnt(pszc);
3539 3537
3540 3538 /* link just the roots */
3541 3539 page_list_concat(&targ_pplist, &pp);
3542 3540 page_sub(&pplist, newpp);
3543 3541 page_list_concat(&repl_pplist, &newpp);
3544 3542 off += PAGESIZE;
3545 3543 while (--ppages != 0) {
3546 3544 newpp = pplist;
3547 3545 page_sub(&pplist, newpp);
3548 3546 off += PAGESIZE;
3549 3547 }
3550 3548 io_off = off;
3551 3549 }
3552 3550 if (io_pplist != NULL) {
3553 3551 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3554 3552 io_len = eoff - io_off;
3555 3553 va.va_mask = AT_SIZE;
3556 3554 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3557 3555 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3558 3556 goto out;
3559 3557 }
3560 3558 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3561 3559 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3562 3560 *downsize = 1;
3563 3561 *ret_pszc = 0;
3564 3562 goto out;
3565 3563 }
3566 3564 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3567 3565 B_READ, svd->cred, NULL);
3568 3566 if (io_err) {
3569 3567 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3570 3568 if (io_err == EDEADLK) {
3571 3569 segvn_vmpss_pageio_deadlk_err++;
3572 3570 }
3573 3571 goto out;
3574 3572 }
3575 3573 nios++;
3576 3574 while (io_pplist != NULL) {
3577 3575 pp = io_pplist;
3578 3576 page_sub(&io_pplist, pp);
3579 3577 ASSERT(page_iolock_assert(pp));
3580 3578 page_io_unlock(pp);
3581 3579 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3582 3580 ASSERT(pgidx < pages);
3583 3581 ppa[pgidx] = pp;
3584 3582 }
3585 3583 }
3586 3584 /*
3587 3585 * we're now bound to succeed or panic.
3588 3586 * remove pages from done_pplist. it's not needed anymore.
3589 3587 */
3590 3588 while (done_pplist != NULL) {
3591 3589 pp = done_pplist;
3592 3590 page_sub(&done_pplist, pp);
3593 3591 }
3594 3592 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3595 3593 ASSERT(pplist == NULL);
3596 3594 *ppplist = NULL;
3597 3595 while (targ_pplist != NULL) {
3598 3596 int ret;
3599 3597 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3600 3598 ASSERT(repl_pplist);
3601 3599 pp = targ_pplist;
3602 3600 page_sub(&targ_pplist, pp);
3603 3601 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3604 3602 newpp = repl_pplist;
3605 3603 page_sub(&repl_pplist, newpp);
3606 3604 #ifdef DEBUG
3607 3605 pfn = page_pptonum(pp);
3608 3606 pszc = pp->p_szc;
3609 3607 ppages = page_get_pagecnt(pszc);
3610 3608 ASSERT(IS_P2ALIGNED(pfn, ppages));
3611 3609 pfn = page_pptonum(newpp);
3612 3610 ASSERT(IS_P2ALIGNED(pfn, ppages));
3613 3611 ASSERT(P2PHASE(pfn, pages) == pgidx);
3614 3612 #endif
3615 3613 nreloc = 0;
3616 3614 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3617 3615 if (ret != 0 || nreloc == 0) {
3618 3616 panic("segvn_fill_vp_pages: "
3619 3617 "page_relocate failed");
3620 3618 }
3621 3619 pp = newpp;
3622 3620 while (nreloc-- != 0) {
3623 3621 ASSERT(PAGE_EXCL(pp));
3624 3622 ASSERT(pp->p_vnode == vp);
3625 3623 ASSERT(pgidx ==
3626 3624 ((pp->p_offset - start_off) >> PAGESHIFT));
3627 3625 ppa[pgidx++] = pp;
3628 3626 pp++;
3629 3627 }
3630 3628 }
3631 3629
3632 3630 if (svd->type == MAP_PRIVATE) {
3633 3631 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3634 3632 for (i = 0; i < pages; i++) {
3635 3633 ASSERT(ppa[i] != NULL);
3636 3634 ASSERT(PAGE_EXCL(ppa[i]));
3637 3635 ASSERT(ppa[i]->p_vnode == vp);
3638 3636 ASSERT(ppa[i]->p_offset ==
3639 3637 start_off + (i << PAGESHIFT));
3640 3638 page_downgrade(ppa[i]);
3641 3639 }
3642 3640 ppa[pages] = NULL;
3643 3641 } else {
3644 3642 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3645 3643 /*
3646 3644 * the caller will still call VOP_GETPAGE() for shared segments
3647 3645 * to check FS write permissions. For private segments we map
3648 3646 * the file read only anyway, so no VOP_GETPAGE() is needed.
3649 3647 */
3650 3648 for (i = 0; i < pages; i++) {
3651 3649 ASSERT(ppa[i] != NULL);
3652 3650 ASSERT(PAGE_EXCL(ppa[i]));
3653 3651 ASSERT(ppa[i]->p_vnode == vp);
3654 3652 ASSERT(ppa[i]->p_offset ==
3655 3653 start_off + (i << PAGESHIFT));
3656 3654 page_unlock(ppa[i]);
3657 3655 }
3658 3656 ppa[0] = NULL;
3659 3657 }
3660 3658
3661 3659 return (1);
3662 3660 out:
3663 3661 /*
3664 3662 * Do the cleanup. Unlock target pages we didn't relocate. They are
3665 3663 * linked on targ_pplist by root pages. reassemble unused replacement
3666 3664 * and io pages back to pplist.
3667 3665 */
3668 3666 if (io_pplist != NULL) {
3669 3667 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3670 3668 pp = io_pplist;
3671 3669 do {
3672 3670 ASSERT(pp->p_vnode == vp);
3673 3671 ASSERT(pp->p_offset == io_off);
3674 3672 ASSERT(page_iolock_assert(pp));
3675 3673 page_io_unlock(pp);
3676 3674 page_hashout(pp, NULL);
3677 3675 io_off += PAGESIZE;
3678 3676 } while ((pp = pp->p_next) != io_pplist);
3679 3677 page_list_concat(&io_pplist, &pplist);
3680 3678 pplist = io_pplist;
3681 3679 }
3682 3680 tmp_pplist = NULL;
3683 3681 while (targ_pplist != NULL) {
3684 3682 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3685 3683 pp = targ_pplist;
3686 3684 ASSERT(PAGE_EXCL(pp));
3687 3685 page_sub(&targ_pplist, pp);
3688 3686
3689 3687 pszc = pp->p_szc;
3690 3688 ppages = page_get_pagecnt(pszc);
3691 3689 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3692 3690
3693 3691 if (pszc != 0) {
3694 3692 group_page_unlock(pp);
3695 3693 }
3696 3694 page_unlock(pp);
3697 3695
3698 3696 pp = repl_pplist;
3699 3697 ASSERT(pp != NULL);
3700 3698 ASSERT(PAGE_EXCL(pp));
3701 3699 ASSERT(pp->p_szc == szc);
3702 3700 page_sub(&repl_pplist, pp);
3703 3701
3704 3702 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3705 3703
3706 3704 /* relink replacement page */
3707 3705 page_list_concat(&tmp_pplist, &pp);
3708 3706 while (--ppages != 0) {
3709 3707 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3710 3708 pp++;
3711 3709 ASSERT(PAGE_EXCL(pp));
3712 3710 ASSERT(pp->p_szc == szc);
3713 3711 page_list_concat(&tmp_pplist, &pp);
3714 3712 }
3715 3713 }
3716 3714 if (tmp_pplist != NULL) {
3717 3715 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3718 3716 page_list_concat(&tmp_pplist, &pplist);
3719 3717 pplist = tmp_pplist;
3720 3718 }
3721 3719 /*
3722 3720 * at this point all pages are either on done_pplist or
3723 3721 * pplist. They can't be all on done_pplist otherwise
3724 3722 * we'd've been done.
3725 3723 */
3726 3724 ASSERT(pplist != NULL);
3727 3725 if (nios != 0) {
3728 3726 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3729 3727 pp = pplist;
3730 3728 do {
3731 3729 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3732 3730 ASSERT(pp->p_szc == szc);
3733 3731 ASSERT(PAGE_EXCL(pp));
3734 3732 ASSERT(pp->p_vnode != vp);
3735 3733 pp->p_szc = 0;
3736 3734 } while ((pp = pp->p_next) != pplist);
3737 3735
3738 3736 pp = done_pplist;
3739 3737 do {
3740 3738 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3741 3739 ASSERT(pp->p_szc == szc);
3742 3740 ASSERT(PAGE_EXCL(pp));
3743 3741 ASSERT(pp->p_vnode == vp);
3744 3742 pp->p_szc = 0;
3745 3743 } while ((pp = pp->p_next) != done_pplist);
3746 3744
3747 3745 while (pplist != NULL) {
3748 3746 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3749 3747 pp = pplist;
3750 3748 page_sub(&pplist, pp);
3751 3749 page_free(pp, 0);
3752 3750 }
3753 3751
3754 3752 while (done_pplist != NULL) {
3755 3753 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3756 3754 pp = done_pplist;
3757 3755 page_sub(&done_pplist, pp);
3758 3756 page_unlock(pp);
3759 3757 }
3760 3758 *ppplist = NULL;
3761 3759 return (0);
3762 3760 }
3763 3761 ASSERT(pplist == *ppplist);
3764 3762 if (io_err) {
3765 3763 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3766 3764 /*
3767 3765 * don't downsize on io error.
3768 3766 * see if vop_getpage succeeds.
3769 3767 * pplist may still be used in this case
3770 3768 * for relocations.
3771 3769 */
3772 3770 return (0);
3773 3771 }
3774 3772 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3775 3773 page_free_replacement_page(pplist);
3776 3774 page_create_putback(pages);
3777 3775 *ppplist = NULL;
3778 3776 return (0);
3779 3777 }
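
/*
 * A small sketch of the EOF guard used twice above, with btopr()
 * re-derived for userland (round bytes up to whole pages; the 4K
 * PAGESHIFT is an assumption of this sketch). Since some file systems
 * won't fail a VOP_PAGEIO() read past EOF, the code compares the file
 * size against the end of the intended I/O window before issuing it.
 */
#include <stdio.h>

#define	PAGESHIFT	12
#define	PAGESIZE	(1ULL << PAGESHIFT)
#define	BTOPR(x)	(((x) + PAGESIZE - 1) >> PAGESHIFT)

static int
io_fits_file(unsigned long long va_size, unsigned long long io_off,
    unsigned long long io_len)
{
	/* mirrors: if (btopr(va.va_size) < btopr(io_off + io_len)) bail */
	return (BTOPR(va_size) >= BTOPR(io_off + io_len));
}

int
main(void)
{
	/* a 20K file: a 16K read at offset 8K runs past EOF ... */
	printf("%d\n", io_fits_file(20480, 8192, 16384));	/* 0 */
	/* ... but an 8K read at offset 8K fits */
	printf("%d\n", io_fits_file(20480, 8192, 8192));	/* 1 */
	return (0);
}
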
3780 3778
3781 3779 int segvn_anypgsz = 0;
3782 3780
3783 3781 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3784 3782 if ((type) == F_SOFTLOCK) { \
3785 3783 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3786 3784 -(pages)); \
3787 3785 }
3788 3786
3789 3787 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3790 3788 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3791 3789 if ((rw) == S_WRITE) { \
3792 3790 for (i = 0; i < (pages); i++) { \
3793 3791 ASSERT((ppa)[i]->p_vnode == \
3794 3792 (ppa)[0]->p_vnode); \
3795 3793 hat_setmod((ppa)[i]); \
3796 3794 } \
3797 3795 } else if ((rw) != S_OTHER && \
3798 3796 ((prot) & (vpprot) & PROT_WRITE)) { \
3799 3797 for (i = 0; i < (pages); i++) { \
3800 3798 ASSERT((ppa)[i]->p_vnode == \
3801 3799 (ppa)[0]->p_vnode); \
3802 3800 if (!hat_ismod((ppa)[i])) { \
3803 3801 prot &= ~PROT_WRITE; \
3804 3802 break; \
3805 3803 } \
3806 3804 } \
3807 3805 } \
3808 3806 }
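
/*
 * A hedged sketch of the decision SEGVN_UPDATE_MODBITS() makes for
 * VMODSORT vnodes: a write fault marks every page modified up front;
 * a read fault that would map the pages writable strips PROT_WRITE if
 * any page is still clean, so the first real write faults again and
 * sets the mod bit then. The one-field page and the function name are
 * invented for illustration; the macro above additionally checks rw
 * against S_OTHER and folds in vpprot.
 */
#include <stdio.h>

#define	PROT_WRITE	0x2

struct mp {
	int mod;	/* stand-in for hat_ismod()/hat_setmod() state */
};

static unsigned int
update_modbits(struct mp *ppa, int pages, int is_write, unsigned int prot)
{
	int i;

	if (is_write) {
		for (i = 0; i < pages; i++)
			ppa[i].mod = 1;			/* hat_setmod() */
	} else if (prot & PROT_WRITE) {
		for (i = 0; i < pages; i++) {
			if (!ppa[i].mod) {		/* !hat_ismod() */
				prot &= ~PROT_WRITE;	/* map read only */
				break;
			}
		}
	}
	return (prot);
}

int
main(void)
{
	struct mp ppa[4] = { { 1 }, { 0 }, { 1 }, { 1 } };

	/* read fault over one clean page: the mapping loses PROT_WRITE */
	printf("prot=%x\n", update_modbits(ppa, 4, 0, PROT_WRITE));
	return (0);
}
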
3809 3807
3810 3808 #ifdef VM_STATS
3811 3809
3812 3810 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3813 3811 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3814 3812
3815 3813 #else /* VM_STATS */
3816 3814
3817 3815 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3818 3816
3819 3817 #endif
3820 3818
3821 3819 static faultcode_t
3822 3820 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3823 3821 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3824 3822 caddr_t eaddr, int brkcow)
3825 3823 {
3826 3824 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3827 3825 struct anon_map *amp = svd->amp;
3828 3826 uchar_t segtype = svd->type;
3829 3827 uint_t szc = seg->s_szc;
3830 3828 size_t pgsz = page_get_pagesize(szc);
3831 3829 size_t maxpgsz = pgsz;
3832 3830 pgcnt_t pages = btop(pgsz);
3833 3831 pgcnt_t maxpages = pages;
3834 3832 size_t ppasize = (pages + 1) * sizeof (page_t *);
3835 3833 caddr_t a = lpgaddr;
3836 3834 caddr_t maxlpgeaddr = lpgeaddr;
3837 3835 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3838 3836 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3839 3837 struct vpage *vpage = (svd->vpage != NULL) ?
3840 3838 &svd->vpage[seg_page(seg, a)] : NULL;
3841 3839 vnode_t *vp = svd->vp;
3842 3840 page_t **ppa;
3843 3841 uint_t pszc;
3844 3842 size_t ppgsz;
3845 3843 pgcnt_t ppages;
3846 3844 faultcode_t err = 0;
3847 3845 int ierr;
3848 3846 int vop_size_err = 0;
3849 3847 uint_t protchk, prot, vpprot;
3850 3848 ulong_t i;
3851 3849 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3852 3850 anon_sync_obj_t an_cookie;
3853 3851 enum seg_rw arw;
3854 3852 int alloc_failed = 0;
3855 3853 int adjszc_chk;
3856 3854 struct vattr va;
3857 3855 page_t *pplist;
3858 3856 pfn_t pfn;
3859 3857 int physcontig;
3860 3858 int upgrdfail;
3861 3859 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3862 3860 int tron = (svd->tr_state == SEGVN_TR_ON);
3863 3861
3864 3862 ASSERT(szc != 0);
3865 3863 ASSERT(vp != NULL);
3866 3864 ASSERT(brkcow == 0 || amp != NULL);
3867 3865 ASSERT(tron == 0 || amp != NULL);
3868 3866 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3869 3867 ASSERT(!(svd->flags & MAP_NORESERVE));
3870 3868 ASSERT(type != F_SOFTUNLOCK);
3871 3869 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3872 3870 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3873 3871 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3874 3872 ASSERT(seg->s_szc < NBBY * sizeof (int));
3875 3873 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3876 3874 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3877 3875
3878 3876 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3879 3877 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3880 3878
3881 3879 if (svd->flags & MAP_TEXT) {
3882 3880 hat_flag |= HAT_LOAD_TEXT;
3883 3881 }
3884 3882
3885 3883 if (svd->pageprot) {
3886 3884 switch (rw) {
3887 3885 case S_READ:
3888 3886 protchk = PROT_READ;
3889 3887 break;
3890 3888 case S_WRITE:
3891 3889 protchk = PROT_WRITE;
3892 3890 break;
3893 3891 case S_EXEC:
3894 3892 protchk = PROT_EXEC;
3895 3893 break;
3896 3894 case S_OTHER:
3897 3895 default:
3898 3896 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3899 3897 break;
3900 3898 }
3901 3899 } else {
3902 3900 prot = svd->prot;
3903 3901 /* caller has already done segment level protection check. */
3904 3902 }
3905 3903
3906 3904 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3907 3905 SEGVN_VMSTAT_FLTVNPAGES(2);
3908 3906 arw = S_READ;
3909 3907 } else {
3910 3908 arw = rw;
3911 3909 }
3912 3910
3913 3911 ppa = kmem_alloc(ppasize, KM_SLEEP);
3914 3912
3915 3913 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3916 3914
3917 3915 for (;;) {
3918 3916 adjszc_chk = 0;
3919 3917 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3920 3918 if (adjszc_chk) {
3921 3919 while (szc < seg->s_szc) {
3922 3920 uintptr_t e;
3923 3921 uint_t tszc;
3924 3922 tszc = segvn_anypgsz_vnode ? szc + 1 :
3925 3923 seg->s_szc;
3926 3924 ppgsz = page_get_pagesize(tszc);
3927 3925 if (!IS_P2ALIGNED(a, ppgsz) ||
3928 3926 ((alloc_failed >> tszc) & 0x1)) {
3929 3927 break;
3930 3928 }
3931 3929 SEGVN_VMSTAT_FLTVNPAGES(4);
3932 3930 szc = tszc;
3933 3931 pgsz = ppgsz;
3934 3932 pages = btop(pgsz);
3935 3933 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3936 3934 lpgeaddr = (caddr_t)e;
3937 3935 }
3938 3936 }
3939 3937
3940 3938 again:
3941 3939 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3942 3940 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3943 3941 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3944 3942 anon_array_enter(amp, aindx, &an_cookie);
3945 3943 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3946 3944 SEGVN_VMSTAT_FLTVNPAGES(5);
3947 3945 ASSERT(anon_pages(amp->ahp, aindx,
3948 3946 maxpages) == maxpages);
3949 3947 anon_array_exit(&an_cookie);
3950 3948 ANON_LOCK_EXIT(&->a_rwlock);
3951 3949 err = segvn_fault_anonpages(hat, seg,
3952 3950 a, a + maxpgsz, type, rw,
3953 3951 MAX(a, addr),
3954 3952 MIN(a + maxpgsz, eaddr), brkcow);
3955 3953 if (err != 0) {
3956 3954 SEGVN_VMSTAT_FLTVNPAGES(6);
3957 3955 goto out;
3958 3956 }
3959 3957 if (szc < seg->s_szc) {
3960 3958 szc = seg->s_szc;
3961 3959 pgsz = maxpgsz;
3962 3960 pages = maxpages;
3963 3961 lpgeaddr = maxlpgeaddr;
3964 3962 }
3965 3963 goto next;
3966 3964 } else {
3967 3965 ASSERT(anon_pages(amp->ahp, aindx,
3968 3966 maxpages) == 0);
3969 3967 SEGVN_VMSTAT_FLTVNPAGES(7);
3970 3968 anon_array_exit(&an_cookie);
3971 3969 ANON_LOCK_EXIT(&->a_rwlock);
3972 3970 }
3973 3971 }
3974 3972 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3975 3973 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3976 3974
3977 3975 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3978 3976 ASSERT(vpage != NULL);
3979 3977 prot = VPP_PROT(vpage);
3980 3978 ASSERT(sameprot(seg, a, maxpgsz));
3981 3979 if ((prot & protchk) == 0) {
3982 3980 SEGVN_VMSTAT_FLTVNPAGES(8);
3983 3981 err = FC_PROT;
3984 3982 goto out;
3985 3983 }
3986 3984 }
3987 3985 if (type == F_SOFTLOCK) {
3988 3986 atomic_add_long((ulong_t *)&svd->softlockcnt,
3989 3987 pages);
3990 3988 }
3991 3989
3992 3990 pplist = NULL;
3993 3991 physcontig = 0;
3994 3992 ppa[0] = NULL;
3995 3993 if (!brkcow && !tron && szc &&
3996 3994 !page_exists_physcontig(vp, off, szc,
3997 3995 segtype == MAP_PRIVATE ? ppa : NULL)) {
3998 3996 SEGVN_VMSTAT_FLTVNPAGES(9);
3999 3997 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
4000 3998 szc, 0, 0) && type != F_SOFTLOCK) {
4001 3999 SEGVN_VMSTAT_FLTVNPAGES(10);
4002 4000 pszc = 0;
4003 4001 ierr = -1;
4004 4002 alloc_failed |= (1 << szc);
4005 4003 break;
4006 4004 }
4007 4005 if (pplist != NULL &&
4008 4006 vp->v_mpssdata == SEGVN_PAGEIO) {
4009 4007 int downsize;
4010 4008 SEGVN_VMSTAT_FLTVNPAGES(11);
4011 4009 physcontig = segvn_fill_vp_pages(svd,
4012 4010 vp, off, szc, ppa, &pplist,
4013 4011 &pszc, &downsize);
4014 4012 ASSERT(!physcontig || pplist == NULL);
4015 4013 if (!physcontig && downsize &&
4016 4014 type != F_SOFTLOCK) {
4017 4015 ASSERT(pplist == NULL);
4018 4016 SEGVN_VMSTAT_FLTVNPAGES(12);
4019 4017 ierr = -1;
4020 4018 break;
4021 4019 }
4022 4020 ASSERT(!physcontig ||
4023 4021 segtype == MAP_PRIVATE ||
4024 4022 ppa[0] == NULL);
4025 4023 if (physcontig && ppa[0] == NULL) {
4026 4024 physcontig = 0;
4027 4025 }
4028 4026 }
4029 4027 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
4030 4028 SEGVN_VMSTAT_FLTVNPAGES(13);
4031 4029 ASSERT(segtype == MAP_PRIVATE);
4032 4030 physcontig = 1;
4033 4031 }
4034 4032
4035 4033 if (!physcontig) {
4036 4034 SEGVN_VMSTAT_FLTVNPAGES(14);
4037 4035 ppa[0] = NULL;
4038 4036 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
4039 4037 &vpprot, ppa, pgsz, seg, a, arw,
4040 4038 svd->cred, NULL);
4041 4039 #ifdef DEBUG
4042 4040 if (ierr == 0) {
4043 4041 for (i = 0; i < pages; i++) {
4044 4042 ASSERT(PAGE_LOCKED(ppa[i]));
4045 4043 ASSERT(!PP_ISFREE(ppa[i]));
4046 4044 ASSERT(ppa[i]->p_vnode == vp);
4047 4045 ASSERT(ppa[i]->p_offset ==
4048 4046 off + (i << PAGESHIFT));
4049 4047 }
4050 4048 }
4051 4049 #endif /* DEBUG */
4052 4050 if (segtype == MAP_PRIVATE) {
4053 4051 SEGVN_VMSTAT_FLTVNPAGES(15);
4054 4052 vpprot &= ~PROT_WRITE;
4055 4053 }
4056 4054 } else {
4057 4055 ASSERT(segtype == MAP_PRIVATE);
4058 4056 SEGVN_VMSTAT_FLTVNPAGES(16);
4059 4057 vpprot = PROT_ALL & ~PROT_WRITE;
4060 4058 ierr = 0;
4061 4059 }
4062 4060
4063 4061 if (ierr != 0) {
4064 4062 SEGVN_VMSTAT_FLTVNPAGES(17);
4065 4063 if (pplist != NULL) {
4066 4064 SEGVN_VMSTAT_FLTVNPAGES(18);
4067 4065 page_free_replacement_page(pplist);
4068 4066 page_create_putback(pages);
4069 4067 }
4070 4068 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4071 4069 if (a + pgsz <= eaddr) {
4072 4070 SEGVN_VMSTAT_FLTVNPAGES(19);
4073 4071 err = FC_MAKE_ERR(ierr);
4074 4072 goto out;
4075 4073 }
4076 4074 va.va_mask = AT_SIZE;
4077 4075 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4078 4076 SEGVN_VMSTAT_FLTVNPAGES(20);
4079 4077 err = FC_MAKE_ERR(EIO);
4080 4078 goto out;
4081 4079 }
4082 4080 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4083 4081 SEGVN_VMSTAT_FLTVNPAGES(21);
4084 4082 err = FC_MAKE_ERR(ierr);
4085 4083 goto out;
4086 4084 }
4087 4085 if (btopr(va.va_size) <
4088 4086 btopr(off + (eaddr - a))) {
4089 4087 SEGVN_VMSTAT_FLTVNPAGES(22);
4090 4088 err = FC_MAKE_ERR(ierr);
4091 4089 goto out;
4092 4090 }
4093 4091 if (brkcow || tron || type == F_SOFTLOCK) {
4094 4092 /* can't reduce map area */
4095 4093 SEGVN_VMSTAT_FLTVNPAGES(23);
4096 4094 vop_size_err = 1;
4097 4095 goto out;
4098 4096 }
4099 4097 SEGVN_VMSTAT_FLTVNPAGES(24);
4100 4098 ASSERT(szc != 0);
4101 4099 pszc = 0;
4102 4100 ierr = -1;
4103 4101 break;
4104 4102 }
4105 4103
4106 4104 if (amp != NULL) {
4107 4105 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4108 4106 anon_array_enter(amp, aindx, &an_cookie);
4109 4107 }
4110 4108 if (amp != NULL &&
4111 4109 anon_get_ptr(amp->ahp, aindx) != NULL) {
4112 4110 ulong_t taindx = P2ALIGN(aindx, maxpages);
4113 4111
4114 4112 SEGVN_VMSTAT_FLTVNPAGES(25);
4115 4113 ASSERT(anon_pages(amp->ahp, taindx,
4116 4114 maxpages) == maxpages);
4117 4115 for (i = 0; i < pages; i++) {
4118 4116 page_unlock(ppa[i]);
4119 4117 }
4120 4118 anon_array_exit(&an_cookie);
4121 4119 ANON_LOCK_EXIT(&->a_rwlock);
4122 4120 if (pplist != NULL) {
4123 4121 page_free_replacement_page(pplist);
4124 4122 page_create_putback(pages);
4125 4123 }
4126 4124 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4127 4125 if (szc < seg->s_szc) {
4128 4126 SEGVN_VMSTAT_FLTVNPAGES(26);
4129 4127 /*
4130 4128 * For private segments SOFTLOCK
4131 4129 * either always breaks cow (any rw
4132 4130 * type except S_READ_NOCOW) or
4133 4131 * address space is locked as writer
4134 4132 * (S_READ_NOCOW case) and anon slots
4135 4133 * can't show up on second check.
4136 4134 * Therefore if we are here for
4137 4135 * SOFTLOCK case it must be a cow
4138 4136 * break but cow break never reduces
4139 4137 * szc. text replication (tron) in
4140 4138 * this case works as cow break.
4141 4139 * Thus the assert below.
4142 4140 */
4143 4141 ASSERT(!brkcow && !tron &&
4144 4142 type != F_SOFTLOCK);
4145 4143 pszc = seg->s_szc;
4146 4144 ierr = -2;
4147 4145 break;
4148 4146 }
4149 4147 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4150 4148 goto again;
4151 4149 }
4152 4150 #ifdef DEBUG
4153 4151 if (amp != NULL) {
4154 4152 ulong_t taindx = P2ALIGN(aindx, maxpages);
4155 4153 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4156 4154 }
4157 4155 #endif /* DEBUG */
4158 4156
4159 4157 if (brkcow || tron) {
4160 4158 ASSERT(amp != NULL);
4161 4159 ASSERT(pplist == NULL);
4162 4160 ASSERT(szc == seg->s_szc);
4163 4161 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4164 4162 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4165 4163 SEGVN_VMSTAT_FLTVNPAGES(27);
4166 4164 ierr = anon_map_privatepages(amp, aindx, szc,
4167 4165 seg, a, prot, ppa, vpage, segvn_anypgsz,
4168 4166 tron ? PG_LOCAL : 0, svd->cred);
4169 4167 if (ierr != 0) {
4170 4168 SEGVN_VMSTAT_FLTVNPAGES(28);
4171 4169 anon_array_exit(&an_cookie);
4172 4170 ANON_LOCK_EXIT(&->a_rwlock);
4173 4171 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4174 4172 err = FC_MAKE_ERR(ierr);
4175 4173 goto out;
4176 4174 }
4177 4175
4178 4176 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4179 4177 /*
4180 4178 * p_szc can't be changed for locked
4181 4179 * swapfs pages.
4182 4180 */
4183 4181 ASSERT(svd->rcookie ==
4184 4182 HAT_INVALID_REGION_COOKIE);
4185 4183 hat_memload_array(hat, a, pgsz, ppa, prot,
4186 4184 hat_flag);
4187 4185
4188 4186 if (!(hat_flag & HAT_LOAD_LOCK)) {
4189 4187 SEGVN_VMSTAT_FLTVNPAGES(29);
4190 4188 for (i = 0; i < pages; i++) {
4191 4189 page_unlock(ppa[i]);
4192 4190 }
4193 4191 }
4194 4192 anon_array_exit(&an_cookie);
4195 4193 ANON_LOCK_EXIT(&->a_rwlock);
4196 4194 goto next;
4197 4195 }
4198 4196
4199 4197 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4200 4198 (!svd->pageprot && svd->prot == (prot & vpprot)));
4201 4199
4202 4200 pfn = page_pptonum(ppa[0]);
4203 4201 /*
4204 4202 * hat_page_demote() needs an SE_EXCL lock on one of the
4205 4203 * constituent page_t's and it decreases the root's p_szc
4206 4204 * last. This means that if the root's p_szc equals szc and
4207 4205 * all its constituent pages are locked, any
4208 4206 * hat_page_demote() that could have changed p_szc to
4209 4207 * szc is already done and no new hat_page_demote()
4210 4208 * can start for this large page.
4211 4209 */
4212 4210
4213 4211 /*
4214 4212 * we need to make sure the same mapping size is used for
4215 4213 * the same address range if there's a possibility the
4216 4214 * address is already mapped, because the hat layer panics
4217 4215 * when a translation is loaded for a range already
4218 4216 * mapped with a different page size. We achieve this
4219 4217 * by always using the largest page size possible subject
4220 4218 * to the constraints of page size, segment page size
4221 4219 * and page alignment. Since mappings are invalidated
4222 4220 * when those constraints change, making it
4223 4221 * impossible to use the previously used mapping size, no
4224 4222 * mapping size conflicts should happen.
4225 4223 */
4226 4224
4227 4225 chkszc:
4228 4226 if ((pszc = ppa[0]->p_szc) == szc &&
4229 4227 IS_P2ALIGNED(pfn, pages)) {
4230 4228
4231 4229 SEGVN_VMSTAT_FLTVNPAGES(30);
4232 4230 #ifdef DEBUG
4233 4231 for (i = 0; i < pages; i++) {
4234 4232 ASSERT(PAGE_LOCKED(ppa[i]));
4235 4233 ASSERT(!PP_ISFREE(ppa[i]));
4236 4234 ASSERT(page_pptonum(ppa[i]) ==
4237 4235 pfn + i);
4238 4236 ASSERT(ppa[i]->p_szc == szc);
4239 4237 ASSERT(ppa[i]->p_vnode == vp);
4240 4238 ASSERT(ppa[i]->p_offset ==
4241 4239 off + (i << PAGESHIFT));
4242 4240 }
4243 4241 #endif /* DEBUG */
4244 4242 /*
4245 4243 * All pages are of the szc we need and they are
4246 4244 * all locked, so they can't change szc. Load the
4247 4245 * translations.
4248 4246 *
4249 4247 * if the page got promoted since the last check
4250 4248 * we don't need pplist.
4251 4249 */
4252 4250 if (pplist != NULL) {
4253 4251 page_free_replacement_page(pplist);
4254 4252 page_create_putback(pages);
4255 4253 }
4256 4254 if (PP_ISMIGRATE(ppa[0])) {
4257 4255 page_migrate(seg, a, ppa, pages);
4258 4256 }
4259 4257 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4260 4258 prot, vpprot);
4261 4259 hat_memload_array_region(hat, a, pgsz,
4262 4260 ppa, prot & vpprot, hat_flag,
4263 4261 svd->rcookie);
4264 4262
4265 4263 if (!(hat_flag & HAT_LOAD_LOCK)) {
4266 4264 for (i = 0; i < pages; i++) {
4267 4265 page_unlock(ppa[i]);
4268 4266 }
4269 4267 }
4270 4268 if (amp != NULL) {
4271 4269 anon_array_exit(&an_cookie);
4272 4270 ANON_LOCK_EXIT(&->a_rwlock);
4273 4271 }
4274 4272 goto next;
4275 4273 }
4276 4274
4277 4275 /*
4278 4276 * See if upsize is possible.
4279 4277 */
4280 4278 if (pszc > szc && szc < seg->s_szc &&
4281 4279 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4282 4280 pgcnt_t aphase;
4283 4281 uint_t pszc1 = MIN(pszc, seg->s_szc);
4284 4282 ppgsz = page_get_pagesize(pszc1);
4285 4283 ppages = btop(ppgsz);
4286 4284 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4287 4285
4288 4286 ASSERT(type != F_SOFTLOCK);
4289 4287
4290 4288 SEGVN_VMSTAT_FLTVNPAGES(31);
4291 4289 if (aphase != P2PHASE(pfn, ppages)) {
4292 4290 segvn_faultvnmpss_align_err4++;
4293 4291 } else {
4294 4292 SEGVN_VMSTAT_FLTVNPAGES(32);
4295 4293 if (pplist != NULL) {
4296 4294 page_t *pl = pplist;
4297 4295 page_free_replacement_page(pl);
4298 4296 page_create_putback(pages);
4299 4297 }
4300 4298 for (i = 0; i < pages; i++) {
4301 4299 page_unlock(ppa[i]);
4302 4300 }
4303 4301 if (amp != NULL) {
4304 4302 anon_array_exit(&an_cookie);
4305 4303 ANON_LOCK_EXIT(&->a_rwlock);
4306 4304 }
4307 4305 pszc = pszc1;
4308 4306 ierr = -2;
4309 4307 break;
4310 4308 }
4311 4309 }
4312 4310
4313 4311 /*
4314 4312 * check if we should use smallest mapping size.
4315 4313 */
4316 4314 upgrdfail = 0;
4317 4315 if (szc == 0 ||
4318 4316 (pszc >= szc &&
4319 4317 !IS_P2ALIGNED(pfn, pages)) ||
4320 4318 (pszc < szc &&
4321 4319 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4322 4320 &pszc))) {
4323 4321
4324 4322 if (upgrdfail && type != F_SOFTLOCK) {
4325 4323 /*
4326 4324 * segvn_full_szcpages failed to lock
4327 4325 * all pages EXCL. Size down.
4328 4326 */
4329 4327 ASSERT(pszc < szc);
4330 4328
4331 4329 SEGVN_VMSTAT_FLTVNPAGES(33);
4332 4330
4333 4331 if (pplist != NULL) {
4334 4332 page_t *pl = pplist;
4335 4333 page_free_replacement_page(pl);
4336 4334 page_create_putback(pages);
4337 4335 }
4338 4336
4339 4337 for (i = 0; i < pages; i++) {
4340 4338 page_unlock(ppa[i]);
4341 4339 }
4342 4340 if (amp != NULL) {
4343 4341 anon_array_exit(&an_cookie);
4344 4342 ANON_LOCK_EXIT(&->a_rwlock);
4345 4343 }
4346 4344 ierr = -1;
4347 4345 break;
4348 4346 }
4349 4347 if (szc != 0 && !upgrdfail) {
4350 4348 segvn_faultvnmpss_align_err5++;
4351 4349 }
4352 4350 SEGVN_VMSTAT_FLTVNPAGES(34);
4353 4351 if (pplist != NULL) {
4354 4352 page_free_replacement_page(pplist);
4355 4353 page_create_putback(pages);
4356 4354 }
4357 4355 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4358 4356 prot, vpprot);
4359 4357 if (upgrdfail && segvn_anypgsz_vnode) {
4360 4358 /* SOFTLOCK case */
4361 4359 hat_memload_array_region(hat, a, pgsz,
4362 4360 ppa, prot & vpprot, hat_flag,
4363 4361 svd->rcookie);
4364 4362 } else {
4365 4363 for (i = 0; i < pages; i++) {
4366 4364 hat_memload_region(hat,
4367 4365 a + (i << PAGESHIFT),
4368 4366 ppa[i], prot & vpprot,
4369 4367 hat_flag, svd->rcookie);
4370 4368 }
4371 4369 }
4372 4370 if (!(hat_flag & HAT_LOAD_LOCK)) {
4373 4371 for (i = 0; i < pages; i++) {
4374 4372 page_unlock(ppa[i]);
4375 4373 }
4376 4374 }
4377 4375 if (amp != NULL) {
4378 4376 anon_array_exit(&an_cookie);
4379 4377 ANON_LOCK_EXIT(&->a_rwlock);
4380 4378 }
4381 4379 goto next;
4382 4380 }
4383 4381
4384 4382 if (pszc == szc) {
4385 4383 /*
4386 4384 * segvn_full_szcpages() upgraded pages szc.
4387 4385 */
4388 4386 ASSERT(pszc == ppa[0]->p_szc);
4389 4387 ASSERT(IS_P2ALIGNED(pfn, pages));
4390 4388 goto chkszc;
4391 4389 }
4392 4390
4393 4391 if (pszc > szc) {
4394 4392 kmutex_t *szcmtx;
4395 4393 SEGVN_VMSTAT_FLTVNPAGES(35);
4396 4394 /*
4397 4395 * p_szc of ppa[0] can change since we haven't
4398 4396 * locked all constituent pages. Call
4399 4397 * page_szc_lock() to prevent szc changes.
4400 4398 * This should be a rare case that happens when
4401 4399 * multiple segments use a different page size
4402 4400 * to map the same file offsets.
4403 4401 */
4404 4402 szcmtx = page_szc_lock(ppa[0]);
4405 4403 pszc = ppa[0]->p_szc;
4406 4404 ASSERT(szcmtx != NULL || pszc == 0);
4407 4405 ASSERT(ppa[0]->p_szc <= pszc);
4408 4406 if (pszc <= szc) {
4409 4407 SEGVN_VMSTAT_FLTVNPAGES(36);
4410 4408 if (szcmtx != NULL) {
4411 4409 mutex_exit(szcmtx);
4412 4410 }
4413 4411 goto chkszc;
4414 4412 }
4415 4413 if (pplist != NULL) {
4416 4414 /*
4417 4415 * The page got promoted since the last check,
4418 4416 * so we don't need the preallocated large
4419 4417 * page.
4420 4418 */
4421 4419 SEGVN_VMSTAT_FLTVNPAGES(37);
4422 4420 page_free_replacement_page(pplist);
4423 4421 page_create_putback(pages);
4424 4422 }
4425 4423 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4426 4424 prot, vpprot);
4427 4425 hat_memload_array_region(hat, a, pgsz, ppa,
4428 4426 prot & vpprot, hat_flag, svd->rcookie);
4429 4427 mutex_exit(szcmtx);
4430 4428 if (!(hat_flag & HAT_LOAD_LOCK)) {
4431 4429 for (i = 0; i < pages; i++) {
4432 4430 page_unlock(ppa[i]);
4433 4431 }
4434 4432 }
4435 4433 if (amp != NULL) {
4436 4434 anon_array_exit(&an_cookie);
4437 4435 ANON_LOCK_EXIT(&->a_rwlock);
4438 4436 }
4439 4437 goto next;
4440 4438 }
4441 4439
4442 4440 /*
4443 4441 * if the page got demoted since the last check
4444 4442 * we may not have allocated a larger page.
4445 4443 * Allocate one now.
4446 4444 */
4447 4445 if (pplist == NULL &&
4448 4446 page_alloc_pages(vp, seg, a, &pplist, NULL,
4449 4447 szc, 0, 0) && type != F_SOFTLOCK) {
4450 4448 SEGVN_VMSTAT_FLTVNPAGES(38);
4451 4449 for (i = 0; i < pages; i++) {
4452 4450 page_unlock(ppa[i]);
4453 4451 }
4454 4452 if (amp != NULL) {
4455 4453 anon_array_exit(&an_cookie);
4456 4454 ANON_LOCK_EXIT(&->a_rwlock);
4457 4455 }
4458 4456 ierr = -1;
4459 4457 alloc_failed |= (1 << szc);
4460 4458 break;
4461 4459 }
4462 4460
4463 4461 SEGVN_VMSTAT_FLTVNPAGES(39);
4464 4462
4465 4463 if (pplist != NULL) {
4466 4464 segvn_relocate_pages(ppa, pplist);
4467 4465 #ifdef DEBUG
4468 4466 } else {
4469 4467 ASSERT(type == F_SOFTLOCK);
4470 4468 SEGVN_VMSTAT_FLTVNPAGES(40);
4471 4469 #endif /* DEBUG */
4472 4470 }
4473 4471
4474 4472 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4475 4473
4476 4474 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4477 4475 ASSERT(type == F_SOFTLOCK);
4478 4476 for (i = 0; i < pages; i++) {
4479 4477 ASSERT(ppa[i]->p_szc < szc);
4480 4478 hat_memload_region(hat,
4481 4479 a + (i << PAGESHIFT),
4482 4480 ppa[i], prot & vpprot, hat_flag,
4483 4481 svd->rcookie);
4484 4482 }
4485 4483 } else {
4486 4484 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4487 4485 hat_memload_array_region(hat, a, pgsz, ppa,
4488 4486 prot & vpprot, hat_flag, svd->rcookie);
4489 4487 }
4490 4488 if (!(hat_flag & HAT_LOAD_LOCK)) {
4491 4489 for (i = 0; i < pages; i++) {
4492 4490 ASSERT(PAGE_SHARED(ppa[i]));
4493 4491 page_unlock(ppa[i]);
4494 4492 }
4495 4493 }
4496 4494 if (amp != NULL) {
4497 4495 anon_array_exit(&an_cookie);
4498 4496 ANON_LOCK_EXIT(&->a_rwlock);
4499 4497 }
4500 4498
4501 4499 next:
4502 4500 if (vpage != NULL) {
4503 4501 vpage += pages;
4504 4502 }
4505 4503 adjszc_chk = 1;
4506 4504 }
4507 4505 if (a == lpgeaddr)
4508 4506 break;
4509 4507 ASSERT(a < lpgeaddr);
4510 4508
4511 4509 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4512 4510
4513 4511 /*
4514 4512 * ierr == -1 means we failed to map with a large page.
4515 4513 * (either due to allocation/relocation failures or
4516 4514 * misalignment with other mappings to this file).
4517 4515 *
4518 4516 * ierr == -2 means some other thread allocated a large page
4519 4517 * after we gave up trying to map with a large page. Retry with
4520 4518 * a larger mapping.
4521 4519 */
4522 4520 ASSERT(ierr == -1 || ierr == -2);
4523 4521 ASSERT(ierr == -2 || szc != 0);
4524 4522 ASSERT(ierr == -1 || szc < seg->s_szc);
4525 4523 if (ierr == -2) {
4526 4524 SEGVN_VMSTAT_FLTVNPAGES(41);
4527 4525 ASSERT(pszc > szc && pszc <= seg->s_szc);
4528 4526 szc = pszc;
4529 4527 } else if (segvn_anypgsz_vnode) {
4530 4528 SEGVN_VMSTAT_FLTVNPAGES(42);
4531 4529 szc--;
4532 4530 } else {
4533 4531 SEGVN_VMSTAT_FLTVNPAGES(43);
4534 4532 ASSERT(pszc < szc);
4535 4533 /*
4536 4534 * another process created a pszc large page,
4537 4535 * but we still have to drop to szc 0.
4538 4536 */
4539 4537 szc = 0;
4540 4538 }
4541 4539
4542 4540 pgsz = page_get_pagesize(szc);
4543 4541 pages = btop(pgsz);
4544 4542 if (ierr == -2) {
4545 4543 /*
4546 4544 * Size up case. Note lpgaddr may only be needed for
4547 4545 * softlock case so we don't adjust it here.
4548 4546 */
4549 4547 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4550 4548 ASSERT(a >= lpgaddr);
4551 4549 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4552 4550 off = svd->offset + (uintptr_t)(a - seg->s_base);
4553 4551 aindx = svd->anon_index + seg_page(seg, a);
4554 4552 vpage = (svd->vpage != NULL) ?
4555 4553 &svd->vpage[seg_page(seg, a)] : NULL;
4556 4554 } else {
4557 4555 /*
4558 4556 * Size down case. Note lpgaddr may only be needed for
4559 4557 * softlock case so we don't adjust it here.
4560 4558 */
4561 4559 ASSERT(IS_P2ALIGNED(a, pgsz));
4562 4560 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4563 4561 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4564 4562 ASSERT(a < lpgeaddr);
4565 4563 if (a < addr) {
4566 4564 SEGVN_VMSTAT_FLTVNPAGES(44);
4567 4565 /*
4568 4566 * The beginning of the large page region can
4569 4567 * be pulled to the right to make a smaller
4570 4568 * region. We haven't yet faulted a single
4571 4569 * page.
4572 4570 */
4573 4571 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4574 4572 ASSERT(a >= lpgaddr);
4575 4573 off = svd->offset +
4576 4574 (uintptr_t)(a - seg->s_base);
4577 4575 aindx = svd->anon_index + seg_page(seg, a);
4578 4576 vpage = (svd->vpage != NULL) ?
4579 4577 &svd->vpage[seg_page(seg, a)] : NULL;
4580 4578 }
4581 4579 }
4582 4580 }
4583 4581 out:
4584 4582 kmem_free(ppa, ppasize);
4585 4583 if (!err && !vop_size_err) {
4586 4584 SEGVN_VMSTAT_FLTVNPAGES(45);
4587 4585 return (0);
4588 4586 }
4589 4587 if (type == F_SOFTLOCK && a > lpgaddr) {
4590 4588 SEGVN_VMSTAT_FLTVNPAGES(46);
4591 4589 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4592 4590 }
4593 4591 if (!vop_size_err) {
4594 4592 SEGVN_VMSTAT_FLTVNPAGES(47);
4595 4593 return (err);
4596 4594 }
4597 4595 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4598 4596 /*
4599 4597 * Large page end is mapped beyond the end of file and it's a cow
4600 4598 * fault (can be a text replication induced cow) or softlock so we can't
4601 4599 * reduce the map area. For now just demote the segment. This should
4602 4600 * really only happen if the end of the file changed after the mapping
4603 4601 * was established since when large page segments are created we make
4604 4602 * sure they don't extend beyond the end of the file.
4605 4603 */
4606 4604 SEGVN_VMSTAT_FLTVNPAGES(48);
4607 4605
4608 4606 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4609 4607 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4610 4608 err = 0;
4611 4609 if (seg->s_szc != 0) {
4612 4610 segvn_fltvnpages_clrszc_cnt++;
4613 4611 ASSERT(svd->softlockcnt == 0);
4614 4612 err = segvn_clrszc(seg);
4615 4613 if (err != 0) {
4616 4614 segvn_fltvnpages_clrszc_err++;
4617 4615 }
4618 4616 }
4619 4617 ASSERT(err || seg->s_szc == 0);
4620 4618 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4621 4619 /* segvn_fault will do its job as if szc had been zero to begin with */
4622 4620 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4623 4621 }
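
/*
 * A compact sketch of the retry arithmetic at the bottom of the loop
 * above: on ierr == -2 (size up) the faulting address is pulled back
 * to the new, larger page boundary, and in both directions the large
 * page end address is re-rounded. P2ALIGN and P2ROUNDUP are re-derived
 * here as in <sys/sysmacros.h>; the addresses and the 256K page size
 * are invented for the example.
 */
#include <stdio.h>

#define	P2ALIGN(x, a)	((x) & -(unsigned long)(a))
#define	P2ROUNDUP(x, a)	(-(-(unsigned long)(x) & -(unsigned long)(a)))

int
main(void)
{
	unsigned long a = 0x12345000UL;		/* current fault address */
	unsigned long eaddr = 0x12388000UL;	/* end of the faulted range */
	unsigned long pgsz = 0x40000UL;		/* new (larger) page size */

	/* size up (ierr == -2): realign 'a' down to the larger boundary */
	printf("a        -> %#lx\n", P2ALIGN(a, pgsz));	      /* 0x12340000 */
	/* both cases: re-round the large page region end address */
	printf("lpgeaddr -> %#lx\n", P2ROUNDUP(eaddr, pgsz)); /* 0x123c0000 */
	return (0);
}
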
4624 4622
4625 4623 /*
4626 4624 * This routine will attempt to fault in one large page.
4627 4625 * It will use smaller pages if that fails.
4628 4626 * It should only be called for pure anonymous segments.
4629 4627 */
4630 4628 static faultcode_t
4631 4629 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4632 4630 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4633 4631 caddr_t eaddr, int brkcow)
4634 4632 {
4635 4633 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4636 4634 struct anon_map *amp = svd->amp;
4637 4635 uchar_t segtype = svd->type;
4638 4636 uint_t szc = seg->s_szc;
4639 4637 size_t pgsz = page_get_pagesize(szc);
4640 4638 size_t maxpgsz = pgsz;
4641 4639 pgcnt_t pages = btop(pgsz);
4642 4640 uint_t ppaszc = szc;
4643 4641 caddr_t a = lpgaddr;
4644 4642 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4645 4643 struct vpage *vpage = (svd->vpage != NULL) ?
4646 4644 &svd->vpage[seg_page(seg, a)] : NULL;
4647 4645 page_t **ppa;
4648 4646 uint_t ppa_szc;
4649 4647 faultcode_t err;
4650 4648 int ierr;
4651 4649 uint_t protchk, prot, vpprot;
4652 4650 ulong_t i;
4653 4651 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4654 4652 anon_sync_obj_t cookie;
4655 4653 int adjszc_chk;
4656 4654 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4657 4655
4658 4656 ASSERT(szc != 0);
4659 4657 ASSERT(amp != NULL);
4660 4658 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4661 4659 ASSERT(!(svd->flags & MAP_NORESERVE));
4662 4660 ASSERT(type != F_SOFTUNLOCK);
4663 4661 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4664 4662 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4665 4663 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4666 4664
4667 4665 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4668 4666
4669 4667 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4670 4668 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4671 4669
4672 4670 if (svd->flags & MAP_TEXT) {
4673 4671 hat_flag |= HAT_LOAD_TEXT;
4674 4672 }
4675 4673
4676 4674 if (svd->pageprot) {
4677 4675 switch (rw) {
4678 4676 case S_READ:
4679 4677 protchk = PROT_READ;
4680 4678 break;
4681 4679 case S_WRITE:
4682 4680 protchk = PROT_WRITE;
4683 4681 break;
4684 4682 case S_EXEC:
4685 4683 protchk = PROT_EXEC;
4686 4684 break;
4687 4685 case S_OTHER:
4688 4686 default:
4689 4687 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4690 4688 break;
4691 4689 }
4692 4690 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4693 4691 } else {
4694 4692 prot = svd->prot;
4695 4693 /* caller has already done segment level protection check. */
4696 4694 }
4697 4695
4698 4696 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4699 4697 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4700 4698 for (;;) {
4701 4699 adjszc_chk = 0;
4702 4700 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4703 4701 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4704 4702 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4705 4703 ASSERT(vpage != NULL);
4706 4704 prot = VPP_PROT(vpage);
4707 4705 ASSERT(sameprot(seg, a, maxpgsz));
4708 4706 if ((prot & protchk) == 0) {
4709 4707 err = FC_PROT;
4710 4708 goto error;
4711 4709 }
4712 4710 }
4713 4711 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4714 4712 pgsz < maxpgsz) {
4715 4713 ASSERT(a > lpgaddr);
4716 4714 szc = seg->s_szc;
4717 4715 pgsz = maxpgsz;
4718 4716 pages = btop(pgsz);
4719 4717 ASSERT(IS_P2ALIGNED(aindx, pages));
4720 4718 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4721 4719 pgsz);
4722 4720 }
4723 4721 if (type == F_SOFTLOCK) {
4724 4722 atomic_add_long((ulong_t *)&svd->softlockcnt,
4725 4723 pages);
4726 4724 }
4727 4725 anon_array_enter(amp, aindx, &cookie);
4728 4726 ppa_szc = (uint_t)-1;
4729 4727 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4730 4728 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4731 4729 segvn_anypgsz, pgflags, svd->cred);
4732 4730 if (ierr != 0) {
4733 4731 anon_array_exit(&cookie);
4734 4732 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4735 4733 if (type == F_SOFTLOCK) {
4736 4734 atomic_add_long(
4737 4735 (ulong_t *)&svd->softlockcnt,
4738 4736 -pages);
4739 4737 }
4740 4738 if (ierr > 0) {
4741 4739 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4742 4740 err = FC_MAKE_ERR(ierr);
4743 4741 goto error;
4744 4742 }
4745 4743 break;
4746 4744 }
4747 4745
4748 4746 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4749 4747
4750 4748 ASSERT(segtype == MAP_SHARED ||
4751 4749 ppa[0]->p_szc <= szc);
4752 4750 ASSERT(segtype == MAP_PRIVATE ||
4753 4751 ppa[0]->p_szc >= szc);
4754 4752
4755 4753 /*
4756 4754 * Handle pages that have been marked for migration
4757 4755 */
4758 4756 if (lgrp_optimizations())
4759 4757 page_migrate(seg, a, ppa, pages);
4760 4758
4761 4759 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4762 4760
4763 4761 if (segtype == MAP_SHARED) {
4764 4762 vpprot |= PROT_WRITE;
4765 4763 }
4766 4764
4767 4765 hat_memload_array(hat, a, pgsz, ppa,
4768 4766 prot & vpprot, hat_flag);
4769 4767
4770 4768 if (hat_flag & HAT_LOAD_LOCK) {
4771 4769 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4772 4770 } else {
4773 4771 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4774 4772 for (i = 0; i < pages; i++)
4775 4773 page_unlock(ppa[i]);
4776 4774 }
4777 4775 if (vpage != NULL)
4778 4776 vpage += pages;
4779 4777
4780 4778 anon_array_exit(&cookie);
4781 4779 adjszc_chk = 1;
4782 4780 }
4783 4781 if (a == lpgeaddr)
4784 4782 break;
4785 4783 ASSERT(a < lpgeaddr);
4786 4784 /*
4787 4785 * ierr == -1 means we failed to allocate a large page.
4788 4786 * so do a size down operation.
4789 4787 *
4790 4788 * ierr == -2 means some other process that privately shares
4791 4789 * pages with this process has allocated a larger page and we
4792 4790 * need to retry with larger pages. So do a size up
4793 4791 * operation. This relies on the fact that large pages are
4794 4792 * never partially shared i.e. if we share any constituent
4795 4793 * page of a large page with another process we must share the
4796 4794 * entire large page. Note this cannot happen for SOFTLOCK
4797 4795 * case, unless current address (a) is at the beginning of the
4798 4796 * next page size boundary because the other process couldn't
4799 4797 * have relocated locked pages.
4800 4798 */
4801 4799 ASSERT(ierr == -1 || ierr == -2);
4802 4800
4803 4801 if (segvn_anypgsz) {
4804 4802 ASSERT(ierr == -2 || szc != 0);
4805 4803 ASSERT(ierr == -1 || szc < seg->s_szc);
4806 4804 szc = (ierr == -1) ? szc - 1 : szc + 1;
4807 4805 } else {
4808 4806 /*
4809 4807 * For non COW faults and segvn_anypgsz == 0
4810 4808 * we need to be careful not to loop forever
4811 4809 * if existing page is found with szc other
4812 4810 * than 0 or seg->s_szc. This could be due
4813 4811 * to page relocations on behalf of DR or
4814 4812 * more likely large page creation. For this
4815 4813 * case simply re-size to existing page's szc
4816 4814 * if returned by anon_map_getpages().
4817 4815 */
4818 4816 if (ppa_szc == (uint_t)-1) {
4819 4817 szc = (ierr == -1) ? 0 : seg->s_szc;
4820 4818 } else {
4821 4819 ASSERT(ppa_szc <= seg->s_szc);
4822 4820 ASSERT(ierr == -2 || ppa_szc < szc);
4823 4821 ASSERT(ierr == -1 || ppa_szc > szc);
4824 4822 szc = ppa_szc;
4825 4823 }
4826 4824 }
4827 4825
4828 4826 pgsz = page_get_pagesize(szc);
4829 4827 pages = btop(pgsz);
4830 4828 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4831 4829 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4832 4830 if (type == F_SOFTLOCK) {
4833 4831 /*
4834 4832 * For softlocks we cannot reduce the fault area
4835 4833 * (calculated based on the largest page size for this
4836 4834 * segment) for size down and a is already next
4837 4835 * page size aligned as asserted above for size
4838 4836 * ups. Therefore just continue in case of softlock.
4839 4837 */
4840 4838 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4841 4839 continue; /* keep lint happy */
4842 4840 } else if (ierr == -2) {
4843 4841
4844 4842 /*
4845 4843 * Size up case. Note lpgaddr may only be needed for
4846 4844 * softlock case so we don't adjust it here.
4847 4845 */
4848 4846 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4849 4847 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4850 4848 ASSERT(a >= lpgaddr);
4851 4849 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4852 4850 aindx = svd->anon_index + seg_page(seg, a);
4853 4851 vpage = (svd->vpage != NULL) ?
4854 4852 &svd->vpage[seg_page(seg, a)] : NULL;
4855 4853 } else {
4856 4854 /*
4857 4855 * Size down case. Note lpgaddr may only be needed for
4858 4856 * softlock case so we don't adjust it here.
4859 4857 */
4860 4858 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4861 4859 ASSERT(IS_P2ALIGNED(a, pgsz));
4862 4860 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4863 4861 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4864 4862 ASSERT(a < lpgeaddr);
4865 4863 if (a < addr) {
4866 4864 /*
4867 4865 * The beginning of the large page region can
4868 4866 * be pulled to the right to make a smaller
4869 4867 * region. We haven't yet faulted a single
4870 4868 * page.
4871 4869 */
4872 4870 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4873 4871 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4874 4872 ASSERT(a >= lpgaddr);
4875 4873 aindx = svd->anon_index + seg_page(seg, a);
4876 4874 vpage = (svd->vpage != NULL) ?
4877 4875 &svd->vpage[seg_page(seg, a)] : NULL;
4878 4876 }
4879 4877 }
4880 4878 }
4881 4879 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4882 4880 ANON_LOCK_EXIT(&->a_rwlock);
4883 4881 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4884 4882 return (0);
4885 4883 error:
4886 4884 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4887 4885 ANON_LOCK_EXIT(&->a_rwlock);
4888 4886 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4889 4887 if (type == F_SOFTLOCK && a > lpgaddr) {
4890 4888 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4891 4889 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4892 4890 }
4893 4891 return (err);
4894 4892 }
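
/*
 * A hedged sketch of just the page size retry policy above: size down
 * on ierr == -1, size up on ierr == -2 when any page size is allowed,
 * otherwise snap straight to the szc anon_map_getpages() reported (or
 * to 0 / the segment szc when it reported none). next_szc() and its
 * arguments are invented names for illustration.
 */
#include <stdio.h>

static unsigned int
next_szc(int ierr, unsigned int szc, unsigned int seg_szc,
    unsigned int ppa_szc,	/* (unsigned int)-1 if not reported */
    int anypgsz)
{
	if (anypgsz)
		return ((ierr == -1) ? szc - 1 : szc + 1);
	if (ppa_szc == (unsigned int)-1)
		return ((ierr == -1) ? 0 : seg_szc);
	return (ppa_szc);	/* re-size to the existing page's szc */
}

int
main(void)
{
	/* allocation failed at szc 3, mixed sizes allowed: try szc 2 */
	printf("%u\n", next_szc(-1, 3, 3, (unsigned int)-1, 1));
	/* another process built a szc 2 page, mixed sizes off: use 2 */
	printf("%u\n", next_szc(-2, 1, 3, 2, 0));
	return (0);
}
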
4895 4893
4896 4894 int fltadvice = 1; /* set to free behind pages for sequential access */
4897 4895
4898 4896 /*
4899 4897 * This routine is called via a machine specific fault handling routine.
4900 4898 * It is also called by software routines wishing to lock or unlock
4901 4899 * a range of addresses.
4902 4900 *
4903 4901 * Here is the basic algorithm:
4904 4902 * If unlocking
4905 4903 * Call segvn_softunlock
4906 4904 * Return
4907 4905 * endif
4908 4906 * Checking and set up work
4909 4907 * If we will need some non-anonymous pages
4910 4908 * Call VOP_GETPAGE over the range of non-anonymous pages
4911 4909 * endif
4912 4910 * Loop over all addresses requested
4913 4911 * Call segvn_faultpage passing in page list
4914 4912 * to load up translations and handle anonymous pages
4915 4913 * endloop
4916 4914 * Load up translation to any additional pages in page list not
4917 4915 * already handled that fit into this segment
4918 4916 */
4919 4917 static faultcode_t
4920 4918 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4921 4919 enum fault_type type, enum seg_rw rw)
4922 4920 {
4923 4921 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4924 4922 page_t **plp, **ppp, *pp;
4925 4923 u_offset_t off;
4926 4924 caddr_t a;
4927 4925 struct vpage *vpage;
4928 4926 uint_t vpprot, prot;
4929 4927 int err;
4930 4928 page_t *pl[PVN_GETPAGE_NUM + 1];
4931 4929 size_t plsz, pl_alloc_sz;
4932 4930 size_t page;
4933 4931 ulong_t anon_index;
4934 4932 struct anon_map *amp;
4935 4933 int dogetpage = 0;
4936 4934 caddr_t lpgaddr, lpgeaddr;
4937 4935 size_t pgsz;
4938 4936 anon_sync_obj_t cookie;
4939 4937 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4940 4938
4941 4939 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4942 4940 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4943 4941
4944 4942 /*
4945 4943 * First handle the easy stuff
4946 4944 */
4947 4945 if (type == F_SOFTUNLOCK) {
4948 4946 if (rw == S_READ_NOCOW) {
4949 4947 rw = S_READ;
4950 4948 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4951 4949 }
4952 4950 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4953 4951 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4954 4952 page_get_pagesize(seg->s_szc);
4955 4953 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4956 4954 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4957 4955 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4958 4956 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4959 4957 return (0);
4960 4958 }
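/*
 * Rough sketch of what CALC_LPG_REGION computes above (hypothetical
 * values; the macro's definition is authoritative):
 *
 *	lpgaddr  ~ (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
 *	lpgeaddr ~ (caddr_t)P2ROUNDUP((uintptr_t)(addr + len), pgsz),
 *		   clamped so it never extends past the segment.
 *
 * e.g. with pgsz = 0x10000, addr = seg->s_base + 0x3000 and
 * len = 0x2000, the softunlock covers [s_base, s_base + 0x10000).
 */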
4961 4959
4962 4960 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4963 4961 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4964 4962 if (brkcow == 0) {
4965 4963 if (svd->tr_state == SEGVN_TR_INIT) {
4966 4964 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4967 4965 if (svd->tr_state == SEGVN_TR_INIT) {
4968 4966 ASSERT(svd->vp != NULL && svd->amp == NULL);
4969 4967 ASSERT(svd->flags & MAP_TEXT);
4970 4968 ASSERT(svd->type == MAP_PRIVATE);
4971 4969 segvn_textrepl(seg);
4972 4970 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4973 4971 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4974 4972 svd->amp != NULL);
4975 4973 }
4976 4974 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4977 4975 }
4978 4976 } else if (svd->tr_state != SEGVN_TR_OFF) {
4979 4977 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4980 4978
4981 4979 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
4982 4980 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
4983 4981 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4984 4982 return (FC_PROT);
4985 4983 }
4986 4984
4987 4985 if (svd->tr_state == SEGVN_TR_ON) {
4988 4986 ASSERT(svd->vp != NULL && svd->amp != NULL);
4989 4987 segvn_textunrepl(seg, 0);
4990 4988 ASSERT(svd->amp == NULL &&
4991 4989 svd->tr_state == SEGVN_TR_OFF);
4992 4990 } else if (svd->tr_state != SEGVN_TR_OFF) {
4993 4991 svd->tr_state = SEGVN_TR_OFF;
4994 4992 }
4995 4993 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
4996 4994 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4997 4995 }
4998 4996
4999 4997 top:
5000 4998 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5001 4999
5002 5000 /*
5003 5001 * If we have the same protections for the entire segment,
5004 5002 * ensure that the access being attempted is legitimate.
5005 5003 */
5006 5004
5007 5005 if (svd->pageprot == 0) {
5008 5006 uint_t protchk;
5009 5007
5010 5008 switch (rw) {
5011 5009 case S_READ:
5012 5010 case S_READ_NOCOW:
5013 5011 protchk = PROT_READ;
5014 5012 break;
5015 5013 case S_WRITE:
5016 5014 protchk = PROT_WRITE;
5017 5015 break;
5018 5016 case S_EXEC:
5019 5017 protchk = PROT_EXEC;
5020 5018 break;
5021 5019 case S_OTHER:
5022 5020 default:
5023 5021 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
5024 5022 break;
5025 5023 }
5026 5024
5027 5025 if ((svd->prot & protchk) == 0) {
5028 5026 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5029 5027 return (FC_PROT); /* illegal access type */
5030 5028 }
5031 5029 }
5032 5030
5033 5031 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5034 5032 /* this must be SOFTLOCK S_READ fault */
5035 5033 ASSERT(svd->amp == NULL);
5036 5034 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5037 5035 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5038 5036 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5039 5037 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5040 5038 /*
5041 5039 * this must be the first ever non S_READ_NOCOW
5042 5040 * softlock for this segment.
5043 5041 */
5044 5042 ASSERT(svd->softlockcnt == 0);
5045 5043 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5046 5044 HAT_REGION_TEXT);
5047 5045 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5048 5046 }
5049 5047 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5050 5048 goto top;
5051 5049 }
5052 5050
5053 5051 /*
5054 5052 * We can't allow the long term use of softlocks for vmpss segments,
5055 5053 * because in some file truncation cases we should be able to demote
5056 5054 * the segment, which requires that there are no softlocks. The
5057 5055 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5058 5056 * segment is S_READ_NOCOW, where the caller holds the address space
5059 5057 * locked as writer and calls softunlock before dropping the as lock.
5060 5058 * S_READ_NOCOW is used by /proc to read memory from another user.
5061 5059 *
5062 5060 * Another deadlock between SOFTLOCK and file truncation can happen
5063 5061 * because segvn_fault_vnodepages() calls the FS one pagesize at
5064 5062 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5065 5063 * can cause a deadlock because the first set of page_t's remain
5066 5064 * locked SE_SHARED. To avoid this, we demote segments on a first
5067 5065 * SOFTLOCK if they have a length greater than the segment's
5068 5066 * page size.
5069 5067 *
5070 5068 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5071 5069 * the access type is S_READ_NOCOW and the fault length is less than
5072 5070 * or equal to the segment's page size. While this is quite restrictive,
5073 5071 * it should be the most common case of SOFTLOCK against a vmpss
5074 5072 * segment.
5075 5073 *
5076 5074 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5077 5075 * caller makes sure no COW will be caused by another thread for a
5078 5076 * softlocked page.
5079 5077 */
5080 5078 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5081 5079 int demote = 0;
5082 5080
5083 5081 if (rw != S_READ_NOCOW) {
5084 5082 demote = 1;
5085 5083 }
5086 5084 if (!demote && len > PAGESIZE) {
5087 5085 pgsz = page_get_pagesize(seg->s_szc);
5088 5086 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5089 5087 lpgeaddr);
5090 5088 if (lpgeaddr - lpgaddr > pgsz) {
5091 5089 demote = 1;
5092 5090 }
5093 5091 }
5094 5092
5095 5093 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5096 5094
5097 5095 if (demote) {
5098 5096 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5099 5097 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5100 5098 if (seg->s_szc != 0) {
5101 5099 segvn_vmpss_clrszc_cnt++;
5102 5100 ASSERT(svd->softlockcnt == 0);
5103 5101 err = segvn_clrszc(seg);
5104 5102 if (err) {
5105 5103 segvn_vmpss_clrszc_err++;
5106 5104 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5107 5105 return (FC_MAKE_ERR(err));
5108 5106 }
5109 5107 }
5110 5108 ASSERT(seg->s_szc == 0);
5111 5109 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5112 5110 goto top;
5113 5111 }
5114 5112 }
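/*
 * Illustration of the demote decision above (hypothetical numbers,
 * assuming a 4MB large page): an S_READ_NOCOW SOFTLOCK of two
 * PAGESIZE pages that straddles a 4MB boundary expands to a
 * [lpgaddr, lpgeaddr) region of 8MB, so lpgeaddr - lpgaddr > pgsz
 * and the segment is demoted to PAGESIZE mappings before retrying.
 */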
5115 5113
5116 5114 /*
5117 5115 * Check to see if we need to allocate an anon_map structure.
5118 5116 */
5119 5117 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5120 5118 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5121 5119 /*
5122 5120 * Drop the "read" lock on the segment and acquire
5123 5121 * the "write" version since we have to allocate the
5124 5122 * anon_map.
5125 5123 */
5126 5124 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5127 5125 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5128 5126
5129 5127 if (svd->amp == NULL) {
5130 5128 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5131 5129 svd->amp->a_szc = seg->s_szc;
5132 5130 }
5133 5131 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5134 5132
5135 5133 /*
5136 5134 * Start all over again since segment protections
5137 5135 * may have changed after we dropped the "read" lock.
5138 5136 */
5139 5137 goto top;
5140 5138 }
5141 5139
5142 5140 /*
5143 5141 * S_READ_NOCOW vs S_READ distinction was
5144 5142 * only needed for the code above. After
5145 5143 * that we treat it as S_READ.
5146 5144 */
5147 5145 if (rw == S_READ_NOCOW) {
5148 5146 ASSERT(type == F_SOFTLOCK);
5149 5147 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5150 5148 rw = S_READ;
5151 5149 }
5152 5150
5153 5151 amp = svd->amp;
5154 5152
5155 5153 /*
5156 5154 * MADV_SEQUENTIAL work is ignored for large page segments.
5157 5155 */
5158 5156 if (seg->s_szc != 0) {
5159 5157 pgsz = page_get_pagesize(seg->s_szc);
5160 5158 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5161 5159 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5162 5160 if (svd->vp == NULL) {
5163 5161 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5164 5162 lpgeaddr, type, rw, addr, addr + len, brkcow);
5165 5163 } else {
5166 5164 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5167 5165 lpgeaddr, type, rw, addr, addr + len, brkcow);
5168 5166 if (err == IE_RETRY) {
5169 5167 ASSERT(seg->s_szc == 0);
5170 5168 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5171 5169 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5172 5170 goto top;
5173 5171 }
5174 5172 }
5175 5173 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5176 5174 return (err);
5177 5175 }
5178 5176
5179 5177 page = seg_page(seg, addr);
5180 5178 if (amp != NULL) {
5181 5179 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5182 5180 anon_index = svd->anon_index + page;
5183 5181
5184 5182 if (type == F_PROT && rw == S_READ &&
5185 5183 svd->tr_state == SEGVN_TR_OFF &&
5186 5184 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5187 5185 size_t index = anon_index;
5188 5186 struct anon *ap;
5189 5187
5190 5188 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5191 5189 /*
5192 5190 * The fast path could apply to S_WRITE also, except
5193 5191 * that the protection fault could be caused by lazy
5194 5192 * tlb flush when ro->rw. In this case, the pte is
5195 5193 * RW already. But RO in the other cpu's tlb causes
5196 5194 * the fault. Since hat_chgprot won't do anything if
5197 5195 * pte doesn't change, we may end up faulting
5198 5196 * indefinitely until the RO tlb entry gets replaced.
5199 5197 */
5200 5198 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5201 5199 anon_array_enter(amp, index, &cookie);
5202 5200 ap = anon_get_ptr(amp->ahp, index);
5203 5201 anon_array_exit(&cookie);
5204 5202 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5205 5203 ANON_LOCK_EXIT(&amp->a_rwlock);
5206 5204 goto slow;
5207 5205 }
5208 5206 }
5209 5207 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5210 5208 ANON_LOCK_EXIT(&amp->a_rwlock);
5211 5209 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5212 5210 return (0);
5213 5211 }
5214 5212 }
5215 5213 slow:
5216 5214
5217 5215 if (svd->vpage == NULL)
5218 5216 vpage = NULL;
5219 5217 else
5220 5218 vpage = &svd->vpage[page];
5221 5219
5222 5220 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5223 5221
5224 5222 /*
5225 5223 * If MADV_SEQUENTIAL has been set for the particular page we
5226 5224 * are faulting on, free behind all pages in the segment and put
5227 5225 * them on the free list.
5228 5226 */
5229 5227
5230 5228 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5231 5229 struct vpage *vpp;
5232 5230 ulong_t fanon_index;
5233 5231 size_t fpage;
5234 5232 u_offset_t pgoff, fpgoff;
5235 5233 struct vnode *fvp;
5236 5234 struct anon *fap = NULL;
5237 5235
5238 5236 if (svd->advice == MADV_SEQUENTIAL ||
5239 5237 (svd->pageadvice &&
5240 5238 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5241 5239 pgoff = off - PAGESIZE;
5242 5240 fpage = page - 1;
5243 5241 if (vpage != NULL)
5244 5242 vpp = &svd->vpage[fpage];
5245 5243 if (amp != NULL)
5246 5244 fanon_index = svd->anon_index + fpage;
5247 5245
5248 5246 while (pgoff > svd->offset) {
5249 5247 if (svd->advice != MADV_SEQUENTIAL &&
5250 5248 (!svd->pageadvice || (vpage &&
5251 5249 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5252 5250 break;
5253 5251
5254 5252 /*
5255 5253 * If this is an anon page, we must find the
5256 5254 * correct <vp, offset> for it
5257 5255 */
5258 5256 fap = NULL;
5259 5257 if (amp != NULL) {
5260 5258 ANON_LOCK_ENTER(&amp->a_rwlock,
5261 5259 RW_READER);
5262 5260 anon_array_enter(amp, fanon_index,
5263 5261 &cookie);
5264 5262 fap = anon_get_ptr(amp->ahp,
5265 5263 fanon_index);
5266 5264 if (fap != NULL) {
5267 5265 swap_xlate(fap, &fvp, &fpgoff);
5268 5266 } else {
5269 5267 fpgoff = pgoff;
5270 5268 fvp = svd->vp;
5271 5269 }
5272 5270 anon_array_exit(&cookie);
5273 5271 ANON_LOCK_EXIT(&amp->a_rwlock);
5274 5272 } else {
5275 5273 fpgoff = pgoff;
5276 5274 fvp = svd->vp;
5277 5275 }
5278 5276 if (fvp == NULL)
5279 5277 break; /* XXX */
5280 5278 /*
5281 5279 * Skip pages that are free or have an
5282 5280 * "exclusive" lock.
5283 5281 */
5284 5282 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5285 5283 if (pp == NULL)
5286 5284 break;
5287 5285 /*
5288 5286 * We don't need the page_struct_lock to test
5289 5287 * as this is only advisory; even if we
5290 5288 * acquire it someone might race in and lock
5291 5289 * the page after we unlock and before the
5292 5290 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5293 5291 */
5294 5292 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5295 5293 /*
5296 5294 * Hold the vnode before releasing
5297 5295 * the page lock to prevent it from
5298 5296 * being freed and re-used by some
5299 5297 * other thread.
5300 5298 */
5301 5299 VN_HOLD(fvp);
5302 5300 page_unlock(pp);
5303 5301 /*
5304 5302 * We should build a page list
5305 5303 * to kluster putpages XXX
5306 5304 */
5307 5305 (void) VOP_PUTPAGE(fvp,
5308 5306 (offset_t)fpgoff, PAGESIZE,
5309 5307 (B_DONTNEED|B_FREE|B_ASYNC),
5310 5308 svd->cred, NULL);
5311 5309 VN_RELE(fvp);
5312 5310 } else {
5313 5311 /*
5314 5312 * XXX - Should the loop terminate if
5315 5313 * the page is `locked'?
5316 5314 */
5317 5315 page_unlock(pp);
5318 5316 }
5319 5317 --vpp;
5320 5318 --fanon_index;
5321 5319 pgoff -= PAGESIZE;
5322 5320 }
5323 5321 }
5324 5322 }
5325 5323
5326 5324 plp = pl;
5327 5325 *plp = NULL;
5328 5326 pl_alloc_sz = 0;
5329 5327
5330 5328 /*
5331 5329 * See if we need to call VOP_GETPAGE for
5332 5330 * *any* of the range being faulted on.
5333 5331 * We can skip all of this work if there
5334 5332 * was no original vnode.
5335 5333 */
5336 5334 if (svd->vp != NULL) {
5337 5335 u_offset_t vp_off;
5338 5336 size_t vp_len;
5339 5337 struct anon *ap;
5340 5338 vnode_t *vp;
5341 5339
5342 5340 vp_off = off;
5343 5341 vp_len = len;
5344 5342
5345 5343 if (amp == NULL)
5346 5344 dogetpage = 1;
5347 5345 else {
5348 5346 /*
5349 5347 * Only acquire reader lock to prevent amp->ahp
5350 5348 * from being changed. It's ok to miss pages,
5351 5349 * hence we don't do anon_array_enter
5352 5350 */
5353 5351 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5354 5352 ap = anon_get_ptr(amp->ahp, anon_index);
5355 5353
5356 5354 if (len <= PAGESIZE)
5357 5355 /* inline non_anon() */
5358 5356 dogetpage = (ap == NULL);
5359 5357 else
5360 5358 dogetpage = non_anon(amp->ahp, anon_index,
5361 5359 &vp_off, &vp_len);
5362 5360 ANON_LOCK_EXIT(&amp->a_rwlock);
5363 5361 }
5364 5362
5365 5363 if (dogetpage) {
5366 5364 enum seg_rw arw;
5367 5365 struct as *as = seg->s_as;
5368 5366
5369 5367 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5370 5368 /*
5371 5369 * Page list won't fit in local array,
5372 5370 * allocate one of the needed size.
5373 5371 */
5374 5372 pl_alloc_sz =
5375 5373 (btop(len) + 1) * sizeof (page_t *);
5376 5374 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5377 5375 plp[0] = NULL;
5378 5376 plsz = len;
5379 5377 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5380 5378 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5381 5379 (((size_t)(addr + PAGESIZE) <
5382 5380 (size_t)(seg->s_base + seg->s_size)) &&
5383 5381 hat_probe(as->a_hat, addr + PAGESIZE))) {
5384 5382 /*
5385 5383 * Ask VOP_GETPAGE to return the exact number
5386 5384 * of pages if
5387 5385 * (a) this is a COW fault, or
5388 5386 * (b) this is a software fault, or
5389 5387 * (c) next page is already mapped.
5390 5388 */
5391 5389 plsz = len;
5392 5390 } else {
5393 5391 /*
5394 5392 * Ask VOP_GETPAGE to return adjacent pages
5395 5393 * within the segment.
5396 5394 */
5397 5395 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5398 5396 ((seg->s_base + seg->s_size) - addr));
5399 5397 ASSERT((addr + plsz) <=
5400 5398 (seg->s_base + seg->s_size));
5401 5399 }
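/*
 * Sketch of the resulting plsz (hypothetical numbers): for a fault
 * three pages below the end of the segment with a PVN_GETPAGE_SZ of
 * 64kB, plsz = MIN(64kB, 3 * PAGESIZE) = 3 * PAGESIZE, so the
 * VOP_GETPAGE call below is never asked for pages beyond the segment.
 */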
5402 5400
5403 5401 /*
5404 5402 * Need to get some non-anonymous pages.
5405 5403 * We need to make only one call to GETPAGE to do
5406 5404 * this to prevent certain deadlocking conditions
5407 5405 * when we are doing locking. In this case
5408 5406 * non_anon() should have picked up the smallest
5409 5407 * range which includes all the non-anonymous
5410 5408 * pages in the requested range. We have to
5411 5409 * be careful regarding which rw flag to pass in
5412 5410 * because on a private mapping, the underlying
5413 5411 * object is never allowed to be written.
5414 5412 */
5415 5413 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5416 5414 arw = S_READ;
5417 5415 } else {
5418 5416 arw = rw;
5419 5417 }
5420 5418 vp = svd->vp;
5421 5419 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5422 5420 "segvn_getpage:seg %p addr %p vp %p",
5423 5421 seg, addr, vp);
5424 5422 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5425 5423 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5426 5424 svd->cred, NULL);
5427 5425 if (err) {
5428 5426 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5429 5427 segvn_pagelist_rele(plp);
5430 5428 if (pl_alloc_sz)
5431 5429 kmem_free(plp, pl_alloc_sz);
5432 5430 return (FC_MAKE_ERR(err));
5433 5431 }
5434 5432 if (svd->type == MAP_PRIVATE)
5435 5433 vpprot &= ~PROT_WRITE;
5436 5434 }
5437 5435 }
5438 5436
5439 5437 /*
5440 5438 * N.B. at this time the plp array has all the needed non-anon
5441 5439 * pages in addition to (possibly) having some adjacent pages.
5442 5440 */
5443 5441
5444 5442 /*
5445 5443 * Always acquire the anon_array_lock to prevent
5446 5444 * 2 threads from allocating separate anon slots for
5447 5445 * the same "addr".
5448 5446 *
5449 5447 * If this is a copy-on-write fault and we don't already
5450 5448 * have the anon_array_lock, acquire it to prevent the
5451 5449 * fault routine from handling multiple copy-on-write faults
5452 5450 * on the same "addr" in the same address space.
5453 5451 *
5454 5452 * Only one thread should deal with the fault since after
5455 5453 * it is handled, the other threads can acquire a translation
5456 5454 * to the newly created private page. This prevents two or
5457 5455 * more threads from creating different private pages for the
5458 5456 * same fault.
5459 5457 *
5460 5458 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5461 5459 * to prevent deadlock between this thread and another thread
5462 5460 * which has soft-locked this page and wants to acquire serial_lock.
5463 5461 * ( bug 4026339 )
5464 5462 *
5465 5463 * The fix for bug 4026339 becomes unnecessary when using the
5466 5464 * locking scheme with per amp rwlock and a global set of hash
5467 5465 * lock, anon_array_lock. If we steal a vnode page when low
5468 5466 * on memory and upgrade the page lock through page_rename,
5469 5467 * then the page is PAGE_HANDLED, nothing needs to be done
5470 5468 * for this page after returning from segvn_faultpage.
5471 5469 *
5472 5470 * But really, the page lock should be downgraded after
5473 5471 * the stolen page is page_rename'd.
5474 5472 */
5475 5473
5476 5474 if (amp != NULL)
5477 5475 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5478 5476
5479 5477 /*
5480 5478 * Ok, now loop over the address range and handle faults
5481 5479 */
5482 5480 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5483 5481 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5484 5482 type, rw, brkcow);
5485 5483 if (err) {
5486 5484 if (amp != NULL)
5487 5485 ANON_LOCK_EXIT(&amp->a_rwlock);
5488 5486 if (type == F_SOFTLOCK && a > addr) {
5489 5487 segvn_softunlock(seg, addr, (a - addr),
5490 5488 S_OTHER);
5491 5489 }
5492 5490 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5493 5491 segvn_pagelist_rele(plp);
5494 5492 if (pl_alloc_sz)
5495 5493 kmem_free(plp, pl_alloc_sz);
5496 5494 return (err);
5497 5495 }
5498 5496 if (vpage) {
5499 5497 vpage++;
5500 5498 } else if (svd->vpage) {
5501 5499 page = seg_page(seg, addr);
5502 5500 vpage = &svd->vpage[++page];
5503 5501 }
5504 5502 }
5505 5503
5506 5504 /* Didn't get pages from the underlying fs so we're done */
5507 5505 if (!dogetpage)
5508 5506 goto done;
5509 5507
5510 5508 /*
5511 5509 * Now handle any other pages in the list returned.
5512 5510 * If the page can be used, load up the translations now.
5513 5511 * Note that the for loop will only be entered if "plp"
5514 5512 * is pointing to a non-NULL page pointer which means that
5515 5513 * VOP_GETPAGE() was called and vpprot has been initialized.
5516 5514 */
5517 5515 if (svd->pageprot == 0)
5518 5516 prot = svd->prot & vpprot;
5519 5517
5520 5518
5521 5519 /*
5522 5520 * Large Files: diff should be unsigned value because we started
5523 5521 * supporting > 2GB segment sizes from 2.5.1 and when a
5524 5522 * large file of size > 2GB gets mapped to address space
5525 5523 * the diff value can be > 2GB.
5526 5524 */
5527 5525
5528 5526 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5529 5527 size_t diff;
5530 5528 struct anon *ap;
5531 5529 int anon_index;
5532 5530 anon_sync_obj_t cookie;
5533 5531 int hat_flag = HAT_LOAD_ADV;
5534 5532
5535 5533 if (svd->flags & MAP_TEXT) {
5536 5534 hat_flag |= HAT_LOAD_TEXT;
5537 5535 }
5538 5536
5539 5537 if (pp == PAGE_HANDLED)
5540 5538 continue;
5541 5539
5542 5540 if (svd->tr_state != SEGVN_TR_ON &&
5543 5541 pp->p_offset >= svd->offset &&
5544 5542 pp->p_offset < svd->offset + seg->s_size) {
5545 5543
5546 5544 diff = pp->p_offset - svd->offset;
5547 5545
5548 5546 /*
5549 5547 * Large Files: Following is the assertion
5550 5548 * validating the above cast.
5551 5549 */
5552 5550 ASSERT(svd->vp == pp->p_vnode);
5553 5551
5554 5552 page = btop(diff);
5555 5553 if (svd->pageprot)
5556 5554 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5557 5555
5558 5556 /*
5559 5557 * Prevent other threads in the address space from
5560 5558 * creating private pages (i.e., allocating anon slots)
5561 5559 * while we are in the process of loading translations
5562 5560 * to additional pages returned by the underlying
5563 5561 * object.
5564 5562 */
5565 5563 if (amp != NULL) {
5566 5564 anon_index = svd->anon_index + page;
5567 5565 anon_array_enter(amp, anon_index, &cookie);
5568 5566 ap = anon_get_ptr(amp->ahp, anon_index);
5569 5567 }
5570 5568 if ((amp == NULL) || (ap == NULL)) {
5571 5569 if (IS_VMODSORT(pp->p_vnode) ||
5572 5570 enable_mbit_wa) {
5573 5571 if (rw == S_WRITE)
5574 5572 hat_setmod(pp);
5575 5573 else if (rw != S_OTHER &&
5576 5574 !hat_ismod(pp))
5577 5575 prot &= ~PROT_WRITE;
5578 5576 }
5579 5577 /*
5580 5578 * Skip mapping read ahead pages marked
5581 5579 * for migration, so they will get migrated
5582 5580 * properly on fault
5583 5581 */
5584 5582 ASSERT(amp == NULL ||
5585 5583 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5586 5584 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5587 5585 hat_memload_region(hat,
5588 5586 seg->s_base + diff,
5589 5587 pp, prot, hat_flag,
5590 5588 svd->rcookie);
5591 5589 }
5592 5590 }
5593 5591 if (amp != NULL)
5594 5592 anon_array_exit(&cookie);
5595 5593 }
5596 5594 page_unlock(pp);
5597 5595 }
5598 5596 done:
5599 5597 if (amp != NULL)
5600 5598 ANON_LOCK_EXIT(&amp->a_rwlock);
5601 5599 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5602 5600 if (pl_alloc_sz)
5603 5601 kmem_free(plp, pl_alloc_sz);
5604 5602 return (0);
5605 5603 }
5606 5604
5607 5605 /*
5608 5606 * This routine is used to start I/O on pages asynchronously. XXX it will
5609 5607 * only create PAGESIZE pages. At fault time they will be relocated into
5610 5608 * larger pages.
5611 5609 */
5612 5610 static faultcode_t
5613 5611 segvn_faulta(struct seg *seg, caddr_t addr)
5614 5612 {
5615 5613 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5616 5614 int err;
5617 5615 struct anon_map *amp;
5618 5616 vnode_t *vp;
5619 5617
5620 5618 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5621 5619
5622 5620 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5623 5621 if ((amp = svd->amp) != NULL) {
5624 5622 struct anon *ap;
5625 5623
5626 5624 /*
5627 5625 * Reader lock to prevent amp->ahp from being changed.
5628 5626 * This is advisory, it's ok to miss a page, so
5629 5627 * we don't do anon_array_enter lock.
5630 5628 */
5631 5629 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5632 5630 if ((ap = anon_get_ptr(amp->ahp,
5633 5631 svd->anon_index + seg_page(seg, addr))) != NULL) {
5634 5632
5635 5633 err = anon_getpage(&ap, NULL, NULL,
5636 5634 0, seg, addr, S_READ, svd->cred);
5637 5635
5638 5636 ANON_LOCK_EXIT(&amp->a_rwlock);
5639 5637 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5640 5638 if (err)
5641 5639 return (FC_MAKE_ERR(err));
5642 5640 return (0);
5643 5641 }
5644 5642 ANON_LOCK_EXIT(&amp->a_rwlock);
5645 5643 }
5646 5644
5647 5645 if (svd->vp == NULL) {
5648 5646 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5649 5647 return (0); /* zfod page - do nothing now */
5650 5648 }
5651 5649
5652 5650 vp = svd->vp;
5653 5651 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5654 5652 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5655 5653 err = VOP_GETPAGE(vp,
5656 5654 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5657 5655 PAGESIZE, NULL, NULL, 0, seg, addr,
5658 5656 S_OTHER, svd->cred, NULL);
5659 5657
5660 5658 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5661 5659 if (err)
5662 5660 return (FC_MAKE_ERR(err));
5663 5661 return (0);
5664 5662 }
5665 5663
5666 5664 static int
5667 5665 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5668 5666 {
5669 5667 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5670 5668 struct vpage *cvp, *svp, *evp;
5671 5669 struct vnode *vp;
5672 5670 size_t pgsz;
5673 5671 pgcnt_t pgcnt;
5674 5672 anon_sync_obj_t cookie;
5675 5673 int unload_done = 0;
5676 5674
5677 5675 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5678 5676
5679 5677 if ((svd->maxprot & prot) != prot)
5680 5678 return (EACCES); /* violated maxprot */
5681 5679
5682 5680 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5683 5681
5684 5682 /* return if prot is the same */
5685 5683 if (!svd->pageprot && svd->prot == prot) {
5686 5684 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5687 5685 return (0);
5688 5686 }
5689 5687
5690 5688 /*
5691 5689 * Since we change protections we first have to flush the cache.
5692 5690 * This makes sure all the pagelock calls have to recheck
5693 5691 * protections.
5694 5692 */
5695 5693 if (svd->softlockcnt > 0) {
5696 5694 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5697 5695
5698 5696 /*
5699 5697 * If this is a shared segment, a non-zero softlockcnt
5700 5698 * means locked pages are still in use.
5701 5699 */
5702 5700 if (svd->type == MAP_SHARED) {
5703 5701 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5704 5702 return (EAGAIN);
5705 5703 }
5706 5704
5707 5705 /*
5708 5706 * Since we do have the segvn writers lock nobody can fill
5709 5707 * the cache with entries belonging to this seg during
5710 5708 * the purge. The flush either succeeds or we still have
5711 5709 * pending I/Os.
5712 5710 */
5713 5711 segvn_purge(seg);
5714 5712 if (svd->softlockcnt > 0) {
5715 5713 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5716 5714 return (EAGAIN);
5717 5715 }
5718 5716 }
5719 5717
5720 5718 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5721 5719 ASSERT(svd->amp == NULL);
5722 5720 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5723 5721 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5724 5722 HAT_REGION_TEXT);
5725 5723 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5726 5724 unload_done = 1;
5727 5725 } else if (svd->tr_state == SEGVN_TR_INIT) {
5728 5726 svd->tr_state = SEGVN_TR_OFF;
5729 5727 } else if (svd->tr_state == SEGVN_TR_ON) {
5730 5728 ASSERT(svd->amp != NULL);
5731 5729 segvn_textunrepl(seg, 0);
5732 5730 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5733 5731 unload_done = 1;
5734 5732 }
5735 5733
5736 5734 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5737 5735 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5738 5736 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5739 5737 segvn_inval_trcache(svd->vp);
5740 5738 }
5741 5739 if (seg->s_szc != 0) {
5742 5740 int err;
5743 5741 pgsz = page_get_pagesize(seg->s_szc);
5744 5742 pgcnt = pgsz >> PAGESHIFT;
5745 5743 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5746 5744 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5747 5745 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5748 5746 ASSERT(seg->s_base != addr || seg->s_size != len);
5749 5747 /*
5750 5748 * If we are holding the as lock as a reader then
5751 5749 * we need to return IE_RETRY and let the as
5752 5750 * layer drop and re-acquire the lock as a writer.
5753 5751 */
5754 5752 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5755 5753 return (IE_RETRY);
5756 5754 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5757 5755 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5758 5756 err = segvn_demote_range(seg, addr, len,
5759 5757 SDR_END, 0);
5760 5758 } else {
5761 5759 uint_t szcvec = map_pgszcvec(seg->s_base,
5762 5760 pgsz, (uintptr_t)seg->s_base,
5763 5761 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5764 5762 err = segvn_demote_range(seg, addr, len,
5765 5763 SDR_END, szcvec);
5766 5764 }
5767 5765 if (err == 0)
5768 5766 return (IE_RETRY);
5769 5767 if (err == ENOMEM)
5770 5768 return (IE_NOMEM);
5771 5769 return (err);
5772 5770 }
5773 5771 }
5774 5772
5775 5773
5776 5774 /*
5777 5775 * If it's a private mapping and we're making it writable then we
5778 5776 * may have to reserve the additional swap space now. If we are
5779 5777 * making writable only a part of the segment then we use its vpage
5780 5778 * array to keep a record of the pages for which we have reserved
5781 5779 * swap. In this case we set the pageswap field in the segment's
5782 5780 * segvn structure to record this.
5783 5781 *
5784 5782 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5785 5783 * removing write permission on the entire segment and we haven't
5786 5784 * modified any pages, we can release the swap space.
5787 5785 */
5788 5786 if (svd->type == MAP_PRIVATE) {
5789 5787 if (prot & PROT_WRITE) {
5790 5788 if (!(svd->flags & MAP_NORESERVE) &&
5791 5789 !(svd->swresv && svd->pageswap == 0)) {
5792 5790 size_t sz = 0;
5793 5791
5794 5792 /*
5795 5793 * Start by determining how much swap
5796 5794 * space is required.
5797 5795 */
5798 5796 if (addr == seg->s_base &&
5799 5797 len == seg->s_size &&
5800 5798 svd->pageswap == 0) {
5801 5799 /* The whole segment */
5802 5800 sz = seg->s_size;
5803 5801 } else {
5804 5802 /*
5805 5803 * Make sure that the vpage array
5806 5804 * exists, and make a note of the
5807 5805 * range of elements corresponding
5808 5806 * to len.
5809 5807 */
5810 5808 segvn_vpage(seg);
5811 5809 if (svd->vpage == NULL) {
5812 5810 SEGVN_LOCK_EXIT(seg->s_as,
5813 5811 &svd->lock);
5814 5812 return (ENOMEM);
5815 5813 }
5816 5814 svp = &svd->vpage[seg_page(seg, addr)];
5817 5815 evp = &svd->vpage[seg_page(seg,
5818 5816 addr + len)];
5819 5817
5820 5818 if (svd->pageswap == 0) {
5821 5819 /*
5822 5820 * This is the first time we've
5823 5821 * asked for a part of this
5824 5822 * segment, so we need to
5825 5823 * reserve everything we've
5826 5824 * been asked for.
5827 5825 */
5828 5826 sz = len;
5829 5827 } else {
5830 5828 /*
5831 5829 * We have to count the number
5832 5830 * of pages required.
5833 5831 */
5834 5832 for (cvp = svp; cvp < evp;
5835 5833 cvp++) {
5836 5834 if (!VPP_ISSWAPRES(cvp))
5837 5835 sz++;
5838 5836 }
5839 5837 sz <<= PAGESHIFT;
5840 5838 }
5841 5839 }
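/*
 * Worked example (hypothetical): on a segment already marked
 * pageswap, write-enabling a 16-page range in which 6 pages
 * already have VPP_ISSWAPRES set leaves sz = 10 << PAGESHIFT
 * bytes of swap still to reserve below.
 */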
5842 5840
5843 5841 /* Try to reserve the necessary swap. */
5844 5842 if (anon_resv_zone(sz,
5845 5843 seg->s_as->a_proc->p_zone) == 0) {
5846 5844 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5847 5845 return (IE_NOMEM);
5848 5846 }
5849 5847
5850 5848 /*
5851 5849 * Make a note of how much swap space
5852 5850 * we've reserved.
5853 5851 */
5854 5852 if (svd->pageswap == 0 && sz == seg->s_size) {
5855 5853 svd->swresv = sz;
5856 5854 } else {
5857 5855 ASSERT(svd->vpage != NULL);
5858 5856 svd->swresv += sz;
5859 5857 svd->pageswap = 1;
5860 5858 for (cvp = svp; cvp < evp; cvp++) {
5861 5859 if (!VPP_ISSWAPRES(cvp))
5862 5860 VPP_SETSWAPRES(cvp);
5863 5861 }
5864 5862 }
5865 5863 }
5866 5864 } else {
5867 5865 /*
5868 5866 * Swap space is released only if this segment
5869 5867 * does not map anonymous memory, since read faults
5870 5868 * on such segments still need an anon slot to read
5871 5869 * in the data.
5872 5870 */
5873 5871 if (svd->swresv != 0 && svd->vp != NULL &&
5874 5872 svd->amp == NULL && addr == seg->s_base &&
5875 5873 len == seg->s_size && svd->pageprot == 0) {
5876 5874 ASSERT(svd->pageswap == 0);
5877 5875 anon_unresv_zone(svd->swresv,
5878 5876 seg->s_as->a_proc->p_zone);
5879 5877 svd->swresv = 0;
5880 5878 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5881 5879 "anon proc:%p %lu %u", seg, 0, 0);
5882 5880 }
5883 5881 }
5884 5882 }
5885 5883
5886 5884 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5887 5885 if (svd->prot == prot) {
5888 5886 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5889 5887 return (0); /* all done */
5890 5888 }
5891 5889 svd->prot = (uchar_t)prot;
5892 5890 } else if (svd->type == MAP_PRIVATE) {
5893 5891 struct anon *ap = NULL;
5894 5892 page_t *pp;
5895 5893 u_offset_t offset, off;
5896 5894 struct anon_map *amp;
5897 5895 ulong_t anon_idx = 0;
5898 5896
5899 5897 /*
5900 5898 * A vpage structure exists or else the change does not
5901 5899 * involve the entire segment. Establish a vpage structure
5902 5900 * if none is there. Then, for each page in the range,
5903 5901 * adjust its individual permissions. Note that write-
5904 5902 * enabling a MAP_PRIVATE page can affect the claims for
5905 5903 * locked down memory. Overcommitting memory terminates
5906 5904 * the operation.
5907 5905 */
5908 5906 segvn_vpage(seg);
5909 5907 if (svd->vpage == NULL) {
5910 5908 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5911 5909 return (ENOMEM);
5912 5910 }
5913 5911 svd->pageprot = 1;
5914 5912 if ((amp = svd->amp) != NULL) {
5915 5913 anon_idx = svd->anon_index + seg_page(seg, addr);
5916 5914 ASSERT(seg->s_szc == 0 ||
5917 5915 IS_P2ALIGNED(anon_idx, pgcnt));
5918 5916 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5919 5917 }
5920 5918
5921 5919 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5922 5920 evp = &svd->vpage[seg_page(seg, addr + len)];
5923 5921
5924 5922 /*
5925 5923 * See Statement at the beginning of segvn_lockop regarding
5926 5924 * the way cowcnts and lckcnts are handled.
5927 5925 */
5928 5926 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5929 5927
5930 5928 if (seg->s_szc != 0) {
5931 5929 if (amp != NULL) {
5932 5930 anon_array_enter(amp, anon_idx,
5933 5931 &cookie);
5934 5932 }
5935 5933 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5936 5934 !segvn_claim_pages(seg, svp, offset,
5937 5935 anon_idx, prot)) {
5938 5936 if (amp != NULL) {
5939 5937 anon_array_exit(&cookie);
5940 5938 }
5941 5939 break;
5942 5940 }
5943 5941 if (amp != NULL) {
5944 5942 anon_array_exit(&cookie);
5945 5943 }
5946 5944 anon_idx++;
5947 5945 } else {
5948 5946 if (amp != NULL) {
5949 5947 anon_array_enter(amp, anon_idx,
5950 5948 &cookie);
5951 5949 ap = anon_get_ptr(amp->ahp, anon_idx++);
5952 5950 }
5953 5951
5954 5952 if (VPP_ISPPLOCK(svp) &&
5955 5953 VPP_PROT(svp) != prot) {
5956 5954
5957 5955 if (amp == NULL || ap == NULL) {
5958 5956 vp = svd->vp;
5959 5957 off = offset;
5960 5958 } else
5961 5959 swap_xlate(ap, &vp, &off);
5962 5960 if (amp != NULL)
5963 5961 anon_array_exit(&cookie);
5964 5962
5965 5963 if ((pp = page_lookup(vp, off,
5966 5964 SE_SHARED)) == NULL) {
5967 5965 panic("segvn_setprot: no page");
5968 5966 /*NOTREACHED*/
5969 5967 }
5970 5968 ASSERT(seg->s_szc == 0);
5971 5969 if ((VPP_PROT(svp) ^ prot) &
5972 5970 PROT_WRITE) {
5973 5971 if (prot & PROT_WRITE) {
5974 5972 if (!page_addclaim(
5975 5973 pp)) {
5976 5974 page_unlock(pp);
5977 5975 break;
5978 5976 }
5979 5977 } else {
5980 5978 if (!page_subclaim(
5981 5979 pp)) {
5982 5980 page_unlock(pp);
5983 5981 break;
5984 5982 }
5985 5983 }
5986 5984 }
5987 5985 page_unlock(pp);
5988 5986 } else if (amp != NULL)
5989 5987 anon_array_exit(&cookie);
5990 5988 }
5991 5989 VPP_SETPROT(svp, prot);
5992 5990 offset += PAGESIZE;
5993 5991 }
5994 5992 if (amp != NULL)
5995 5993 ANON_LOCK_EXIT(&amp->a_rwlock);
5996 5994
5997 5995 /*
5998 5996 * Did we terminate prematurely? If so, simply unload
5999 5997 * the translations to the things we've updated so far.
6000 5998 */
6001 5999 if (svp != evp) {
6002 6000 if (unload_done) {
6003 6001 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6004 6002 return (IE_NOMEM);
6005 6003 }
6006 6004 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6007 6005 PAGESIZE;
6008 6006 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
6009 6007 if (len != 0)
6010 6008 hat_unload(seg->s_as->a_hat, addr,
6011 6009 len, HAT_UNLOAD);
6012 6010 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6013 6011 return (IE_NOMEM);
6014 6012 }
6015 6013 } else {
6016 6014 segvn_vpage(seg);
6017 6015 if (svd->vpage == NULL) {
6018 6016 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6019 6017 return (ENOMEM);
6020 6018 }
6021 6019 svd->pageprot = 1;
6022 6020 evp = &svd->vpage[seg_page(seg, addr + len)];
6023 6021 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6024 6022 VPP_SETPROT(svp, prot);
6025 6023 }
6026 6024 }
6027 6025
6028 6026 if (unload_done) {
6029 6027 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6030 6028 return (0);
6031 6029 }
6032 6030
6033 6031 if (((prot & PROT_WRITE) != 0 &&
6034 6032 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
6035 6033 (prot & ~PROT_USER) == PROT_NONE) {
6036 6034 /*
6037 6035 * Either private or shared data with write access (in
6038 6036 * which case we need to throw out all former translations
6039 6037 * so that we get the right translations set up on fault
6040 6038 * and we don't allow write access to any copy-on-write pages
6041 6039 * that might be around or to prevent write access to pages
6042 6040 * representing holes in a file), or we don't have permission
6043 6041 * to access the memory at all (in which case we have to
6044 6042 * unload any current translations that might exist).
6045 6043 */
6046 6044 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
6047 6045 } else {
6048 6046 /*
6049 6047 * A shared mapping or a private mapping in which write
6050 6048 * protection is going to be denied - just change all the
6051 6049 * protections over the range of addresses in question.
6052 6050 * segvn does not support any other attributes other
6053 6051 * than prot so we can use hat_chgattr.
6054 6052 */
6055 6053 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6056 6054 }
6057 6055
6058 6056 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6059 6057
6060 6058 return (0);
6061 6059 }
6062 6060
6063 6061 /*
6064 6062 * segvn_setpagesize is called via segop_setpagesize from as_setpagesize,
6065 6063 * to determine if the seg is capable of mapping the requested szc.
6066 6064 */
6067 6065 static int
6068 6066 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6069 6067 {
6070 6068 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6071 6069 struct segvn_data *nsvd;
6072 6070 struct anon_map *amp = svd->amp;
6073 6071 struct seg *nseg;
6074 6072 caddr_t eaddr = addr + len, a;
6075 6073 size_t pgsz = page_get_pagesize(szc);
6076 6074 pgcnt_t pgcnt = page_get_pagecnt(szc);
6077 6075 int err;
6078 6076 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6079 6077
6080 6078 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6081 6079 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6082 6080
6083 6081 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6084 6082 return (0);
6085 6083 }
6086 6084
6087 6085 /*
6088 6086 * addr should always be pgsz aligned but eaddr may be misaligned if
6089 6087 * it's at the end of the segment.
6090 6088 *
6091 6089 * XXX we should assert this condition since as_setpagesize() logic
6092 6090 * guarantees it.
6093 6091 */
6094 6092 if (!IS_P2ALIGNED(addr, pgsz) ||
6095 6093 (!IS_P2ALIGNED(eaddr, pgsz) &&
6096 6094 eaddr != seg->s_base + seg->s_size)) {
6097 6095
6098 6096 segvn_setpgsz_align_err++;
6099 6097 return (EINVAL);
6100 6098 }
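/*
 * Hypothetical example of the alignment rule above: for a szc with
 * pgsz = 4MB, addr must be 4MB aligned, while eaddr may be
 * misaligned only when it coincides with the end of the segment,
 * e.g. a 6MB segment mapped at a 4MB-aligned base address.
 */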
6101 6099
6102 6100 if (amp != NULL && svd->type == MAP_SHARED) {
6103 6101 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6104 6102 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6105 6103
6106 6104 segvn_setpgsz_anon_align_err++;
6107 6105 return (EINVAL);
6108 6106 }
6109 6107 }
6110 6108
6111 6109 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6112 6110 szc > segvn_maxpgszc) {
6113 6111 return (EINVAL);
6114 6112 }
6115 6113
6116 6114 /* paranoid check */
6117 6115 if (svd->vp != NULL &&
6118 6116 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6119 6117 return (EINVAL);
6120 6118 }
6121 6119
6122 6120 if (seg->s_szc == 0 && svd->vp != NULL &&
6123 6121 map_addr_vacalign_check(addr, off)) {
6124 6122 return (EINVAL);
6125 6123 }
6126 6124
6127 6125 /*
6128 6126 * Check that protections are the same within new page
6129 6127 * size boundaries.
6130 6128 */
6131 6129 if (svd->pageprot) {
6132 6130 for (a = addr; a < eaddr; a += pgsz) {
6133 6131 if ((a + pgsz) > eaddr) {
6134 6132 if (!sameprot(seg, a, eaddr - a)) {
6135 6133 return (EINVAL);
6136 6134 }
6137 6135 } else {
6138 6136 if (!sameprot(seg, a, pgsz)) {
6139 6137 return (EINVAL);
6140 6138 }
6141 6139 }
6142 6140 }
6143 6141 }
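/*
 * E.g. (hypothetical): with pgsz = 64kB, an 80kB range is checked
 * above as one full 64kB chunk followed by a 16kB tail via
 * sameprot(seg, a, eaddr - a).
 */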
6144 6142
6145 6143 /*
6146 6144 * Since we are changing page size we first have to flush
6147 6145 * the cache. This makes sure all the pagelock calls have
6148 6146 * to recheck protections.
6149 6147 */
6150 6148 if (svd->softlockcnt > 0) {
6151 6149 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6152 6150
6153 6151 /*
6154 6152 * If this is a shared segment, a non-zero softlockcnt
6155 6153 * means locked pages are still in use.
6156 6154 */
6157 6155 if (svd->type == MAP_SHARED) {
6158 6156 return (EAGAIN);
6159 6157 }
6160 6158
6161 6159 /*
6162 6160 * Since we do have the segvn writers lock nobody can fill
6163 6161 * the cache with entries belonging to this seg during
6164 6162 * the purge. The flush either succeeds or we still have
6165 6163 * pending I/Os.
6166 6164 */
6167 6165 segvn_purge(seg);
6168 6166 if (svd->softlockcnt > 0) {
6169 6167 return (EAGAIN);
6170 6168 }
6171 6169 }
6172 6170
6173 6171 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6174 6172 ASSERT(svd->amp == NULL);
6175 6173 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6176 6174 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6177 6175 HAT_REGION_TEXT);
6178 6176 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6179 6177 } else if (svd->tr_state == SEGVN_TR_INIT) {
6180 6178 svd->tr_state = SEGVN_TR_OFF;
6181 6179 } else if (svd->tr_state == SEGVN_TR_ON) {
6182 6180 ASSERT(svd->amp != NULL);
6183 6181 segvn_textunrepl(seg, 1);
6184 6182 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6185 6183 amp = NULL;
6186 6184 }
6187 6185
6188 6186 /*
6189 6187 * Operation for sub range of existing segment.
6190 6188 */
6191 6189 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6192 6190 if (szc < seg->s_szc) {
6193 6191 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6194 6192 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6195 6193 if (err == 0) {
6196 6194 return (IE_RETRY);
6197 6195 }
6198 6196 if (err == ENOMEM) {
6199 6197 return (IE_NOMEM);
6200 6198 }
6201 6199 return (err);
6202 6200 }
6203 6201 if (addr != seg->s_base) {
6204 6202 nseg = segvn_split_seg(seg, addr);
6205 6203 if (eaddr != (nseg->s_base + nseg->s_size)) {
6206 6204 /* eaddr is szc aligned */
6207 6205 (void) segvn_split_seg(nseg, eaddr);
6208 6206 }
6209 6207 return (IE_RETRY);
6210 6208 }
6211 6209 if (eaddr != (seg->s_base + seg->s_size)) {
6212 6210 /* eaddr is szc aligned */
6213 6211 (void) segvn_split_seg(seg, eaddr);
6214 6212 }
6215 6213 return (IE_RETRY);
6216 6214 }
6217 6215
6218 6216 /*
6219 6217 * Break any low level sharing and reset seg->s_szc to 0.
6220 6218 */
6221 6219 if ((err = segvn_clrszc(seg)) != 0) {
6222 6220 if (err == ENOMEM) {
6223 6221 err = IE_NOMEM;
6224 6222 }
6225 6223 return (err);
6226 6224 }
6227 6225 ASSERT(seg->s_szc == 0);
6228 6226
6229 6227 /*
6230 6228 * If the end of the current segment is not pgsz aligned
6231 6229 * then attempt to concatenate with the next segment.
6232 6230 */
6233 6231 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6234 6232 nseg = AS_SEGNEXT(seg->s_as, seg);
6235 6233 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6236 6234 return (ENOMEM);
6237 6235 }
6238 6236 if (nseg->s_ops != &segvn_ops) {
6239 6237 return (EINVAL);
6240 6238 }
6241 6239 nsvd = (struct segvn_data *)nseg->s_data;
6242 6240 if (nsvd->softlockcnt > 0) {
6243 6241 /*
6244 6242 * If this is a shared segment, a non-zero softlockcnt
6245 6243 * means locked pages are still in use.
6246 6244 */
6247 6245 if (nsvd->type == MAP_SHARED) {
6248 6246 return (EAGAIN);
6249 6247 }
6250 6248 segvn_purge(nseg);
6251 6249 if (nsvd->softlockcnt > 0) {
6252 6250 return (EAGAIN);
6253 6251 }
6254 6252 }
6255 6253 err = segvn_clrszc(nseg);
6256 6254 if (err == ENOMEM) {
6257 6255 err = IE_NOMEM;
6258 6256 }
6259 6257 if (err != 0) {
6260 6258 return (err);
6261 6259 }
6262 6260 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6263 6261 err = segvn_concat(seg, nseg, 1);
6264 6262 if (err == -1) {
6265 6263 return (EINVAL);
6266 6264 }
6267 6265 if (err == -2) {
6268 6266 return (IE_NOMEM);
6269 6267 }
6270 6268 return (IE_RETRY);
6271 6269 }
6272 6270
6273 6271 /*
6274 6272 * May need to re-align anon array to
6275 6273 * new szc.
6276 6274 */
6277 6275 if (amp != NULL) {
6278 6276 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6279 6277 struct anon_hdr *nahp;
6280 6278
6281 6279 ASSERT(svd->type == MAP_PRIVATE);
6282 6280
6283 6281 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6284 6282 ASSERT(amp->refcnt == 1);
6285 6283 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6286 6284 if (nahp == NULL) {
6287 6285 ANON_LOCK_EXIT(&amp->a_rwlock);
6288 6286 return (IE_NOMEM);
6289 6287 }
6290 6288 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6291 6289 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6292 6290 anon_release(nahp, btop(amp->size));
6293 6291 ANON_LOCK_EXIT(&amp->a_rwlock);
6294 6292 return (IE_NOMEM);
6295 6293 }
6296 6294 anon_release(amp->ahp, btop(amp->size));
6297 6295 amp->ahp = nahp;
6298 6296 svd->anon_index = 0;
6299 6297 ANON_LOCK_EXIT(&amp->a_rwlock);
6300 6298 }
6301 6299 }
6302 6300 if (svd->vp != NULL && szc != 0) {
6303 6301 struct vattr va;
6304 6302 u_offset_t eoffpage = svd->offset;
6305 6303 va.va_mask = AT_SIZE;
6306 6304 eoffpage += seg->s_size;
6307 6305 eoffpage = btopr(eoffpage);
6308 6306 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6309 6307 segvn_setpgsz_getattr_err++;
6310 6308 return (EINVAL);
6311 6309 }
6312 6310 if (btopr(va.va_size) < eoffpage) {
6313 6311 segvn_setpgsz_eof_err++;
6314 6312 return (EINVAL);
6315 6313 }
6316 6314 if (amp != NULL) {
6317 6315 /*
6318 6316 * anon_fill_cow_holes() may call VOP_GETPAGE().
6319 6317 * don't take anon map lock here to avoid holding it
6320 6318 * across VOP_GETPAGE() calls that may call back into
6321 6319 * segvn for klustering checks. We don't really need
6322 6320 * anon map lock here since it's a private segment and
6323 6321 * we hold as level lock as writers.
6324 6322 */
6325 6323 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6326 6324 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6327 6325 seg->s_size, szc, svd->prot, svd->vpage,
6328 6326 svd->cred)) != 0) {
6329 6327 return (EINVAL);
6330 6328 }
6331 6329 }
6332 6330 segvn_setvnode_mpss(svd->vp);
6333 6331 }
6334 6332
6335 6333 if (amp != NULL) {
6336 6334 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6337 6335 if (svd->type == MAP_PRIVATE) {
6338 6336 amp->a_szc = szc;
6339 6337 } else if (szc > amp->a_szc) {
6340 6338 amp->a_szc = szc;
6341 6339 }
6342 6340 ANON_LOCK_EXIT(&amp->a_rwlock);
6343 6341 }
6344 6342
6345 6343 seg->s_szc = szc;
6346 6344
6347 6345 return (0);
6348 6346 }
6349 6347
6350 6348 static int
6351 6349 segvn_clrszc(struct seg *seg)
6352 6350 {
6353 6351 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6354 6352 struct anon_map *amp = svd->amp;
6355 6353 size_t pgsz;
6356 6354 pgcnt_t pages;
6357 6355 int err = 0;
6358 6356 caddr_t a = seg->s_base;
6359 6357 caddr_t ea = a + seg->s_size;
6360 6358 ulong_t an_idx = svd->anon_index;
6361 6359 vnode_t *vp = svd->vp;
6362 6360 struct vpage *vpage = svd->vpage;
6363 6361 page_t *anon_pl[1 + 1], *pp;
6364 6362 struct anon *ap, *oldap;
6365 6363 uint_t prot = svd->prot, vpprot;
6366 6364 int pageflag = 0;
6367 6365
6368 6366 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6369 6367 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6370 6368 ASSERT(svd->softlockcnt == 0);
6371 6369
6372 6370 if (vp == NULL && amp == NULL) {
6373 6371 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6374 6372 seg->s_szc = 0;
6375 6373 return (0);
6376 6374 }
6377 6375
6378 6376 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6379 6377 ASSERT(svd->amp == NULL);
6380 6378 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6381 6379 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6382 6380 HAT_REGION_TEXT);
6383 6381 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6384 6382 } else if (svd->tr_state == SEGVN_TR_ON) {
6385 6383 ASSERT(svd->amp != NULL);
6386 6384 segvn_textunrepl(seg, 1);
6387 6385 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6388 6386 amp = NULL;
6389 6387 } else {
6390 6388 if (svd->tr_state != SEGVN_TR_OFF) {
6391 6389 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6392 6390 svd->tr_state = SEGVN_TR_OFF;
6393 6391 }
6394 6392
6395 6393 /*
6396 6394 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6397 6395 * unload argument is 0 when we are freeing the segment
6398 6396 * and unload was already done.
6399 6397 */
6400 6398 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6401 6399 HAT_UNLOAD_UNMAP);
6402 6400 }
6403 6401
6404 6402 if (amp == NULL || svd->type == MAP_SHARED) {
6405 6403 seg->s_szc = 0;
6406 6404 return (0);
6407 6405 }
6408 6406
6409 6407 pgsz = page_get_pagesize(seg->s_szc);
6410 6408 pages = btop(pgsz);
6411 6409
6412 6410 /*
6413 6411 * XXX anon rwlock is not really needed because this is a
6414 6412 * private segment and we are writers.
6415 6413 */
6416 6414 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6417 6415
6418 6416 for (; a < ea; a += pgsz, an_idx += pages) {
6419 6417 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6420 6418 ASSERT(vpage != NULL || svd->pageprot == 0);
6421 6419 if (vpage != NULL) {
6422 6420 ASSERT(sameprot(seg, a, pgsz));
6423 6421 prot = VPP_PROT(vpage);
6424 6422 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6425 6423 }
6426 6424 if (seg->s_szc != 0) {
6427 6425 ASSERT(vp == NULL || anon_pages(amp->ahp,
6428 6426 an_idx, pages) == pages);
6429 6427 if ((err = anon_map_demotepages(amp, an_idx,
6430 6428 seg, a, prot, vpage, svd->cred)) != 0) {
6431 6429 goto out;
6432 6430 }
6433 6431 } else {
6434 6432 if (oldap->an_refcnt == 1) {
6435 6433 continue;
6436 6434 }
6437 6435 if ((err = anon_getpage(&oldap, &vpprot,
6438 6436 anon_pl, PAGESIZE, seg, a, S_READ,
6439 6437 svd->cred))) {
6440 6438 goto out;
6441 6439 }
6442 6440 if ((pp = anon_private(&ap, seg, a, prot,
6443 6441 anon_pl[0], pageflag, svd->cred)) == NULL) {
6444 6442 err = ENOMEM;
6445 6443 goto out;
6446 6444 }
6447 6445 anon_decref(oldap);
6448 6446 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6449 6447 ANON_SLEEP);
6450 6448 page_unlock(pp);
6451 6449 }
6452 6450 }
6453 6451 vpage = (vpage == NULL) ? NULL : vpage + pages;
6454 6452 }
6455 6453
6456 6454 amp->a_szc = 0;
6457 6455 seg->s_szc = 0;
6458 6456 out:
6459 6457 ANON_LOCK_EXIT(&amp->a_rwlock);
6460 6458 return (err);
6461 6459 }
6462 6460
6463 6461 static int
6464 6462 segvn_claim_pages(
6465 6463 struct seg *seg,
6466 6464 struct vpage *svp,
6467 6465 u_offset_t off,
6468 6466 ulong_t anon_idx,
6469 6467 uint_t prot)
6470 6468 {
6471 6469 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6472 6470 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6473 6471 page_t **ppa;
6474 6472 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6475 6473 struct anon_map *amp = svd->amp;
6476 6474 struct vpage *evp = svp + pgcnt;
6477 6475 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6478 6476 + seg->s_base;
6479 6477 struct anon *ap;
6480 6478 struct vnode *vp = svd->vp;
6481 6479 page_t *pp;
6482 6480 pgcnt_t pg_idx, i;
6483 6481 int err = 0;
6484 6482 anoff_t aoff;
6485 6483 int anon = (amp != NULL) ? 1 : 0;
6486 6484
6487 6485 ASSERT(svd->type == MAP_PRIVATE);
6488 6486 ASSERT(svd->vpage != NULL);
6489 6487 ASSERT(seg->s_szc != 0);
6490 6488 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6491 6489 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6492 6490 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6493 6491
6494 6492 if (VPP_PROT(svp) == prot)
6495 6493 return (1);
6496 6494 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6497 6495 return (1);
6498 6496
6499 6497 ppa = kmem_alloc(ppasize, KM_SLEEP);
6500 6498 if (anon && vp != NULL) {
6501 6499 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6502 6500 anon = 0;
6503 6501 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6504 6502 }
6505 6503 ASSERT(!anon ||
6506 6504 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6507 6505 }
6508 6506
6509 6507 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6510 6508 if (!VPP_ISPPLOCK(svp))
6511 6509 continue;
6512 6510 if (anon) {
6513 6511 ap = anon_get_ptr(amp->ahp, anon_idx);
6514 6512 if (ap == NULL) {
6515 6513 panic("segvn_claim_pages: no anon slot");
6516 6514 }
6517 6515 swap_xlate(ap, &vp, &aoff);
6518 6516 off = (u_offset_t)aoff;
6519 6517 }
6520 6518 ASSERT(vp != NULL);
6521 6519 if ((pp = page_lookup(vp,
6522 6520 (u_offset_t)off, SE_SHARED)) == NULL) {
6523 6521 panic("segvn_claim_pages: no page");
6524 6522 }
6525 6523 ppa[pg_idx++] = pp;
6526 6524 off += PAGESIZE;
6527 6525 }
6528 6526
6529 6527 if (ppa[0] == NULL) {
6530 6528 kmem_free(ppa, ppasize);
6531 6529 return (1);
6532 6530 }
6533 6531
6534 6532 ASSERT(pg_idx <= pgcnt);
6535 6533 ppa[pg_idx] = NULL;
6536 6534
6537 6535
6538 6536 /* Find each large page within ppa, and adjust its claim */
6539 6537
6540 6538 /* Does ppa cover a single large page? */
6541 6539 if (ppa[0]->p_szc == seg->s_szc) {
6542 6540 if (prot & PROT_WRITE)
6543 6541 err = page_addclaim_pages(ppa);
6544 6542 else
6545 6543 err = page_subclaim_pages(ppa);
6546 6544 } else {
6547 6545 for (i = 0; ppa[i]; i += pgcnt) {
6548 6546 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6549 6547 if (prot & PROT_WRITE)
6550 6548 err = page_addclaim_pages(&ppa[i]);
6551 6549 else
6552 6550 err = page_subclaim_pages(&ppa[i]);
6553 6551 if (err == 0)
6554 6552 break;
6555 6553 }
6556 6554 }
6557 6555
6558 6556 for (i = 0; i < pg_idx; i++) {
6559 6557 ASSERT(ppa[i] != NULL);
6560 6558 page_unlock(ppa[i]);
6561 6559 }
6562 6560
6563 6561 kmem_free(ppa, ppasize);
6564 6562 return (err);
6565 6563 }
6566 6564
6567 6565 /*
6568 6566 * Returns right (upper address) segment if split occurred.
6569 6567 * If the address is equal to the beginning or end of its segment it returns
6570 6568 * the current segment.
6571 6569 */
6572 6570 static struct seg *
6573 6571 segvn_split_seg(struct seg *seg, caddr_t addr)
6574 6572 {
6575 6573 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6576 6574 struct seg *nseg;
6577 6575 size_t nsize;
6578 6576 struct segvn_data *nsvd;
6579 6577
6580 6578 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6581 6579 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6582 6580
6583 6581 ASSERT(addr >= seg->s_base);
6584 6582 ASSERT(addr <= seg->s_base + seg->s_size);
6585 6583 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6586 6584
6587 6585 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6588 6586 return (seg);
6589 6587
6590 6588 nsize = seg->s_base + seg->s_size - addr;
6591 6589 seg->s_size = addr - seg->s_base;
6592 6590 nseg = seg_alloc(seg->s_as, addr, nsize);
6593 6591 ASSERT(nseg != NULL);
6594 6592 nseg->s_ops = seg->s_ops;
6595 6593 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6596 6594 nseg->s_data = (void *)nsvd;
6597 6595 nseg->s_szc = seg->s_szc;
6598 6596 *nsvd = *svd;
6599 6597 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6600 6598 nsvd->seg = nseg;
6601 6599 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6602 6600
6603 6601 if (nsvd->vp != NULL) {
6604 6602 VN_HOLD(nsvd->vp);
6605 6603 nsvd->offset = svd->offset +
6606 6604 (uintptr_t)(nseg->s_base - seg->s_base);
6607 6605 if (nsvd->type == MAP_SHARED)
6608 6606 lgrp_shm_policy_init(NULL, nsvd->vp);
6609 6607 } else {
6610 6608 /*
6611 6609 		 * The offset for an anonymous segment has no significance in
6612 6610 * terms of an offset into a file. If we were to use the above
6613 6611 * calculation instead, the structures read out of
6614 6612 * /proc/<pid>/xmap would be more difficult to decipher since
6615 6613 * it would be unclear whether two seemingly contiguous
6616 6614 * prxmap_t structures represented different segments or a
6617 6615 * single segment that had been split up into multiple prxmap_t
6618 6616 * structures (e.g. if some part of the segment had not yet
6619 6617 * been faulted in).
6620 6618 */
6621 6619 nsvd->offset = 0;
6622 6620 }
6623 6621
6624 6622 ASSERT(svd->softlockcnt == 0);
6625 6623 ASSERT(svd->softlockcnt_sbase == 0);
6626 6624 ASSERT(svd->softlockcnt_send == 0);
6627 6625 crhold(svd->cred);
6628 6626
6629 6627 if (svd->vpage != NULL) {
6630 6628 size_t bytes = vpgtob(seg_pages(seg));
6631 6629 size_t nbytes = vpgtob(seg_pages(nseg));
6632 6630 struct vpage *ovpage = svd->vpage;
6633 6631
6634 6632 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6635 6633 bcopy(ovpage, svd->vpage, bytes);
6636 6634 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6637 6635 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6638 6636 kmem_free(ovpage, bytes + nbytes);
6639 6637 }
6640 6638 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6641 6639 struct anon_map *oamp = svd->amp, *namp;
6642 6640 struct anon_hdr *nahp;
6643 6641
6644 6642 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6645 6643 ASSERT(oamp->refcnt == 1);
6646 6644 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6647 6645 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6648 6646 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6649 6647
6650 6648 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6651 6649 namp->a_szc = nseg->s_szc;
6652 6650 (void) anon_copy_ptr(oamp->ahp,
6653 6651 svd->anon_index + btop(seg->s_size),
6654 6652 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6655 6653 anon_release(oamp->ahp, btop(oamp->size));
6656 6654 oamp->ahp = nahp;
6657 6655 oamp->size = seg->s_size;
6658 6656 svd->anon_index = 0;
6659 6657 nsvd->amp = namp;
6660 6658 nsvd->anon_index = 0;
6661 6659 ANON_LOCK_EXIT(&oamp->a_rwlock);
6662 6660 } else if (svd->amp != NULL) {
6663 6661 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6664 6662 ASSERT(svd->amp == nsvd->amp);
6665 6663 ASSERT(seg->s_szc <= svd->amp->a_szc);
6666 6664 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6667 6665 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6668 6666 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6669 6667 svd->amp->refcnt++;
6670 6668 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6671 6669 }
6672 6670
6673 6671 /*
6674 6672 * Split the amount of swap reserved.
6675 6673 */
6676 6674 if (svd->swresv) {
6677 6675 /*
6678 6676 * For MAP_NORESERVE, only allocate swap reserve for pages
6679 6677 * being used. Other segments get enough to cover whole
6680 6678 * segment.
6681 6679 */
6682 6680 if (svd->flags & MAP_NORESERVE) {
6683 6681 size_t oswresv;
6684 6682
6685 6683 ASSERT(svd->amp);
6686 6684 oswresv = svd->swresv;
6687 6685 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6688 6686 svd->anon_index, btop(seg->s_size)));
6689 6687 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6690 6688 nsvd->anon_index, btop(nseg->s_size)));
6691 6689 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6692 6690 } else {
6693 6691 if (svd->pageswap) {
6694 6692 svd->swresv = segvn_count_swap_by_vpages(seg);
6695 6693 ASSERT(nsvd->swresv >= svd->swresv);
6696 6694 nsvd->swresv -= svd->swresv;
6697 6695 } else {
6698 6696 ASSERT(svd->swresv == seg->s_size +
6699 6697 nseg->s_size);
6700 6698 svd->swresv = seg->s_size;
6701 6699 nsvd->swresv = nseg->s_size;
6702 6700 }
6703 6701 }
6704 6702 }
6705 6703
6706 6704 return (nseg);
6707 6705 }
6708 6706
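/*
 * Editor's sketch of the offset bookkeeping in segvn_split_seg(): the
 * right (upper) segment maps the same object at the old offset plus
 * the size retained by the left segment.  Plain integers stand in for
 * the seg/svd fields; the names here are illustrative only.
 */
#include <stdint.h>
#include <stddef.h>
#include <assert.h>

static void
split_offsets(uintptr_t base, size_t size, uint64_t off, uintptr_t addr,
    size_t *lsize, size_t *rsize, uint64_t *roff)
{
	assert(addr > base && addr < base + size);
	*lsize = addr - base;		/* seg->s_size after the split */
	*rsize = base + size - addr;	/* nseg->s_size */
	*roff = off + (addr - base);	/* nsvd->offset (vnode case) */
}
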
6709 6707 /*
6710 6708  * Called on memory operations (unmap, setprot, setpagesize) for a subset
6711 6709  * of a large page segment, to demote either the whole memory range
6712 6710  * (SDR_RANGE) or its ends (SDR_END) as given by addr/len.
6713 6711  *
6714 6712  * Returns 0 on success; returns errno (including ENOMEM) on failure.
6715 6713 */
6716 6714 static int
6717 6715 segvn_demote_range(
6718 6716 struct seg *seg,
6719 6717 caddr_t addr,
6720 6718 size_t len,
6721 6719 int flag,
6722 6720 uint_t szcvec)
6723 6721 {
6724 6722 caddr_t eaddr = addr + len;
6725 6723 caddr_t lpgaddr, lpgeaddr;
6726 6724 struct seg *nseg;
6727 6725 struct seg *badseg1 = NULL;
6728 6726 struct seg *badseg2 = NULL;
6729 6727 size_t pgsz;
6730 6728 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6731 6729 int err;
6732 6730 uint_t szc = seg->s_szc;
6733 6731 uint_t tszcvec;
6734 6732
6735 6733 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6736 6734 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6737 6735 ASSERT(szc != 0);
6738 6736 pgsz = page_get_pagesize(szc);
6739 6737 ASSERT(seg->s_base != addr || seg->s_size != len);
6740 6738 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6741 6739 ASSERT(svd->softlockcnt == 0);
6742 6740 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6743 6741 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6744 6742
6745 6743 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6746 6744 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6747 6745 if (flag == SDR_RANGE) {
6748 6746 /* demote entire range */
6749 6747 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6750 6748 (void) segvn_split_seg(nseg, lpgeaddr);
6751 6749 ASSERT(badseg1->s_base == lpgaddr);
6752 6750 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6753 6751 } else if (addr != lpgaddr) {
6754 6752 ASSERT(flag == SDR_END);
6755 6753 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6756 6754 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6757 6755 eaddr < lpgaddr + 2 * pgsz) {
6758 6756 (void) segvn_split_seg(nseg, lpgeaddr);
6759 6757 ASSERT(badseg1->s_base == lpgaddr);
6760 6758 ASSERT(badseg1->s_size == 2 * pgsz);
6761 6759 } else {
6762 6760 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6763 6761 ASSERT(badseg1->s_base == lpgaddr);
6764 6762 ASSERT(badseg1->s_size == pgsz);
6765 6763 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6766 6764 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6767 6765 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6768 6766 badseg2 = nseg;
6769 6767 (void) segvn_split_seg(nseg, lpgeaddr);
6770 6768 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6771 6769 ASSERT(badseg2->s_size == pgsz);
6772 6770 }
6773 6771 }
6774 6772 } else {
6775 6773 ASSERT(flag == SDR_END);
6776 6774 ASSERT(eaddr < lpgeaddr);
6777 6775 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6778 6776 (void) segvn_split_seg(nseg, lpgeaddr);
6779 6777 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6780 6778 ASSERT(badseg1->s_size == pgsz);
6781 6779 }
6782 6780
6783 6781 ASSERT(badseg1 != NULL);
6784 6782 ASSERT(badseg1->s_szc == szc);
6785 6783 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6786 6784 badseg1->s_size == 2 * pgsz);
6787 6785 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6788 6786 ASSERT(badseg1->s_size == pgsz ||
6789 6787 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6790 6788 if (err = segvn_clrszc(badseg1)) {
6791 6789 return (err);
6792 6790 }
6793 6791 ASSERT(badseg1->s_szc == 0);
6794 6792
6795 6793 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6796 6794 uint_t tszc = highbit(tszcvec) - 1;
6797 6795 caddr_t ta = MAX(addr, badseg1->s_base);
6798 6796 caddr_t te;
6799 6797 size_t tpgsz = page_get_pagesize(tszc);
6800 6798
6801 6799 ASSERT(svd->type == MAP_SHARED);
6802 6800 ASSERT(flag == SDR_END);
6803 6801 ASSERT(tszc < szc && tszc > 0);
6804 6802
6805 6803 if (eaddr > badseg1->s_base + badseg1->s_size) {
6806 6804 te = badseg1->s_base + badseg1->s_size;
6807 6805 } else {
6808 6806 te = eaddr;
6809 6807 }
6810 6808
6811 6809 ASSERT(ta <= te);
6812 6810 badseg1->s_szc = tszc;
6813 6811 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6814 6812 if (badseg2 != NULL) {
6815 6813 err = segvn_demote_range(badseg1, ta, te - ta,
6816 6814 SDR_END, tszcvec);
6817 6815 if (err != 0) {
6818 6816 return (err);
6819 6817 }
6820 6818 } else {
6821 6819 return (segvn_demote_range(badseg1, ta,
6822 6820 te - ta, SDR_END, tszcvec));
6823 6821 }
6824 6822 }
6825 6823 }
6826 6824
6827 6825 if (badseg2 == NULL)
6828 6826 return (0);
6829 6827 ASSERT(badseg2->s_szc == szc);
6830 6828 ASSERT(badseg2->s_size == pgsz);
6831 6829 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6832 6830 if (err = segvn_clrszc(badseg2)) {
6833 6831 return (err);
6834 6832 }
6835 6833 ASSERT(badseg2->s_szc == 0);
6836 6834
6837 6835 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6838 6836 uint_t tszc = highbit(tszcvec) - 1;
6839 6837 size_t tpgsz = page_get_pagesize(tszc);
6840 6838
6841 6839 ASSERT(svd->type == MAP_SHARED);
6842 6840 ASSERT(flag == SDR_END);
6843 6841 ASSERT(tszc < szc && tszc > 0);
6844 6842 ASSERT(badseg2->s_base > addr);
6845 6843 ASSERT(eaddr > badseg2->s_base);
6846 6844 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6847 6845
6848 6846 badseg2->s_szc = tszc;
6849 6847 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6850 6848 return (segvn_demote_range(badseg2, badseg2->s_base,
6851 6849 eaddr - badseg2->s_base, SDR_END, tszcvec));
6852 6850 }
6853 6851 }
6854 6852
6855 6853 return (0);
6856 6854 }
6857 6855
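/*
 * Editor's sketch of the CALC_LPG_REGION() rounding used above:
 * [addr, addr + len) is expanded outward to large-page boundaries
 * (clamping to the segment is omitted here).  The macros mirror the
 * usual P2ALIGN/P2ROUNDUP definitions and assume pgsz is a power of
 * two.
 */
#include <stdint.h>
#include <stddef.h>

#define	EX_P2ALIGN(x, a)	((x) & ~((uintptr_t)(a) - 1))
#define	EX_P2ROUNDUP(x, a)	(-(-(uintptr_t)(x) & -(uintptr_t)(a)))

static void
lpg_region(uintptr_t addr, size_t len, size_t pgsz,
    uintptr_t *lpgaddr, uintptr_t *lpgeaddr)
{
	*lpgaddr = EX_P2ALIGN(addr, pgsz);		/* round start down */
	*lpgeaddr = EX_P2ROUNDUP(addr + len, pgsz);	/* round end up */
}
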
6858 6856 static int
6859 6857 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6860 6858 {
6861 6859 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6862 6860 struct vpage *vp, *evp;
6863 6861
6864 6862 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6865 6863
6866 6864 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6867 6865 /*
6868 6866 	 * If segment-level protections can be used, simply check against them.
6869 6867 */
6870 6868 if (svd->pageprot == 0) {
6871 6869 int err;
6872 6870
6873 6871 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6874 6872 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6875 6873 return (err);
6876 6874 }
6877 6875
6878 6876 /*
6879 6877 * Have to check down to the vpage level.
6880 6878 */
6881 6879 evp = &svd->vpage[seg_page(seg, addr + len)];
6882 6880 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6883 6881 if ((VPP_PROT(vp) & prot) != prot) {
6884 6882 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6885 6883 return (EACCES);
6886 6884 }
6887 6885 }
6888 6886 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6889 6887 return (0);
6890 6888 }
6891 6889
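/*
 * Editor's sketch: the access test in segvn_checkprot() reduces to a
 * mask check -- every requested bit must be present.  The XPROT_*
 * values are illustrative placeholders, not the sys/mman.h constants.
 */
#define	XPROT_READ	0x1
#define	XPROT_WRITE	0x2

static int
prot_ok(unsigned int have, unsigned int want)
{
	return ((have & want) == want);
}
/* e.g. prot_ok(XPROT_READ | XPROT_WRITE, XPROT_READ) is true */
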
6892 6890 static int
6893 6891 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6894 6892 {
6895 6893 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6896 6894 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6897 6895
6898 6896 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6899 6897
6900 6898 if (pgno != 0) {
6901 6899 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6902 6900 if (svd->pageprot == 0) {
6903 6901 do {
6904 6902 protv[--pgno] = svd->prot;
6905 6903 } while (pgno != 0);
6906 6904 } else {
6907 6905 size_t pgoff = seg_page(seg, addr);
6908 6906
6909 6907 do {
6910 6908 pgno--;
6911 6909 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6912 6910 } while (pgno != 0);
6913 6911 }
6914 6912 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6915 6913 }
6916 6914 return (0);
6917 6915 }
6918 6916
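/*
 * Editor's sketch of the protv fill in segvn_getprot(): one
 * segment-wide value when per-page protections are absent, else a
 * per-page copy.  A hypothetical standalone form:
 */
#include <stddef.h>

static void
fill_protv(unsigned int *protv, size_t npages,
    const unsigned int *pageprot, unsigned int segprot)
{
	size_t i;

	for (i = 0; i < npages; i++)
		protv[i] = (pageprot != NULL) ? pageprot[i] : segprot;
}
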
6919 6917 static u_offset_t
6920 6918 segvn_getoffset(struct seg *seg, caddr_t addr)
6921 6919 {
6922 6920 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6923 6921
6924 6922 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6925 6923
6926 6924 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6927 6925 }
6928 6926
6929 6927 /*ARGSUSED*/
6930 6928 static int
6931 6929 segvn_gettype(struct seg *seg, caddr_t addr)
6932 6930 {
6933 6931 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6934 6932
6935 6933 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6936 6934
6937 6935 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6938 6936 MAP_INITDATA)));
6939 6937 }
6940 6938
6941 6939 /*ARGSUSED*/
6942 6940 static int
6943 6941 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6944 6942 {
6945 6943 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6946 6944
6947 6945 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6948 6946
6949 6947 *vpp = svd->vp;
6950 6948 return (0);
6951 6949 }
6952 6950
6953 6951 /*
6954 6952 * Check to see if it makes sense to do kluster/read ahead to
6955 6953 * addr + delta relative to the mapping at addr. We assume here
6956 6954 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6957 6955 *
6958 6956 * For segvn, we currently "approve" of the action if we are
6959 6957 * still in the segment and it maps from the same vp/off,
6960 6958 * or if the advice stored in segvn_data or vpages allows it.
6961 6959  * Klustering is disallowed for MADV_RANDOM, or MADV_SEQUENTIAL with delta < 0.
6962 6960 */
6963 6961 static int
6964 6962 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6965 6963 {
6966 6964 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6967 6965 struct anon *oap, *ap;
6968 6966 ssize_t pd;
6969 6967 size_t page;
6970 6968 struct vnode *vp1, *vp2;
6971 6969 u_offset_t off1, off2;
6972 6970 struct anon_map *amp;
6973 6971
6974 6972 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6975 6973 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6976 6974 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6977 6975
6978 6976 if (addr + delta < seg->s_base ||
6979 6977 addr + delta >= (seg->s_base + seg->s_size))
6980 6978 return (-1); /* exceeded segment bounds */
6981 6979
6982 6980 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
6983 6981 page = seg_page(seg, addr);
6984 6982
6985 6983 /*
6986 6984 * Check to see if either of the pages addr or addr + delta
6987 6985 * have advice set that prevents klustering (if MADV_RANDOM advice
6988 6986 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
6989 6987 * is negative).
6990 6988 */
6991 6989 if (svd->advice == MADV_RANDOM ||
6992 6990 svd->advice == MADV_SEQUENTIAL && delta < 0)
6993 6991 return (-1);
6994 6992 else if (svd->pageadvice && svd->vpage) {
6995 6993 struct vpage *bvpp, *evpp;
6996 6994
6997 6995 bvpp = &svd->vpage[page];
6998 6996 evpp = &svd->vpage[page + pd];
6999 6997 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
7000 6998 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
7001 6999 return (-1);
7002 7000 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
7003 7001 VPP_ADVICE(evpp) == MADV_RANDOM)
7004 7002 return (-1);
7005 7003 }
7006 7004
7007 7005 if (svd->type == MAP_SHARED)
7008 7006 return (0); /* shared mapping - all ok */
7009 7007
7010 7008 if ((amp = svd->amp) == NULL)
7011 7009 return (0); /* off original vnode */
7012 7010
7013 7011 page += svd->anon_index;
7014 7012
7015 7013 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7016 7014
7017 7015 oap = anon_get_ptr(amp->ahp, page);
7018 7016 ap = anon_get_ptr(amp->ahp, page + pd);
7019 7017
7020 7018 	ANON_LOCK_EXIT(&amp->a_rwlock);
7021 7019
7022 7020 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
7023 7021 return (-1); /* one with and one without an anon */
7024 7022 }
7025 7023
7026 7024 if (oap == NULL) { /* implies that ap == NULL */
7027 7025 return (0); /* off original vnode */
7028 7026 }
7029 7027
7030 7028 /*
7031 7029 * Now we know we have two anon pointers - check to
7032 7030 * see if they happen to be properly allocated.
7033 7031 */
7034 7032
7035 7033 /*
7036 7034 * XXX We cheat here and don't lock the anon slots. We can't because
7037 7035 * we may have been called from the anon layer which might already
7038 7036 * have locked them. We are holding a refcnt on the slots so they
7039 7037 * can't disappear. The worst that will happen is we'll get the wrong
7040 7038 * names (vp, off) for the slots and make a poor klustering decision.
7041 7039 */
7042 7040 swap_xlate(ap, &vp1, &off1);
7043 7041 swap_xlate(oap, &vp2, &off2);
7044 7042
7045 7043
7046 7044 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
7047 7045 return (-1);
7048 7046 return (0);
7049 7047 }
7050 7048
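/*
 * Editor's sketch of the first kluster test above: addr + delta must
 * stay inside [base, base + size), with delta a signed page-sized
 * multiple.  ptrdiff_t stands in for ssize_t to keep this ISO C.
 */
#include <stdint.h>
#include <stddef.h>

static int
kluster_in_bounds(uintptr_t base, size_t size, uintptr_t addr,
    ptrdiff_t delta)
{
	uintptr_t t = addr + delta;	/* delta may be negative */

	return (t >= base && t < base + size);
}
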
7051 7049 /*
7052 7050 * Synchronize primary storage cache with real object in virtual memory.
7053 7051 *
7054 7052 * XXX - Anonymous pages should not be sync'ed out at all.
7055 7053 */
7056 7054 static int
7057 7055 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7058 7056 {
7059 7057 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7060 7058 struct vpage *vpp;
7061 7059 page_t *pp;
7062 7060 u_offset_t offset;
7063 7061 struct vnode *vp;
7064 7062 u_offset_t off;
7065 7063 caddr_t eaddr;
7066 7064 int bflags;
7067 7065 int err = 0;
7068 7066 int segtype;
7069 7067 int pageprot;
7070 7068 int prot;
7071 7069 ulong_t anon_index;
7072 7070 struct anon_map *amp;
7073 7071 struct anon *ap;
7074 7072 anon_sync_obj_t cookie;
7075 7073
7076 7074 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7077 7075
7078 7076 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7079 7077
7080 7078 if (svd->softlockcnt > 0) {
7081 7079 /*
7082 7080 		 * If this is a shared segment, a non-zero softlockcnt
7083 7081 		 * means locked pages are still in use.
7084 7082 */
7085 7083 if (svd->type == MAP_SHARED) {
7086 7084 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7087 7085 return (EAGAIN);
7088 7086 }
7089 7087
7090 7088 /*
7091 7089 * flush all pages from seg cache
7092 7090 * otherwise we may deadlock in swap_putpage
7093 7091 * for B_INVAL page (4175402).
7094 7092 *
7095 7093 * Even if we grab segvn WRITER's lock
7096 7094 * here, there might be another thread which could've
7097 7095 * successfully performed lookup/insert just before
7098 7096 * we acquired the lock here. So, grabbing either
7099 7097 		 * lock here is not of much use. Until we devise
7100 7098 * a strategy at upper layers to solve the
7101 7099 * synchronization issues completely, we expect
7102 7100 * applications to handle this appropriately.
7103 7101 */
7104 7102 segvn_purge(seg);
7105 7103 if (svd->softlockcnt > 0) {
7106 7104 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7107 7105 return (EAGAIN);
7108 7106 }
7109 7107 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7110 7108 svd->amp->a_softlockcnt > 0) {
7111 7109 /*
7112 7110 * Try to purge this amp's entries from pcache. It will
7113 7111 * succeed only if other segments that share the amp have no
7114 7112 * outstanding softlock's.
7115 7113 */
7116 7114 segvn_purge(seg);
7117 7115 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7118 7116 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7119 7117 return (EAGAIN);
7120 7118 }
7121 7119 }
7122 7120
7123 7121 vpp = svd->vpage;
7124 7122 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7125 7123 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7126 7124 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7127 7125
7128 7126 if (attr) {
7129 7127 pageprot = attr & ~(SHARED|PRIVATE);
7130 7128 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7131 7129
7132 7130 /*
7133 7131 * We are done if the segment types don't match
7134 7132 * or if we have segment level protections and
7135 7133 * they don't match.
7136 7134 */
7137 7135 if (svd->type != segtype) {
7138 7136 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7139 7137 return (0);
7140 7138 }
7141 7139 if (vpp == NULL) {
7142 7140 if (svd->prot != pageprot) {
7143 7141 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7144 7142 return (0);
7145 7143 }
7146 7144 prot = svd->prot;
7147 7145 } else
7148 7146 vpp = &svd->vpage[seg_page(seg, addr)];
7149 7147
7150 7148 } else if (svd->vp && svd->amp == NULL &&
7151 7149 (flags & MS_INVALIDATE) == 0) {
7152 7150
7153 7151 /*
7154 7152 		 * No attributes, no anonymous pages, and the MS_INVALIDATE
7155 7153 		 * flag is not set; just use one big request.
7156 7154 */
7157 7155 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7158 7156 bflags, svd->cred, NULL);
7159 7157 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7160 7158 return (err);
7161 7159 }
7162 7160
7163 7161 if ((amp = svd->amp) != NULL)
7164 7162 anon_index = svd->anon_index + seg_page(seg, addr);
7165 7163
7166 7164 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7167 7165 ap = NULL;
7168 7166 if (amp != NULL) {
7169 7167 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7170 7168 anon_array_enter(amp, anon_index, &cookie);
7171 7169 ap = anon_get_ptr(amp->ahp, anon_index++);
7172 7170 if (ap != NULL) {
7173 7171 swap_xlate(ap, &vp, &off);
7174 7172 } else {
7175 7173 vp = svd->vp;
7176 7174 off = offset;
7177 7175 }
7178 7176 anon_array_exit(&cookie);
7179 7177 			ANON_LOCK_EXIT(&amp->a_rwlock);
7180 7178 } else {
7181 7179 vp = svd->vp;
7182 7180 off = offset;
7183 7181 }
7184 7182 offset += PAGESIZE;
7185 7183
7186 7184 if (vp == NULL) /* untouched zfod page */
7187 7185 continue;
7188 7186
7189 7187 if (attr) {
7190 7188 if (vpp) {
7191 7189 prot = VPP_PROT(vpp);
7192 7190 vpp++;
7193 7191 }
7194 7192 if (prot != pageprot) {
7195 7193 continue;
7196 7194 }
7197 7195 }
7198 7196
7199 7197 /*
7200 7198 * See if any of these pages are locked -- if so, then we
7201 7199 * will have to truncate an invalidate request at the first
7202 7200 * locked one. We don't need the page_struct_lock to test
7203 7201 * as this is only advisory; even if we acquire it someone
7204 7202 * might race in and lock the page after we unlock and before
7205 7203 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7206 7204 */
7207 7205 if (flags & MS_INVALIDATE) {
7208 7206 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7209 7207 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7210 7208 page_unlock(pp);
7211 7209 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7212 7210 return (EBUSY);
7213 7211 }
7214 7212 if (ap != NULL && pp->p_szc != 0 &&
7215 7213 page_tryupgrade(pp)) {
7216 7214 if (pp->p_lckcnt == 0 &&
7217 7215 pp->p_cowcnt == 0) {
7218 7216 /*
7219 7217 * swapfs VN_DISPOSE() won't
7220 7218 * invalidate large pages.
7221 7219 * Attempt to demote.
7222 7220 * XXX can't help it if it
7223 7221 * fails. But for swapfs
7224 7222 * pages it is no big deal.
7225 7223 */
7226 7224 (void) page_try_demote_pages(
7227 7225 pp);
7228 7226 }
7229 7227 }
7230 7228 page_unlock(pp);
7231 7229 }
7232 7230 } else if (svd->type == MAP_SHARED && amp != NULL) {
7233 7231 /*
7234 7232 * Avoid writing out to disk ISM's large pages
7235 7233 * because segspt_free_pages() relies on NULL an_pvp
7236 7234 * of anon slots of such pages.
7237 7235 */
7238 7236
7239 7237 ASSERT(svd->vp == NULL);
7240 7238 /*
7241 7239 * swapfs uses page_lookup_nowait if not freeing or
7242 7240 * invalidating and skips a page if
7243 7241 * page_lookup_nowait returns NULL.
7244 7242 */
7245 7243 pp = page_lookup_nowait(vp, off, SE_SHARED);
7246 7244 if (pp == NULL) {
7247 7245 continue;
7248 7246 }
7249 7247 if (pp->p_szc != 0) {
7250 7248 page_unlock(pp);
7251 7249 continue;
7252 7250 }
7253 7251
7254 7252 /*
7255 7253 * Note ISM pages are created large so (vp, off)'s
7256 7254 * page cannot suddenly become large after we unlock
7257 7255 * pp.
7258 7256 */
7259 7257 page_unlock(pp);
7260 7258 }
7261 7259 /*
7262 7260 * XXX - Should ultimately try to kluster
7263 7261 * calls to VOP_PUTPAGE() for performance.
7264 7262 */
7265 7263 VN_HOLD(vp);
7266 7264 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7267 7265 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7268 7266 svd->cred, NULL);
7269 7267
7270 7268 VN_RELE(vp);
7271 7269 if (err)
7272 7270 break;
7273 7271 }
7274 7272 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7275 7273 return (err);
7276 7274 }
7277 7275
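/*
 * Editor's sketch of the bflags translation at the top of
 * segvn_sync(): msync(3C) flags map onto buffer-I/O flags.  The
 * numeric values below are illustrative placeholders, not the real
 * sys/mman.h and sys/buf.h constants.
 */
#define	EX_MS_ASYNC		0x1
#define	EX_MS_INVALIDATE	0x2
#define	EX_B_ASYNC		0x4
#define	EX_B_INVAL		0x8

static int
sync_bflags(int flags)
{
	return (((flags & EX_MS_ASYNC) ? EX_B_ASYNC : 0) |
	    ((flags & EX_MS_INVALIDATE) ? EX_B_INVAL : 0));
}
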
7278 7276 /*
7279 7277 * Determine if we have data corresponding to pages in the
7280 7278 * primary storage virtual memory cache (i.e., "in core").
7281 7279 */
7282 7280 static size_t
7283 7281 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7284 7282 {
7285 7283 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7286 7284 struct vnode *vp, *avp;
7287 7285 u_offset_t offset, aoffset;
7288 7286 size_t p, ep;
7289 7287 int ret;
7290 7288 struct vpage *vpp;
7291 7289 page_t *pp;
7292 7290 uint_t start;
7293 7291 struct anon_map *amp; /* XXX - for locknest */
7294 7292 struct anon *ap;
7295 7293 uint_t attr;
7296 7294 anon_sync_obj_t cookie;
7297 7295
7298 7296 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7299 7297
7300 7298 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7301 7299 if (svd->amp == NULL && svd->vp == NULL) {
7302 7300 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7303 7301 bzero(vec, btopr(len));
7304 7302 return (len); /* no anonymous pages created yet */
7305 7303 }
7306 7304
7307 7305 p = seg_page(seg, addr);
7308 7306 ep = seg_page(seg, addr + len);
7309 7307 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7310 7308
7311 7309 amp = svd->amp;
7312 7310 for (; p < ep; p++, addr += PAGESIZE) {
7313 7311 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7314 7312 ret = start;
7315 7313 ap = NULL;
7316 7314 avp = NULL;
7317 7315 /* Grab the vnode/offset for the anon slot */
7318 7316 if (amp != NULL) {
7319 7317 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7320 7318 anon_array_enter(amp, svd->anon_index + p, &cookie);
7321 7319 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7322 7320 if (ap != NULL) {
7323 7321 swap_xlate(ap, &avp, &aoffset);
7324 7322 }
7325 7323 anon_array_exit(&cookie);
7326 7324 			ANON_LOCK_EXIT(&amp->a_rwlock);
7327 7325 }
7328 7326 if ((avp != NULL) && page_exists(avp, aoffset)) {
7329 7327 /* A page exists for the anon slot */
7330 7328 ret |= SEG_PAGE_INCORE;
7331 7329
7332 7330 /*
7333 7331 * If page is mapped and writable
7334 7332 */
7335 7333 attr = (uint_t)0;
7336 7334 if ((hat_getattr(seg->s_as->a_hat, addr,
7337 7335 &attr) != -1) && (attr & PROT_WRITE)) {
7338 7336 ret |= SEG_PAGE_ANON;
7339 7337 }
7340 7338 /*
7341 7339 * Don't get page_struct lock for lckcnt and cowcnt,
7342 7340 * since this is purely advisory.
7343 7341 */
7344 7342 if ((pp = page_lookup_nowait(avp, aoffset,
7345 7343 SE_SHARED)) != NULL) {
7346 7344 if (pp->p_lckcnt)
7347 7345 ret |= SEG_PAGE_SOFTLOCK;
7348 7346 if (pp->p_cowcnt)
7349 7347 ret |= SEG_PAGE_HASCOW;
7350 7348 page_unlock(pp);
7351 7349 }
7352 7350 }
7353 7351
7354 7352 /* Gather vnode statistics */
7355 7353 vp = svd->vp;
7356 7354 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7357 7355
7358 7356 if (vp != NULL) {
7359 7357 /*
7360 7358 * Try to obtain a "shared" lock on the page
7361 7359 * without blocking. If this fails, determine
7362 7360 * if the page is in memory.
7363 7361 */
7364 7362 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7365 7363 if ((pp == NULL) && (page_exists(vp, offset))) {
7366 7364 /* Page is incore, and is named */
7367 7365 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7368 7366 }
7369 7367 /*
7370 7368 * Don't get page_struct lock for lckcnt and cowcnt,
7371 7369 * since this is purely advisory.
7372 7370 */
7373 7371 if (pp != NULL) {
7374 7372 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7375 7373 if (pp->p_lckcnt)
7376 7374 ret |= SEG_PAGE_SOFTLOCK;
7377 7375 if (pp->p_cowcnt)
7378 7376 ret |= SEG_PAGE_HASCOW;
7379 7377 page_unlock(pp);
7380 7378 }
7381 7379 }
7382 7380
7383 7381 /* Gather virtual page information */
7384 7382 if (vpp) {
7385 7383 if (VPP_ISPPLOCK(vpp))
7386 7384 ret |= SEG_PAGE_LOCKED;
7387 7385 vpp++;
7388 7386 }
7389 7387
7390 7388 *vec++ = (char)ret;
7391 7389 }
7392 7390 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7393 7391 return (len);
7394 7392 }
7395 7393
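/*
 * Editor's sketch: each byte segvn_incore() writes to vec is a bit
 * set.  A consumer might count resident pages like this; the INCORE
 * bit value here is illustrative (the real one is SEG_PAGE_INCORE
 * from vm/seg.h).
 */
#include <stddef.h>

#define	EX_PAGE_INCORE	0x01

static size_t
count_incore(const char *vec, size_t npages)
{
	size_t i, n = 0;

	for (i = 0; i < npages; i++) {
		if (vec[i] & EX_PAGE_INCORE)
			n++;
	}
	return (n);
}
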
7396 7394 /*
7397 7395 * Statement for p_cowcnts/p_lckcnts.
7398 7396 *
7399 7397 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7400 7398 * irrespective of the following factors or anything else:
7401 7399 *
7402 7400 * (1) anon slots are populated or not
7403 7401 * (2) cow is broken or not
7404 7402 * (3) refcnt on ap is 1 or greater than 1
7405 7403 *
7406 7404 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7407 7405 * and munlock.
7408 7406 *
7409 7407 *
7410 7408 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7411 7409 *
7412 7410 * if vpage has PROT_WRITE
7413 7411 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7414 7412 * else
7415 7413 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7416 7414 *
7417 7415 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7418 7416 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7419 7417 *
7420 7418 * We may also break COW if softlocking on read access in the physio case.
7421 7419 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7422 7420 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7423 7421 * vpage doesn't have PROT_WRITE.
7424 7422 *
7425 7423 *
7426 7424 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7427 7425 *
7428 7426 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7429 7427 * increment p_lckcnt by calling page_subclaim() which takes care of
7430 7428 * availrmem accounting and p_lckcnt overflow.
7431 7429 *
7432 7430 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7433 7431 * increment p_cowcnt by calling page_addclaim() which takes care of
7434 7432 * availrmem availability and p_cowcnt overflow.
7435 7433 */
7436 7434
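/*
 * Editor's sketch of the transfer rule described above: during
 * copy-on-write, the counter that moves from the old page to the new
 * one depends on the vpage's write bit.  Plain counters stand in for
 * the page_t fields.
 */
struct ex_cnts {
	unsigned int cow;	/* stands in for p_cowcnt */
	unsigned int lck;	/* stands in for p_lckcnt */
};

static void
cow_transfer(struct ex_cnts *oldp, struct ex_cnts *newp,
    int vpage_writable)
{
	if (vpage_writable) {
		oldp->cow--;		/* cowcnt follows the new copy */
		newp->cow++;
	} else {
		oldp->lck--;		/* read-only softlock (physio) case */
		newp->lck++;
	}
}
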
7437 7435 /*
7438 7436 * Lock down (or unlock) pages mapped by this segment.
7439 7437 *
7440 7438 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7441 7439 * At fault time they will be relocated into larger pages.
7442 7440 */
7443 7441 static int
7444 7442 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7445 7443 int attr, int op, ulong_t *lockmap, size_t pos)
7446 7444 {
7447 7445 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7448 7446 struct vpage *vpp;
7449 7447 struct vpage *evp;
7450 7448 page_t *pp;
7451 7449 u_offset_t offset;
7452 7450 u_offset_t off;
7453 7451 int segtype;
7454 7452 int pageprot;
7455 7453 int claim;
7456 7454 struct vnode *vp;
7457 7455 ulong_t anon_index;
7458 7456 struct anon_map *amp;
7459 7457 struct anon *ap;
7460 7458 struct vattr va;
7461 7459 anon_sync_obj_t cookie;
7462 7460 struct kshmid *sp = NULL;
7463 7461 struct proc *p = curproc;
7464 7462 kproject_t *proj = NULL;
7465 7463 int chargeproc = 1;
7466 7464 size_t locked_bytes = 0;
7467 7465 size_t unlocked_bytes = 0;
7468 7466 int err = 0;
7469 7467
7470 7468 /*
7471 7469 	 * Hold the write lock on the address space because we may split or
7472 7470 	 * concatenate segments.
7473 7471 */
7474 7472 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7475 7473
7476 7474 /*
7477 7475 * If this is a shm, use shm's project and zone, else use
7478 7476 * project and zone of calling process
7479 7477 */
7480 7478
7481 7479 /* Determine if this segment backs a sysV shm */
7482 7480 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7483 7481 ASSERT(svd->type == MAP_SHARED);
7484 7482 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7485 7483 sp = svd->amp->a_sp;
7486 7484 proj = sp->shm_perm.ipc_proj;
7487 7485 chargeproc = 0;
7488 7486 }
7489 7487
7490 7488 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7491 7489 if (attr) {
7492 7490 pageprot = attr & ~(SHARED|PRIVATE);
7493 7491 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7494 7492
7495 7493 /*
7496 7494 * We are done if the segment types don't match
7497 7495 * or if we have segment level protections and
7498 7496 * they don't match.
7499 7497 */
7500 7498 if (svd->type != segtype) {
7501 7499 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7502 7500 return (0);
7503 7501 }
7504 7502 if (svd->pageprot == 0 && svd->prot != pageprot) {
7505 7503 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7506 7504 return (0);
7507 7505 }
7508 7506 }
7509 7507
7510 7508 if (op == MC_LOCK) {
7511 7509 if (svd->tr_state == SEGVN_TR_INIT) {
7512 7510 svd->tr_state = SEGVN_TR_OFF;
7513 7511 } else if (svd->tr_state == SEGVN_TR_ON) {
7514 7512 ASSERT(svd->amp != NULL);
7515 7513 segvn_textunrepl(seg, 0);
7516 7514 ASSERT(svd->amp == NULL &&
7517 7515 svd->tr_state == SEGVN_TR_OFF);
7518 7516 }
7519 7517 }
7520 7518
7521 7519 /*
7522 7520 * If we're locking, then we must create a vpage structure if
7523 7521 * none exists. If we're unlocking, then check to see if there
7524 7522 * is a vpage -- if not, then we could not have locked anything.
7525 7523 */
7526 7524
7527 7525 if ((vpp = svd->vpage) == NULL) {
7528 7526 if (op == MC_LOCK) {
7529 7527 segvn_vpage(seg);
7530 7528 if (svd->vpage == NULL) {
7531 7529 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7532 7530 return (ENOMEM);
7533 7531 }
7534 7532 } else {
7535 7533 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7536 7534 return (0);
7537 7535 }
7538 7536 }
7539 7537
7540 7538 /*
7541 7539 * The anonymous data vector (i.e., previously
7542 7540 * unreferenced mapping to swap space) can be allocated
7543 7541 * by lazily testing for its existence.
7544 7542 */
7545 7543 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7546 7544 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7547 7545 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7548 7546 svd->amp->a_szc = seg->s_szc;
7549 7547 }
7550 7548
7551 7549 if ((amp = svd->amp) != NULL) {
7552 7550 anon_index = svd->anon_index + seg_page(seg, addr);
7553 7551 }
7554 7552
7555 7553 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7556 7554 evp = &svd->vpage[seg_page(seg, addr + len)];
7557 7555
7558 7556 if (sp != NULL)
7559 7557 mutex_enter(&sp->shm_mlock);
7560 7558
7561 7559 /* determine number of unlocked bytes in range for lock operation */
7562 7560 if (op == MC_LOCK) {
7563 7561
7564 7562 if (sp == NULL) {
7565 7563 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7566 7564 vpp++) {
7567 7565 if (!VPP_ISPPLOCK(vpp))
7568 7566 unlocked_bytes += PAGESIZE;
7569 7567 }
7570 7568 } else {
7571 7569 ulong_t i_idx, i_edx;
7572 7570 anon_sync_obj_t i_cookie;
7573 7571 struct anon *i_ap;
7574 7572 struct vnode *i_vp;
7575 7573 u_offset_t i_off;
7576 7574
7577 7575 /* Only count sysV pages once for locked memory */
7578 7576 i_edx = svd->anon_index + seg_page(seg, addr + len);
7579 7577 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7580 7578 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7581 7579 anon_array_enter(amp, i_idx, &i_cookie);
7582 7580 i_ap = anon_get_ptr(amp->ahp, i_idx);
7583 7581 if (i_ap == NULL) {
7584 7582 unlocked_bytes += PAGESIZE;
7585 7583 anon_array_exit(&i_cookie);
7586 7584 continue;
7587 7585 }
7588 7586 swap_xlate(i_ap, &i_vp, &i_off);
7589 7587 anon_array_exit(&i_cookie);
7590 7588 pp = page_lookup(i_vp, i_off, SE_SHARED);
7591 7589 if (pp == NULL) {
7592 7590 unlocked_bytes += PAGESIZE;
7593 7591 continue;
7594 7592 } else if (pp->p_lckcnt == 0)
7595 7593 unlocked_bytes += PAGESIZE;
7596 7594 page_unlock(pp);
7597 7595 }
7598 7596 			ANON_LOCK_EXIT(&amp->a_rwlock);
7599 7597 }
7600 7598
7601 7599 mutex_enter(&p->p_lock);
7602 7600 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7603 7601 chargeproc);
7604 7602 mutex_exit(&p->p_lock);
7605 7603
7606 7604 if (err) {
7607 7605 if (sp != NULL)
7608 7606 mutex_exit(&sp->shm_mlock);
7609 7607 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7610 7608 return (err);
7611 7609 }
7612 7610 }
7613 7611 /*
7614 7612 * Loop over all pages in the range. Process if we're locking and
7615 7613 * page has not already been locked in this mapping; or if we're
7616 7614 * unlocking and the page has been locked.
7617 7615 */
7618 7616 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7619 7617 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7620 7618 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7621 7619 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7622 7620 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7623 7621
7624 7622 if (amp != NULL)
7625 7623 				ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7626 7624 /*
7627 7625 * If this isn't a MAP_NORESERVE segment and
7628 7626 * we're locking, allocate anon slots if they
7629 7627 * don't exist. The page is brought in later on.
7630 7628 */
7631 7629 if (op == MC_LOCK && svd->vp == NULL &&
7632 7630 ((svd->flags & MAP_NORESERVE) == 0) &&
7633 7631 amp != NULL &&
7634 7632 ((ap = anon_get_ptr(amp->ahp, anon_index))
7635 7633 == NULL)) {
7636 7634 anon_array_enter(amp, anon_index, &cookie);
7637 7635
7638 7636 if ((ap = anon_get_ptr(amp->ahp,
7639 7637 anon_index)) == NULL) {
7640 7638 pp = anon_zero(seg, addr, &ap,
7641 7639 svd->cred);
7642 7640 if (pp == NULL) {
7643 7641 anon_array_exit(&cookie);
7644 7642 					ANON_LOCK_EXIT(&amp->a_rwlock);
7645 7643 err = ENOMEM;
7646 7644 goto out;
7647 7645 }
7648 7646 ASSERT(anon_get_ptr(amp->ahp,
7649 7647 anon_index) == NULL);
7650 7648 (void) anon_set_ptr(amp->ahp,
7651 7649 anon_index, ap, ANON_SLEEP);
7652 7650 page_unlock(pp);
7653 7651 }
7654 7652 anon_array_exit(&cookie);
7655 7653 }
7656 7654
7657 7655 /*
7658 7656 * Get name for page, accounting for
7659 7657 * existence of private copy.
7660 7658 */
7661 7659 ap = NULL;
7662 7660 if (amp != NULL) {
7663 7661 anon_array_enter(amp, anon_index, &cookie);
7664 7662 ap = anon_get_ptr(amp->ahp, anon_index);
7665 7663 if (ap != NULL) {
7666 7664 swap_xlate(ap, &vp, &off);
7667 7665 } else {
7668 7666 if (svd->vp == NULL &&
7669 7667 (svd->flags & MAP_NORESERVE)) {
7670 7668 anon_array_exit(&cookie);
7671 7669 					ANON_LOCK_EXIT(&amp->a_rwlock);
7672 7670 continue;
7673 7671 }
7674 7672 vp = svd->vp;
7675 7673 off = offset;
7676 7674 }
7677 7675 if (op != MC_LOCK || ap == NULL) {
7678 7676 anon_array_exit(&cookie);
7679 7677 					ANON_LOCK_EXIT(&amp->a_rwlock);
7680 7678 }
7681 7679 } else {
7682 7680 vp = svd->vp;
7683 7681 off = offset;
7684 7682 }
7685 7683
7686 7684 /*
7687 7685 * Get page frame. It's ok if the page is
7688 7686 * not available when we're unlocking, as this
7689 7687 * may simply mean that a page we locked got
7690 7688 * truncated out of existence after we locked it.
7691 7689 *
7692 7690 * Invoke VOP_GETPAGE() to obtain the page struct
7693 7691 * since we may need to read it from disk if its
7694 7692 * been paged out.
7695 7693 */
7696 7694 if (op != MC_LOCK)
7697 7695 pp = page_lookup(vp, off, SE_SHARED);
7698 7696 else {
7699 7697 page_t *pl[1 + 1];
7700 7698 int error;
7701 7699
7702 7700 ASSERT(vp != NULL);
7703 7701
7704 7702 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7705 7703 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7706 7704 S_OTHER, svd->cred, NULL);
7707 7705
7708 7706 if (error && ap != NULL) {
7709 7707 anon_array_exit(&cookie);
7710 7708 					ANON_LOCK_EXIT(&amp->a_rwlock);
7711 7709 }
7712 7710
7713 7711 /*
7714 7712 * If the error is EDEADLK then we must bounce
7715 7713 * up and drop all vm subsystem locks and then
7716 7714 * retry the operation later
7717 7715 * This behavior is a temporary measure because
7718 7716 * ufs/sds logging is badly designed and will
7719 7717 * deadlock if we don't allow this bounce to
7720 7718 * happen. The real solution is to re-design
7721 7719 * the logging code to work properly. See bug
7722 7720 * 4125102 for details of the problem.
7723 7721 */
7724 7722 if (error == EDEADLK) {
7725 7723 err = error;
7726 7724 goto out;
7727 7725 }
7728 7726 /*
7729 7727 * Quit if we fail to fault in the page. Treat
7730 7728 * the failure as an error, unless the addr
7731 7729 * is mapped beyond the end of a file.
7732 7730 */
7733 7731 if (error && svd->vp) {
7734 7732 va.va_mask = AT_SIZE;
7735 7733 if (VOP_GETATTR(svd->vp, &va, 0,
7736 7734 svd->cred, NULL) != 0) {
7737 7735 err = EIO;
7738 7736 goto out;
7739 7737 }
7740 7738 if (btopr(va.va_size) >=
7741 7739 btopr(off + 1)) {
7742 7740 err = EIO;
7743 7741 goto out;
7744 7742 }
7745 7743 goto out;
7746 7744
7747 7745 } else if (error) {
7748 7746 err = EIO;
7749 7747 goto out;
7750 7748 }
7751 7749 pp = pl[0];
7752 7750 ASSERT(pp != NULL);
7753 7751 }
7754 7752
7755 7753 /*
7756 7754 * See Statement at the beginning of this routine.
7757 7755 *
7758 7756 * claim is always set if MAP_PRIVATE and PROT_WRITE
7759 7757 * irrespective of following factors:
7760 7758 *
7761 7759 * (1) anon slots are populated or not
7762 7760 * (2) cow is broken or not
7763 7761 * (3) refcnt on ap is 1 or greater than 1
7764 7762 *
7765 7763 * See 4140683 for details
7766 7764 */
7767 7765 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7768 7766 (svd->type == MAP_PRIVATE));
7769 7767
7770 7768 /*
7771 7769 * Perform page-level operation appropriate to
7772 7770 * operation. If locking, undo the SOFTLOCK
7773 7771 * performed to bring the page into memory
7774 7772 * after setting the lock. If unlocking,
7775 7773 * and no page was found, account for the claim
7776 7774 * separately.
7777 7775 */
7778 7776 if (op == MC_LOCK) {
7779 7777 int ret = 1; /* Assume success */
7780 7778
7781 7779 ASSERT(!VPP_ISPPLOCK(vpp));
7782 7780
7783 7781 ret = page_pp_lock(pp, claim, 0);
7784 7782 if (ap != NULL) {
7785 7783 if (ap->an_pvp != NULL) {
7786 7784 anon_swap_free(ap, pp);
7787 7785 }
7788 7786 anon_array_exit(&cookie);
7789 7787 					ANON_LOCK_EXIT(&amp->a_rwlock);
7790 7788 }
7791 7789 if (ret == 0) {
7792 7790 /* locking page failed */
7793 7791 page_unlock(pp);
7794 7792 err = EAGAIN;
7795 7793 goto out;
7796 7794 }
7797 7795 VPP_SETPPLOCK(vpp);
7798 7796 if (sp != NULL) {
7799 7797 if (pp->p_lckcnt == 1)
7800 7798 locked_bytes += PAGESIZE;
7801 7799 } else
7802 7800 locked_bytes += PAGESIZE;
7803 7801
7804 7802 if (lockmap != (ulong_t *)NULL)
7805 7803 BT_SET(lockmap, pos);
7806 7804
7807 7805 page_unlock(pp);
7808 7806 } else {
7809 7807 ASSERT(VPP_ISPPLOCK(vpp));
7810 7808 if (pp != NULL) {
7811 7809 /* sysV pages should be locked */
7812 7810 ASSERT(sp == NULL || pp->p_lckcnt > 0);
7813 7811 page_pp_unlock(pp, claim, 0);
7814 7812 if (sp != NULL) {
7815 7813 if (pp->p_lckcnt == 0)
7816 7814 unlocked_bytes
7817 7815 += PAGESIZE;
7818 7816 } else
7819 7817 unlocked_bytes += PAGESIZE;
7820 7818 page_unlock(pp);
7821 7819 } else {
7822 7820 ASSERT(sp == NULL);
7823 7821 unlocked_bytes += PAGESIZE;
7824 7822 }
7825 7823 VPP_CLRPPLOCK(vpp);
7826 7824 }
7827 7825 }
7828 7826 }
7829 7827 out:
7830 7828 if (op == MC_LOCK) {
7831 7829 /* Credit back bytes that did not get locked */
7832 7830 if ((unlocked_bytes - locked_bytes) > 0) {
7833 7831 if (proj == NULL)
7834 7832 mutex_enter(&p->p_lock);
7835 7833 rctl_decr_locked_mem(p, proj,
7836 7834 (unlocked_bytes - locked_bytes), chargeproc);
7837 7835 if (proj == NULL)
7838 7836 mutex_exit(&p->p_lock);
7839 7837 }
7840 7838
7841 7839 } else {
7842 7840 /* Account bytes that were unlocked */
7843 7841 if (unlocked_bytes > 0) {
7844 7842 if (proj == NULL)
7845 7843 mutex_enter(&p->p_lock);
7846 7844 rctl_decr_locked_mem(p, proj, unlocked_bytes,
7847 7845 chargeproc);
7848 7846 if (proj == NULL)
7849 7847 mutex_exit(&p->p_lock);
7850 7848 }
7851 7849 }
7852 7850 if (sp != NULL)
7853 7851 mutex_exit(&sp->shm_mlock);
7854 7852 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7855 7853
7856 7854 return (err);
7857 7855 }
7858 7856
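/*
 * Editor's sketch of the accounting at "out:" in segvn_lockop(): the
 * bytes charged up front (unlocked_bytes) are netted against what
 * actually got locked, and only the difference is credited back to
 * the locked-memory resource control.
 */
#include <stddef.h>

static size_t
lock_credit_back(size_t charged, size_t locked)
{
	/* bytes to return to the resource control, if any */
	return ((charged > locked) ? (charged - locked) : 0);
}
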
7859 7857 /*
7860 7858 * Set advice from user for specified pages
7861 7859 * There are 9 types of advice:
7862 7860 * MADV_NORMAL - Normal (default) behavior (whatever that is)
7863 7861 * MADV_RANDOM - Random page references
7864 7862 * do not allow readahead or 'klustering'
7865 7863 * MADV_SEQUENTIAL - Sequential page references
7866 7864 * Pages previous to the one currently being
7867 7865 * accessed (determined by fault) are 'not needed'
7868 7866 * and are freed immediately
7869 7867 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
7870 7868 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
7871 7869 * MADV_FREE - Contents can be discarded
7872 7870 * MADV_ACCESS_DEFAULT- Default access
7873 7871 * MADV_ACCESS_LWP - Next LWP will access heavily
7874 7872 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
7875 7873 */
7876 7874 static int
7877 7875 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
7878 7876 {
7879 7877 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7880 7878 size_t page;
7881 7879 int err = 0;
7882 7880 int already_set;
7883 7881 struct anon_map *amp;
7884 7882 ulong_t anon_index;
7885 7883 struct seg *next;
7886 7884 lgrp_mem_policy_t policy;
7887 7885 struct seg *prev;
7888 7886 struct vnode *vp;
7889 7887
7890 7888 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7891 7889
7892 7890 /*
7893 7891 * In case of MADV_FREE, we won't be modifying any segment private
7894 7892 * data structures; so, we only need to grab READER's lock
7895 7893 */
7896 7894 if (behav != MADV_FREE) {
7897 7895 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7898 7896 if (svd->tr_state != SEGVN_TR_OFF) {
7899 7897 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7900 7898 return (0);
7901 7899 }
7902 7900 } else {
7903 7901 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7904 7902 }
7905 7903
7906 7904 /*
7907 7905 * Large pages are assumed to be only turned on when accesses to the
7908 7906 * segment's address range have spatial and temporal locality. That
7909 7907 * justifies ignoring MADV_SEQUENTIAL for large page segments.
7910 7908 * Also, ignore advice affecting lgroup memory allocation
7911 7909 	 * if we don't need to do lgroup optimizations on this system.
7912 7910 */
7913 7911
7914 7912 if ((behav == MADV_SEQUENTIAL &&
7915 7913 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
7916 7914 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
7917 7915 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
7918 7916 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7919 7917 return (0);
7920 7918 }
7921 7919
7922 7920 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
7923 7921 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
7924 7922 /*
7925 7923 * Since we are going to unload hat mappings
7926 7924 * we first have to flush the cache. Otherwise
7927 7925 * this might lead to system panic if another
7928 7926 * thread is doing physio on the range whose
7929 7927 * mappings are unloaded by madvise(3C).
7930 7928 */
7931 7929 if (svd->softlockcnt > 0) {
7932 7930 /*
7933 7931 			 * If this is a shared segment, a non-zero softlockcnt
7934 7932 			 * means locked pages are still in use.
7935 7933 */
7936 7934 if (svd->type == MAP_SHARED) {
7937 7935 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7938 7936 return (EAGAIN);
7939 7937 }
7940 7938 /*
7941 7939 * Since we do have the segvn writers lock
7942 7940 * nobody can fill the cache with entries
7943 7941 * belonging to this seg during the purge.
7944 7942 * The flush either succeeds or we still
7945 7943 			 * have pending I/Os. In the latter case,
7946 7944 * madvise(3C) fails.
7947 7945 */
7948 7946 segvn_purge(seg);
7949 7947 if (svd->softlockcnt > 0) {
7950 7948 /*
7951 7949 * Since madvise(3C) is advisory and
7952 7950 * it's not part of UNIX98, madvise(3C)
7953 7951 * failure here doesn't cause any hardship.
7954 7952 * Note that we don't block in "as" layer.
7955 7953 */
7956 7954 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7957 7955 return (EAGAIN);
7958 7956 }
7959 7957 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7960 7958 svd->amp->a_softlockcnt > 0) {
7961 7959 /*
7962 7960 * Try to purge this amp's entries from pcache. It
7963 7961 * will succeed only if other segments that share the
7964 7962 * amp have no outstanding softlock's.
7965 7963 */
7966 7964 segvn_purge(seg);
7967 7965 }
7968 7966 }
7969 7967
7970 7968 amp = svd->amp;
7971 7969 vp = svd->vp;
7972 7970 if (behav == MADV_FREE) {
7973 7971 /*
7974 7972 * MADV_FREE is not supported for segments with
7975 7973 * underlying object; if anonmap is NULL, anon slots
7976 7974 * are not yet populated and there is nothing for
7977 7975 * us to do. As MADV_FREE is advisory, we don't
7978 7976 * return error in either case.
7979 7977 */
7980 7978 if (vp != NULL || amp == NULL) {
7981 7979 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7982 7980 return (0);
7983 7981 }
7984 7982
7985 7983 segvn_purge(seg);
7986 7984
7987 7985 page = seg_page(seg, addr);
7988 7986 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7989 7987 anon_disclaim(amp, svd->anon_index + page, len);
7990 7988 		ANON_LOCK_EXIT(&amp->a_rwlock);
7991 7989 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7992 7990 return (0);
7993 7991 }
7994 7992
7995 7993 /*
7996 7994 	 * If the advice is to be applied to the entire segment,
7997 7995 	 * use the advice field in the seg_data structure;
7998 7996 	 * otherwise use the appropriate vpage entry.
7999 7997 */
8000 7998 if ((addr == seg->s_base) && (len == seg->s_size)) {
8001 7999 switch (behav) {
8002 8000 case MADV_ACCESS_LWP:
8003 8001 case MADV_ACCESS_MANY:
8004 8002 case MADV_ACCESS_DEFAULT:
8005 8003 /*
8006 8004 * Set memory allocation policy for this segment
8007 8005 */
8008 8006 policy = lgrp_madv_to_policy(behav, len, svd->type);
8009 8007 if (svd->type == MAP_SHARED)
8010 8008 already_set = lgrp_shm_policy_set(policy, amp,
8011 8009 svd->anon_index, vp, svd->offset, len);
8012 8010 else {
8013 8011 /*
8014 8012 * For private memory, need writers lock on
8015 8013 * address space because the segment may be
8016 8014 * split or concatenated when changing policy
8017 8015 */
8018 8016 if (AS_READ_HELD(seg->s_as,
8019 8017 &seg->s_as->a_lock)) {
8020 8018 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8021 8019 return (IE_RETRY);
8022 8020 }
8023 8021
8024 8022 already_set = lgrp_privm_policy_set(policy,
8025 8023 &svd->policy_info, len);
8026 8024 }
8027 8025
8028 8026 /*
8029 8027 * If policy set already and it shouldn't be reapplied,
8030 8028 * don't do anything.
8031 8029 */
8032 8030 if (already_set &&
8033 8031 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8034 8032 break;
8035 8033
8036 8034 /*
8037 8035 * Mark any existing pages in given range for
8038 8036 * migration
8039 8037 */
8040 8038 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8041 8039 vp, svd->offset, 1);
8042 8040
8043 8041 /*
8044 8042 * If same policy set already or this is a shared
8045 8043 * memory segment, don't need to try to concatenate
8046 8044 * segment with adjacent ones.
8047 8045 */
8048 8046 if (already_set || svd->type == MAP_SHARED)
8049 8047 break;
8050 8048
8051 8049 /*
8052 8050 * Try to concatenate this segment with previous
8053 8051 * one and next one, since we changed policy for
8054 8052 * this one and it may be compatible with adjacent
8055 8053 * ones now.
8056 8054 */
8057 8055 prev = AS_SEGPREV(seg->s_as, seg);
8058 8056 next = AS_SEGNEXT(seg->s_as, seg);
8059 8057
8060 8058 if (next && next->s_ops == &segvn_ops &&
8061 8059 addr + len == next->s_base)
8062 8060 (void) segvn_concat(seg, next, 1);
8063 8061
8064 8062 if (prev && prev->s_ops == &segvn_ops &&
8065 8063 addr == prev->s_base + prev->s_size) {
8066 8064 /*
8067 8065 * Drop lock for private data of current
8068 8066 * segment before concatenating (deleting) it
8069 8067 * and return IE_REATTACH to tell as_ctl() that
8070 8068 * current segment has changed
8071 8069 */
8072 8070 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8073 8071 if (!segvn_concat(prev, seg, 1))
8074 8072 err = IE_REATTACH;
8075 8073
8076 8074 return (err);
8077 8075 }
8078 8076 break;
8079 8077
8080 8078 case MADV_SEQUENTIAL:
8081 8079 /*
8082 8080 * unloading mapping guarantees
8083 8081 * detection in segvn_fault
8084 8082 */
8085 8083 ASSERT(seg->s_szc == 0);
8086 8084 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8087 8085 hat_unload(seg->s_as->a_hat, addr, len,
8088 8086 HAT_UNLOAD);
8089 8087 /* FALLTHROUGH */
8090 8088 case MADV_NORMAL:
8091 8089 case MADV_RANDOM:
8092 8090 svd->advice = (uchar_t)behav;
8093 8091 svd->pageadvice = 0;
8094 8092 break;
8095 8093 case MADV_WILLNEED: /* handled in memcntl */
8096 8094 case MADV_DONTNEED: /* handled in memcntl */
8097 8095 case MADV_FREE: /* handled above */
8098 8096 break;
8099 8097 default:
8100 8098 err = EINVAL;
8101 8099 }
8102 8100 } else {
8103 8101 caddr_t eaddr;
8104 8102 struct seg *new_seg;
8105 8103 struct segvn_data *new_svd;
8106 8104 u_offset_t off;
8107 8105 caddr_t oldeaddr;
8108 8106
8109 8107 page = seg_page(seg, addr);
8110 8108
8111 8109 segvn_vpage(seg);
8112 8110 if (svd->vpage == NULL) {
8113 8111 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8114 8112 return (ENOMEM);
8115 8113 }
8116 8114
8117 8115 switch (behav) {
8118 8116 struct vpage *bvpp, *evpp;
8119 8117
8120 8118 case MADV_ACCESS_LWP:
8121 8119 case MADV_ACCESS_MANY:
8122 8120 case MADV_ACCESS_DEFAULT:
8123 8121 /*
8124 8122 * Set memory allocation policy for portion of this
8125 8123 * segment
8126 8124 */
8127 8125
8128 8126 /*
8129 8127 * Align address and length of advice to page
8130 8128 * boundaries for large pages
8131 8129 */
8132 8130 if (seg->s_szc != 0) {
8133 8131 size_t pgsz;
8134 8132
8135 8133 pgsz = page_get_pagesize(seg->s_szc);
8136 8134 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8137 8135 len = P2ROUNDUP(len, pgsz);
8138 8136 }
8139 8137
8140 8138 /*
8141 8139 * Check to see whether policy is set already
8142 8140 */
8143 8141 policy = lgrp_madv_to_policy(behav, len, svd->type);
8144 8142
8145 8143 anon_index = svd->anon_index + page;
8146 8144 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8147 8145
8148 8146 if (svd->type == MAP_SHARED)
8149 8147 already_set = lgrp_shm_policy_set(policy, amp,
8150 8148 anon_index, vp, off, len);
8151 8149 else
8152 8150 already_set =
8153 8151 (policy == svd->policy_info.mem_policy);
8154 8152
8155 8153 /*
8156 8154 * If policy set already and it shouldn't be reapplied,
8157 8155 * don't do anything.
8158 8156 */
8159 8157 if (already_set &&
8160 8158 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8161 8159 break;
8162 8160
8163 8161 /*
8164 8162 * For private memory, need writers lock on
8165 8163 * address space because the segment may be
8166 8164 * split or concatenated when changing policy
8167 8165 */
8168 8166 if (svd->type == MAP_PRIVATE &&
8169 8167 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8170 8168 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8171 8169 return (IE_RETRY);
8172 8170 }
8173 8171
8174 8172 /*
8175 8173 * Mark any existing pages in given range for
8176 8174 * migration
8177 8175 */
8178 8176 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8179 8177 vp, svd->offset, 1);
8180 8178
8181 8179 /*
8182 8180 * Don't need to try to split or concatenate
8183 8181 * segments, since policy is same or this is a shared
8184 8182 * memory segment
8185 8183 */
8186 8184 if (already_set || svd->type == MAP_SHARED)
8187 8185 break;
8188 8186
8189 8187 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8190 8188 ASSERT(svd->amp == NULL);
8191 8189 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8192 8190 ASSERT(svd->softlockcnt == 0);
8193 8191 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8194 8192 HAT_REGION_TEXT);
8195 8193 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8196 8194 }
8197 8195
8198 8196 /*
8199 8197 * Split off new segment if advice only applies to a
8200 8198 * portion of existing segment starting in middle
8201 8199 */
8202 8200 new_seg = NULL;
8203 8201 eaddr = addr + len;
8204 8202 oldeaddr = seg->s_base + seg->s_size;
8205 8203 if (addr > seg->s_base) {
8206 8204 /*
8207 8205 * Must flush I/O page cache
8208 8206 * before splitting segment
8209 8207 */
8210 8208 if (svd->softlockcnt > 0)
8211 8209 segvn_purge(seg);
8212 8210
8213 8211 /*
8214 8212 * Split segment and return IE_REATTACH to tell
8215 8213 * as_ctl() that current segment changed
8216 8214 */
8217 8215 new_seg = segvn_split_seg(seg, addr);
8218 8216 new_svd = (struct segvn_data *)new_seg->s_data;
8219 8217 err = IE_REATTACH;
8220 8218
8221 8219 /*
8222 8220 * If new segment ends where old one
8223 8221 * did, try to concatenate the new
8224 8222 * segment with next one.
8225 8223 */
8226 8224 if (eaddr == oldeaddr) {
8227 8225 /*
8228 8226 * Set policy for new segment
8229 8227 */
8230 8228 (void) lgrp_privm_policy_set(policy,
8231 8229 &new_svd->policy_info,
8232 8230 new_seg->s_size);
8233 8231
8234 8232 next = AS_SEGNEXT(new_seg->s_as,
8235 8233 new_seg);
8236 8234
8237 8235 if (next &&
8238 8236 next->s_ops == &segvn_ops &&
8239 8237 eaddr == next->s_base)
8240 8238 (void) segvn_concat(new_seg,
8241 8239 next, 1);
8242 8240 }
8243 8241 }
8244 8242
8245 8243 /*
8246 8244 * Split off end of existing segment if advice only
8247 8245 * applies to a portion of segment ending before
8248 8246 * end of the existing segment
8249 8247 */
8250 8248 if (eaddr < oldeaddr) {
8251 8249 /*
8252 8250 * Must flush I/O page cache
8253 8251 * before splitting segment
8254 8252 */
8255 8253 if (svd->softlockcnt > 0)
8256 8254 segvn_purge(seg);
8257 8255
8258 8256 /*
8259 8257 * If beginning of old segment was already
8260 8258 * split off, use new segment to split end off
8261 8259 * from.
8262 8260 */
8263 8261 if (new_seg != NULL && new_seg != seg) {
8264 8262 /*
8265 8263 * Split segment
8266 8264 */
8267 8265 (void) segvn_split_seg(new_seg, eaddr);
8268 8266
8269 8267 /*
8270 8268 * Set policy for new segment
8271 8269 */
8272 8270 (void) lgrp_privm_policy_set(policy,
8273 8271 &new_svd->policy_info,
8274 8272 new_seg->s_size);
8275 8273 } else {
8276 8274 /*
8277 8275 * Split segment and return IE_REATTACH
8278 8276 * to tell as_ctl() that current
8279 8277 * segment changed
8280 8278 */
8281 8279 (void) segvn_split_seg(seg, eaddr);
8282 8280 err = IE_REATTACH;
8283 8281
8284 8282 (void) lgrp_privm_policy_set(policy,
8285 8283 &svd->policy_info, seg->s_size);
8286 8284
8287 8285 /*
8288 8286 * If new segment starts where old one
8289 8287 * did, try to concatenate it with
8290 8288 * previous segment.
8291 8289 */
8292 8290 if (addr == seg->s_base) {
8293 8291 prev = AS_SEGPREV(seg->s_as,
8294 8292 seg);
8295 8293
8296 8294 /*
8297 8295 * Drop lock for private data
8298 8296 * of current segment before
8299 8297 * concatenating (deleting) it
8300 8298 */
8301 8299 if (prev &&
8302 8300 prev->s_ops ==
8303 8301 &segvn_ops &&
8304 8302 addr == prev->s_base +
8305 8303 prev->s_size) {
8306 8304 SEGVN_LOCK_EXIT(
8307 8305 seg->s_as,
8308 8306 &svd->lock);
8309 8307 (void) segvn_concat(
8310 8308 prev, seg, 1);
8311 8309 return (err);
8312 8310 }
8313 8311 }
8314 8312 }
8315 8313 }
8316 8314 break;
8317 8315 case MADV_SEQUENTIAL:
8318 8316 ASSERT(seg->s_szc == 0);
8319 8317 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8320 8318 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8321 8319 /* FALLTHROUGH */
8322 8320 case MADV_NORMAL:
8323 8321 case MADV_RANDOM:
8324 8322 bvpp = &svd->vpage[page];
8325 8323 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8326 8324 for (; bvpp < evpp; bvpp++)
8327 8325 VPP_SETADVICE(bvpp, behav);
8328 8326 svd->advice = MADV_NORMAL;
8329 8327 break;
8330 8328 case MADV_WILLNEED: /* handled in memcntl */
8331 8329 case MADV_DONTNEED: /* handled in memcntl */
8332 8330 case MADV_FREE: /* handled above */
8333 8331 break;
8334 8332 default:
8335 8333 err = EINVAL;
8336 8334 }
8337 8335 }
8338 8336 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8339 8337 return (err);
8340 8338 }
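
The MADV_ACCESS_* arm above first widens the advice range outward to
large-page boundaries with P2ALIGN()/P2ROUNDUP(). A minimal standalone
sketch of that power-of-two arithmetic (the macro bodies are restated
from sys/sysmacros.h; the address, length and page size are
hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Power-of-two align down / round up, as defined in sys/sysmacros.h. */
#define	P2ALIGN(x, align)	((x) & -(align))
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	uintptr_t addr = 0x12345;	/* hypothetical advice start */
	size_t len = 0x3000;		/* hypothetical advice length */
	size_t pgsz = 0x10000;		/* e.g. a 64K large page */

	/* 0x12345 aligns down to 0x10000; 0x3000 rounds up to 0x10000 */
	printf("addr %#lx -> %#lx, len %#zx -> %#zx\n",
	    (unsigned long)addr,
	    (unsigned long)P2ALIGN(addr, (uintptr_t)pgsz),
	    len, (size_t)P2ROUNDUP(len, pgsz));
	return (0);
}
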
8341 8339
8342 8340 /*
8343 8341 * There is one kind of inheritance that can be specified for pages:
8344 8342 *
8345 8343 * SEGP_INH_ZERO - Pages should be zeroed in the child
8346 8344 */
8347 8345 static int
8348 8346 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8349 8347 {
8350 8348 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8351 8349 struct vpage *bvpp, *evpp;
8352 8350 size_t page;
8353 8351 int ret = 0;
8354 8352
8355 8353 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8356 8354
8357 8355 /* Can't support something we don't know about */
8358 8356 if (behav != SEGP_INH_ZERO)
8359 8357 return (ENOTSUP);
8360 8358
8361 8359 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8362 8360
8363 8361 /*
8364 8362 * This must be a straightforward anonymous segment that is mapped
8365 8363 * privately and is not backed by a vnode.
8366 8364 */
8367 8365 if (svd->tr_state != SEGVN_TR_OFF ||
8368 8366 svd->type != MAP_PRIVATE ||
8369 8367 svd->vp != NULL) {
8370 8368 ret = EINVAL;
8371 8369 goto out;
8372 8370 }
8373 8371
8374 8372 /*
8375 8373	 * If the entire segment has been marked as inherit zero, there is
8376 8374	 * no reason to do anything else.
8377 8375 */
8378 8376 if (svd->svn_inz == SEGVN_INZ_ALL) {
8379 8377 ret = 0;
8380 8378 goto out;
8381 8379 }
8382 8380
8383 8381 /*
8384 8382 * If this applies to the entire segment, simply mark it and we're done.
8385 8383 */
8386 8384 if ((addr == seg->s_base) && (len == seg->s_size)) {
8387 8385 svd->svn_inz = SEGVN_INZ_ALL;
8388 8386 ret = 0;
8389 8387 goto out;
8390 8388 }
8391 8389
8392 8390 /*
8393 8391 * We've been asked to mark a subset of this segment as inherit zero,
8394 8392	 * therefore we need to manipulate its vpages.
8395 8393 */
8396 8394 if (svd->vpage == NULL) {
8397 8395 segvn_vpage(seg);
8398 8396 if (svd->vpage == NULL) {
8399 8397 ret = ENOMEM;
8400 8398 goto out;
8401 8399 }
8402 8400 }
8403 8401
8404 8402 svd->svn_inz = SEGVN_INZ_VPP;
8405 8403 page = seg_page(seg, addr);
8406 8404 bvpp = &svd->vpage[page];
8407 8405 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8408 8406 for (; bvpp < evpp; bvpp++)
8409 8407 VPP_SETINHZERO(bvpp);
8410 8408 ret = 0;
8411 8409
8412 8410 out:
8413 8411 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8414 8412 return (ret);
8415 8413 }
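
From userland, segvn_inherit() is reached through memcntl(2); a hedged
sketch, assuming the MC_INHERIT_ZERO command from <sys/mman.h> on
illumos (error handling kept minimal on purpose):

#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
	caddr_t p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (1);

	/* Ask that children see this private anon range zeroed at fork(). */
	if (memcntl(p, len, MC_INHERIT_ZERO, 0, 0, 0) != 0)
		perror("memcntl(MC_INHERIT_ZERO)");

	return (0);
}
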
8416 8414
8417 8415 /*
8418 8416 * Create a vpage structure for this seg.
8419 8417 */
8420 8418 static void
8421 8419 segvn_vpage(struct seg *seg)
8422 8420 {
8423 8421 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8424 8422 struct vpage *vp, *evp;
8425 8423 static pgcnt_t page_limit = 0;
8426 8424
8427 8425 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8428 8426
8429 8427 /*
8430 8428 * If no vpage structure exists, allocate one. Copy the protections
8431 8429 * and the advice from the segment itself to the individual pages.
8432 8430 */
8433 8431 if (svd->vpage == NULL) {
8434 8432 /*
8435 8433 * Start by calculating the number of pages we must allocate to
8436 8434	 * track the per-page vpage structs needed for this entire
8437 8435 * segment. If we know now that it will require more than our
8438 8436 * heuristic for the maximum amount of kmem we can consume then
8439 8437 * fail. We do this here, instead of trying to detect this deep
8440 8438 * in page_resv and propagating the error up, since the entire
8441 8439 * memory allocation stack is not amenable to passing this
8442 8440 * back. Instead, it wants to keep trying.
8443 8441 *
8444 8442 * As a heuristic we set a page limit of 5/8s of total_pages
8445 8443 * for this allocation. We use shifts so that no floating
8446 8444 * point conversion takes place and only need to do the
8447 8445 * calculation once.
8448 8446 */
8449 8447 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
8450 8448 pgcnt_t npages = mem_needed >> PAGESHIFT;
8451 8449
8452 8450 if (page_limit == 0)
8453 8451 page_limit = (total_pages >> 1) + (total_pages >> 3);
8454 8452
8455 8453 if (npages > page_limit)
8456 8454 return;
8457 8455
8458 8456 svd->pageadvice = 1;
8459 8457 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
8460 8458 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8461 8459 for (vp = svd->vpage; vp < evp; vp++) {
8462 8460 VPP_SETPROT(vp, svd->prot);
8463 8461 VPP_SETADVICE(vp, svd->advice);
8464 8462 }
8465 8463 }
8466 8464 }
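
The limit above is pure shift arithmetic: (x >> 1) + (x >> 3) is
x/2 + x/8, i.e. 5/8 of x, with no floating point. A minimal standalone
check (the total_pages value is hypothetical):

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	unsigned long total_pages = 1048576;	/* hypothetical page count */
	unsigned long limit = (total_pages >> 1) + (total_pages >> 3);

	/* 524288 + 131072 == 655360 == 1048576 * 5 / 8 */
	assert(limit == total_pages / 2 + total_pages / 8);
	printf("page_limit = %lu of %lu pages\n", limit, total_pages);
	return (0);
}
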
8467 8465
8468 8466 /*
8469 8467 * Dump the pages belonging to this segvn segment.
8470 8468 */
8471 8469 static void
8472 8470 segvn_dump(struct seg *seg)
8473 8471 {
8474 8472 struct segvn_data *svd;
8475 8473 page_t *pp;
8476 8474 struct anon_map *amp;
8477 8475 ulong_t anon_index;
8478 8476 struct vnode *vp;
8479 8477 u_offset_t off, offset;
8480 8478 pfn_t pfn;
8481 8479 pgcnt_t page, npages;
8482 8480 caddr_t addr;
8483 8481
8484 8482 npages = seg_pages(seg);
8485 8483 svd = (struct segvn_data *)seg->s_data;
8486 8484 vp = svd->vp;
8487 8485 off = offset = svd->offset;
8488 8486 addr = seg->s_base;
8489 8487
8490 8488 if ((amp = svd->amp) != NULL) {
8491 8489 anon_index = svd->anon_index;
8492 8490 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8493 8491 }
8494 8492
8495 8493 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8496 8494 struct anon *ap;
8497 8495 int we_own_it = 0;
8498 8496
8499 8497 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8500 8498 swap_xlate_nopanic(ap, &vp, &off);
8501 8499 } else {
8502 8500 vp = svd->vp;
8503 8501 off = offset;
8504 8502 }
8505 8503
8506 8504 /*
8507 8505 * If pp == NULL, the page either does not exist
8508 8506 * or is exclusively locked. So determine if it
8509 8507 * exists before searching for it.
8510 8508 */
8511 8509
8512 8510 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8513 8511 we_own_it = 1;
8514 8512 else
8515 8513 pp = page_exists(vp, off);
8516 8514
8517 8515 if (pp) {
8518 8516 pfn = page_pptonum(pp);
8519 8517 dump_addpage(seg->s_as, addr, pfn);
8520 8518 if (we_own_it)
8521 8519 page_unlock(pp);
8522 8520 }
8523 8521 addr += PAGESIZE;
8524 8522 dump_timeleft = dump_timeout;
8525 8523 }
8526 8524
8527 8525 if (amp != NULL)
8528 8526 		ANON_LOCK_EXIT(&amp->a_rwlock);
8529 8527 }
8530 8528
8531 8529 #ifdef DEBUG
8532 8530 static uint32_t segvn_pglock_mtbf = 0;
8533 8531 #endif
8534 8532
8535 8533 #define PCACHE_SHWLIST ((page_t *)-2)
8536 8534 #define NOPCACHE_SHWLIST ((page_t *)-1)
8537 8535
8538 8536 /*
8539 8537 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8540 8538 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8541 8539 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8542 8540 * the same parts of the segment. Currently shadow list creation is only
8543 8541 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8544 8542 * tagged with segment pointer, starting virtual address and length. This
8545 8543 * approach for MAP_SHARED segments may add many pcache entries for the same
8546 8544 * set of pages and lead to long hash chains that decrease pcache lookup
8547 8545 * performance. To avoid this issue for shared segments shared anon map and
8548 8546 * starting anon index are used for pcache entry tagging. This allows all
8549 8547 * segments to share pcache entries for the same anon range and reduces pcache
8550 8548 * chain's length as well as memory overhead from duplicate shadow lists and
8551 8549 * pcache entries.
8552 8550 *
8553 8551 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8554 8552 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8555 8553 * part of softlockcnt accounting is done differently for private and shared
8556 8554 * segments. In private segment case softlock is only incremented when a new
8557 8555 * shadow list is created but not when an existing one is found via
8558 8556 * seg_plookup(). pcache entries have reference count incremented/decremented
8559 8557 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8560 8558 * reference count can be purged (and purging is needed before segment can be
8561 8559 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8562 8560 * decrement softlockcnt. Since in private segment case each of its pcache
8563 8561 * entries only belongs to this segment we can expect that when
8564 8562 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8565 8563 * segment purge will succeed and softlockcnt will drop to 0. In shared
8566 8564 * segment case reference count in pcache entry counts active locks from many
8567 8565 * different segments so we can't expect segment purging to succeed even when
8568 8566 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8569 8567 * segment. To be able to determine when there're no pending pagelocks in
8570 8568 * shared segment case we don't rely on purging to make softlockcnt drop to 0
8571 8569 * but instead softlockcnt is incremented and decremented for every
8572 8570 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8573 8571 * list was created or an existing one was found. When softlockcnt drops to 0
8574 8572 * this segment no longer has any claims for pcached shadow lists and the
8575 8573 * segment can be freed even if there're still active pcache entries
8576 8574 * shared by this segment anon map. Shared segment pcache entries belong to
8577 8575 * anon map and are typically removed when anon map is freed after all
8578 8576 * processes destroy the segments that use this anon map.
8579 8577 */
8580 8578 static int
8581 8579 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8582 8580 enum lock_type type, enum seg_rw rw)
8583 8581 {
8584 8582 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8585 8583 size_t np;
8586 8584 pgcnt_t adjustpages;
8587 8585 pgcnt_t npages;
8588 8586 ulong_t anon_index;
8589 8587 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8590 8588 uint_t error;
8591 8589 struct anon_map *amp;
8592 8590 pgcnt_t anpgcnt;
8593 8591 struct page **pplist, **pl, *pp;
8594 8592 caddr_t a;
8595 8593 size_t page;
8596 8594 caddr_t lpgaddr, lpgeaddr;
8597 8595 anon_sync_obj_t cookie;
8598 8596 int anlock;
8599 8597 struct anon_map *pamp;
8600 8598 caddr_t paddr;
8601 8599 seg_preclaim_cbfunc_t preclaim_callback;
8602 8600 size_t pgsz;
8603 8601 int use_pcache;
8604 8602 size_t wlen;
8605 8603 uint_t pflags = 0;
8606 8604 int sftlck_sbase = 0;
8607 8605 int sftlck_send = 0;
8608 8606
8609 8607 #ifdef DEBUG
8610 8608 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8611 8609 hrtime_t ts = gethrtime();
8612 8610 if ((ts % segvn_pglock_mtbf) == 0) {
8613 8611 return (ENOTSUP);
8614 8612 }
8615 8613 if ((ts % segvn_pglock_mtbf) == 1) {
8616 8614 return (EFAULT);
8617 8615 }
8618 8616 }
8619 8617 #endif
8620 8618
8621 8619 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8622 8620 "segvn_pagelock: start seg %p addr %p", seg, addr);
8623 8621
8624 8622 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8625 8623 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8626 8624
8627 8625 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8628 8626
8629 8627 /*
8630 8628 * for now we only support pagelock to anon memory. We would have to
8631 8629 * check protections for vnode objects and call into the vnode driver.
8632 8630 * That's too much for a fast path. Let the fault entry point handle
8633 8631 * it.
8634 8632 */
8635 8633 if (svd->vp != NULL) {
8636 8634 if (type == L_PAGELOCK) {
8637 8635 error = ENOTSUP;
8638 8636 goto out;
8639 8637 }
8640 8638 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8641 8639 }
8642 8640 if ((amp = svd->amp) == NULL) {
8643 8641 if (type == L_PAGELOCK) {
8644 8642 error = EFAULT;
8645 8643 goto out;
8646 8644 }
8647 8645 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8648 8646 }
8649 8647 if (rw != S_READ && rw != S_WRITE) {
8650 8648 if (type == L_PAGELOCK) {
8651 8649 error = ENOTSUP;
8652 8650 goto out;
8653 8651 }
8654 8652 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8655 8653 }
8656 8654
8657 8655 if (seg->s_szc != 0) {
8658 8656 /*
8659 8657 * We are adjusting the pagelock region to the large page size
8660 8658 * boundary because the unlocked part of a large page cannot
8661 8659 * be freed anyway unless all constituent pages of a large
8662 8660 * page are locked. Bigger regions reduce pcache chain length
8663 8661 * and improve lookup performance. The tradeoff is that the
8664 8662 * very first segvn_pagelock() call for a given page is more
8665 8663 * expensive if only 1 page_t is needed for IO. This is only
8666 8664 * an issue if pcache entry doesn't get reused by several
8667 8665 * subsequent calls. We optimize here for the case when pcache
8668 8666 * is heavily used by repeated IOs to the same address range.
8669 8667 *
8670 8668 * Note segment's page size cannot change while we are holding
8671 8669 * as lock. And then it cannot change while softlockcnt is
8672 8670 * not 0. This will allow us to correctly recalculate large
8673 8671 * page size region for the matching pageunlock/reclaim call
8674 8672 * since as_pageunlock() caller must always match
8675 8673 * as_pagelock() call's addr and len.
8676 8674 *
8677 8675 * For pageunlock *ppp points to the pointer of page_t that
8678 8676 		 * corresponds to the real unadjusted start address. Similarly,
8679 8677 		 * for pagelock *ppp must point to the pointer of page_t that
8680 8678 * corresponds to the real unadjusted start address.
8681 8679 */
8682 8680 pgsz = page_get_pagesize(seg->s_szc);
8683 8681 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8684 8682 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8685 8683 } else if (len < segvn_pglock_comb_thrshld) {
8686 8684 lpgaddr = addr;
8687 8685 lpgeaddr = addr + len;
8688 8686 adjustpages = 0;
8689 8687 pgsz = PAGESIZE;
8690 8688 } else {
8691 8689 /*
8692 8690 * Align the address range of large enough requests to allow
8693 8691 * combining of different shadow lists into 1 to reduce memory
8694 8692 * overhead from potentially overlapping large shadow lists
8695 8693 * (worst case is we have a 1MB IO into buffers with start
8696 8694 * addresses separated by 4K). Alignment is only possible if
8697 8695 * padded chunks have sufficient access permissions. Note
8698 8696 * permissions won't change between L_PAGELOCK and
8699 8697 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8700 8698 * segvn_setprot() to wait until softlockcnt drops to 0. This
8701 8699 * allows us to determine in L_PAGEUNLOCK the same range we
8702 8700 * computed in L_PAGELOCK.
8703 8701 *
8704 8702 * If alignment is limited by segment ends set
8705 8703 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8706 8704 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8707 8705 * per segment counters. In L_PAGEUNLOCK case decrease
8708 8706 * softlockcnt_sbase/softlockcnt_send counters if
8709 8707 * sftlck_sbase/sftlck_send flags are set. When
8710 8708 * softlockcnt_sbase/softlockcnt_send are non 0
8711 8709 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8712 8710 * won't merge the segments. This restriction combined with
8713 8711 * restriction on segment unmapping and splitting for segments
8714 8712 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8715 8713 * correctly determine the same range that was previously
8716 8714 * locked by matching L_PAGELOCK.
8717 8715 */
8718 8716 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8719 8717 pgsz = PAGESIZE;
8720 8718 if (svd->type == MAP_PRIVATE) {
8721 8719 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8722 8720 segvn_pglock_comb_balign);
8723 8721 if (lpgaddr < seg->s_base) {
8724 8722 lpgaddr = seg->s_base;
8725 8723 sftlck_sbase = 1;
8726 8724 }
8727 8725 } else {
8728 8726 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8729 8727 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8730 8728 if (aaix < svd->anon_index) {
8731 8729 lpgaddr = seg->s_base;
8732 8730 sftlck_sbase = 1;
8733 8731 } else {
8734 8732 lpgaddr = addr - ptob(aix - aaix);
8735 8733 ASSERT(lpgaddr >= seg->s_base);
8736 8734 }
8737 8735 }
8738 8736 if (svd->pageprot && lpgaddr != addr) {
8739 8737 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8740 8738 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8741 8739 while (vp < evp) {
8742 8740 if ((VPP_PROT(vp) & protchk) == 0) {
8743 8741 break;
8744 8742 }
8745 8743 vp++;
8746 8744 }
8747 8745 if (vp < evp) {
8748 8746 lpgaddr = addr;
8749 8747 pflags = 0;
8750 8748 }
8751 8749 }
8752 8750 lpgeaddr = addr + len;
8753 8751 if (pflags) {
8754 8752 if (svd->type == MAP_PRIVATE) {
8755 8753 lpgeaddr = (caddr_t)P2ROUNDUP(
8756 8754 (uintptr_t)lpgeaddr,
8757 8755 segvn_pglock_comb_balign);
8758 8756 } else {
8759 8757 ulong_t aix = svd->anon_index +
8760 8758 seg_page(seg, lpgeaddr);
8761 8759 ulong_t aaix = P2ROUNDUP(aix,
8762 8760 segvn_pglock_comb_palign);
8763 8761 if (aaix < aix) {
8764 8762 lpgeaddr = 0;
8765 8763 } else {
8766 8764 lpgeaddr += ptob(aaix - aix);
8767 8765 }
8768 8766 }
8769 8767 if (lpgeaddr == 0 ||
8770 8768 lpgeaddr > seg->s_base + seg->s_size) {
8771 8769 lpgeaddr = seg->s_base + seg->s_size;
8772 8770 sftlck_send = 1;
8773 8771 }
8774 8772 }
8775 8773 if (svd->pageprot && lpgeaddr != addr + len) {
8776 8774 struct vpage *vp;
8777 8775 struct vpage *evp;
8778 8776
8779 8777 vp = &svd->vpage[seg_page(seg, addr + len)];
8780 8778 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8781 8779
8782 8780 while (vp < evp) {
8783 8781 if ((VPP_PROT(vp) & protchk) == 0) {
8784 8782 break;
8785 8783 }
8786 8784 vp++;
8787 8785 }
8788 8786 if (vp < evp) {
8789 8787 lpgeaddr = addr + len;
8790 8788 }
8791 8789 }
8792 8790 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8793 8791 }
8794 8792
8795 8793 /*
8796 8794 * For MAP_SHARED segments we create pcache entries tagged by amp and
8797 8795 * anon index so that we can share pcache entries with other segments
8798 8796 * that map this amp. For private segments pcache entries are tagged
8799 8797 * with segment and virtual address.
8800 8798 */
8801 8799 if (svd->type == MAP_SHARED) {
8802 8800 pamp = amp;
8803 8801 paddr = (caddr_t)((lpgaddr - seg->s_base) +
8804 8802 ptob(svd->anon_index));
8805 8803 preclaim_callback = shamp_reclaim;
8806 8804 } else {
8807 8805 pamp = NULL;
8808 8806 paddr = lpgaddr;
8809 8807 preclaim_callback = segvn_reclaim;
8810 8808 }
8811 8809
8812 8810 if (type == L_PAGEUNLOCK) {
8813 8811 VM_STAT_ADD(segvnvmstats.pagelock[0]);
8814 8812
8815 8813 /*
8816 8814 * update hat ref bits for /proc. We need to make sure
8817 8815 * that threads tracing the ref and mod bits of the
8818 8816 * address space get the right data.
8819 8817 * Note: page ref and mod bits are updated at reclaim time
8820 8818 */
8821 8819 if (seg->s_as->a_vbits) {
8822 8820 for (a = addr; a < addr + len; a += PAGESIZE) {
8823 8821 if (rw == S_WRITE) {
8824 8822 hat_setstat(seg->s_as, a,
8825 8823 PAGESIZE, P_REF | P_MOD);
8826 8824 } else {
8827 8825 hat_setstat(seg->s_as, a,
8828 8826 PAGESIZE, P_REF);
8829 8827 }
8830 8828 }
8831 8829 }
8832 8830
8833 8831 /*
8834 8832 * Check the shadow list entry after the last page used in
8835 8833 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
8836 8834 * was not inserted into pcache and is not large page
8837 8835 * adjusted. In this case call reclaim callback directly and
8838 8836 * don't adjust the shadow list start and size for large
8839 8837 * pages.
8840 8838 */
8841 8839 npages = btop(len);
8842 8840 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
8843 8841 void *ptag;
8844 8842 if (pamp != NULL) {
8845 8843 ASSERT(svd->type == MAP_SHARED);
8846 8844 ptag = (void *)pamp;
8847 8845 paddr = (caddr_t)((addr - seg->s_base) +
8848 8846 ptob(svd->anon_index));
8849 8847 } else {
8850 8848 ptag = (void *)seg;
8851 8849 paddr = addr;
8852 8850 }
8853 8851 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
8854 8852 } else {
8855 8853 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
8856 8854 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
8857 8855 len = lpgeaddr - lpgaddr;
8858 8856 npages = btop(len);
8859 8857 seg_pinactive(seg, pamp, paddr, len,
8860 8858 *ppp - adjustpages, rw, pflags, preclaim_callback);
8861 8859 }
8862 8860
8863 8861 if (pamp != NULL) {
8864 8862 ASSERT(svd->type == MAP_SHARED);
8865 8863 ASSERT(svd->softlockcnt >= npages);
8866 8864 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
8867 8865 }
8868 8866
8869 8867 if (sftlck_sbase) {
8870 8868 ASSERT(svd->softlockcnt_sbase > 0);
8871 8869 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
8872 8870 }
8873 8871 if (sftlck_send) {
8874 8872 ASSERT(svd->softlockcnt_send > 0);
8875 8873 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
8876 8874 }
8877 8875
8878 8876 /*
8879 8877 * If someone is blocked while unmapping, we purge
8880 8878 * segment page cache and thus reclaim pplist synchronously
8881 8879 * without waiting for seg_pasync_thread. This speeds up
8882 8880 * unmapping in cases where munmap(2) is called, while
8883 8881 * raw async i/o is still in progress or where a thread
8884 8882 * exits on data fault in a multithreaded application.
8885 8883 */
8886 8884 if (AS_ISUNMAPWAIT(seg->s_as)) {
8887 8885 if (svd->softlockcnt == 0) {
8888 8886 mutex_enter(&seg->s_as->a_contents);
8889 8887 if (AS_ISUNMAPWAIT(seg->s_as)) {
8890 8888 AS_CLRUNMAPWAIT(seg->s_as);
8891 8889 cv_broadcast(&seg->s_as->a_cv);
8892 8890 }
8893 8891 mutex_exit(&seg->s_as->a_contents);
8894 8892 } else if (pamp == NULL) {
8895 8893 /*
8896 8894 * softlockcnt is not 0 and this is a
8897 8895 * MAP_PRIVATE segment. Try to purge its
8898 8896 * pcache entries to reduce softlockcnt.
8899 8897 * If it drops to 0 segvn_reclaim()
8900 8898 * will wake up a thread waiting on
8901 8899 * unmapwait flag.
8902 8900 *
8903 8901 * We don't purge MAP_SHARED segments with non
8904 8902 * 0 softlockcnt since IO is still in progress
8905 8903 * for such segments.
8906 8904 */
8907 8905 ASSERT(svd->type == MAP_PRIVATE);
8908 8906 segvn_purge(seg);
8909 8907 }
8910 8908 }
8911 8909 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8912 8910 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
8913 8911 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
8914 8912 return (0);
8915 8913 }
8916 8914
8917 8915 /* The L_PAGELOCK case ... */
8918 8916
8919 8917 VM_STAT_ADD(segvnvmstats.pagelock[1]);
8920 8918
8921 8919 /*
8922 8920 * For MAP_SHARED segments we have to check protections before
8923 8921 * seg_plookup() since pcache entries may be shared by many segments
8924 8922 * with potentially different page protections.
8925 8923 */
8926 8924 if (pamp != NULL) {
8927 8925 ASSERT(svd->type == MAP_SHARED);
8928 8926 if (svd->pageprot == 0) {
8929 8927 if ((svd->prot & protchk) == 0) {
8930 8928 error = EACCES;
8931 8929 goto out;
8932 8930 }
8933 8931 } else {
8934 8932 /*
8935 8933 * check page protections
8936 8934 */
8937 8935 caddr_t ea;
8938 8936
8939 8937 if (seg->s_szc) {
8940 8938 a = lpgaddr;
8941 8939 ea = lpgeaddr;
8942 8940 } else {
8943 8941 a = addr;
8944 8942 ea = addr + len;
8945 8943 }
8946 8944 for (; a < ea; a += pgsz) {
8947 8945 struct vpage *vp;
8948 8946
8949 8947 ASSERT(seg->s_szc == 0 ||
8950 8948 sameprot(seg, a, pgsz));
8951 8949 vp = &svd->vpage[seg_page(seg, a)];
8952 8950 if ((VPP_PROT(vp) & protchk) == 0) {
8953 8951 error = EACCES;
8954 8952 goto out;
8955 8953 }
8956 8954 }
8957 8955 }
8958 8956 }
8959 8957
8960 8958 /*
8961 8959 * try to find pages in segment page cache
8962 8960 */
8963 8961 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
8964 8962 if (pplist != NULL) {
8965 8963 if (pamp != NULL) {
8966 8964 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
8967 8965 ASSERT(svd->type == MAP_SHARED);
8968 8966 atomic_add_long((ulong_t *)&svd->softlockcnt,
8969 8967 npages);
8970 8968 }
8971 8969 if (sftlck_sbase) {
8972 8970 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
8973 8971 }
8974 8972 if (sftlck_send) {
8975 8973 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
8976 8974 }
8977 8975 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8978 8976 *ppp = pplist + adjustpages;
8979 8977 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
8980 8978 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
8981 8979 return (0);
8982 8980 }
8983 8981
8984 8982 /*
8985 8983 * For MAP_SHARED segments we already verified above that segment
8986 8984 * protections allow this pagelock operation.
8987 8985 */
8988 8986 if (pamp == NULL) {
8989 8987 ASSERT(svd->type == MAP_PRIVATE);
8990 8988 if (svd->pageprot == 0) {
8991 8989 if ((svd->prot & protchk) == 0) {
8992 8990 error = EACCES;
8993 8991 goto out;
8994 8992 }
8995 8993 if (svd->prot & PROT_WRITE) {
8996 8994 wlen = lpgeaddr - lpgaddr;
8997 8995 } else {
8998 8996 wlen = 0;
8999 8997 ASSERT(rw == S_READ);
9000 8998 }
9001 8999 } else {
9002 9000 int wcont = 1;
9003 9001 /*
9004 9002 * check page protections
9005 9003 */
9006 9004 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9007 9005 struct vpage *vp;
9008 9006
9009 9007 ASSERT(seg->s_szc == 0 ||
9010 9008 sameprot(seg, a, pgsz));
9011 9009 vp = &svd->vpage[seg_page(seg, a)];
9012 9010 if ((VPP_PROT(vp) & protchk) == 0) {
9013 9011 error = EACCES;
9014 9012 goto out;
9015 9013 }
9016 9014 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9017 9015 wlen += pgsz;
9018 9016 } else {
9019 9017 wcont = 0;
9020 9018 ASSERT(rw == S_READ);
9021 9019 }
9022 9020 }
9023 9021 }
9024 9022 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9025 9023 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9026 9024 }
9027 9025
9028 9026 /*
9029 9027 * Only build large page adjusted shadow list if we expect to insert
9030 9028 * it into pcache. For large enough pages it's a big overhead to
9031 9029 * create a shadow list of the entire large page. But this overhead
9032 9030 * should be amortized over repeated pcache hits on subsequent reuse
9033 9031 * of this shadow list (IO into any range within this shadow list will
9034 9032 * find it in pcache since we large page align the request for pcache
9035 9033 * lookups). pcache performance is improved with bigger shadow lists
9036 9034 * as it reduces the time to pcache the entire big segment and reduces
9037 9035 * pcache chain length.
9038 9036 */
9039 9037 if (seg_pinsert_check(seg, pamp, paddr,
9040 9038 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9041 9039 addr = lpgaddr;
9042 9040 len = lpgeaddr - lpgaddr;
9043 9041 use_pcache = 1;
9044 9042 } else {
9045 9043 use_pcache = 0;
9046 9044 /*
9047 9045 * Since this entry will not be inserted into the pcache, we
9048 9046 * will not do any adjustments to the starting address or
9049 9047 * size of the memory to be locked.
9050 9048 */
9051 9049 adjustpages = 0;
9052 9050 }
9053 9051 npages = btop(len);
9054 9052
9055 9053 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9056 9054 pl = pplist;
9057 9055 *ppp = pplist + adjustpages;
9058 9056 /*
9059 9057 * If use_pcache is 0 this shadow list is not large page adjusted.
9060 9058 * Record this info in the last entry of shadow array so that
9061 9059 * L_PAGEUNLOCK can determine if it should large page adjust the
9062 9060 * address range to find the real range that was locked.
9063 9061 */
9064 9062 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9065 9063
9066 9064 page = seg_page(seg, addr);
9067 9065 anon_index = svd->anon_index + page;
9068 9066
9069 9067 anlock = 0;
9070 9068 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9071 9069 ASSERT(amp->a_szc >= seg->s_szc);
9072 9070 anpgcnt = page_get_pagecnt(amp->a_szc);
9073 9071 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9074 9072 struct anon *ap;
9075 9073 struct vnode *vp;
9076 9074 u_offset_t off;
9077 9075
9078 9076 /*
9079 9077 * Lock and unlock anon array only once per large page.
9080 9078 * anon_array_enter() locks the root anon slot according to
9081 9079 * a_szc which can't change while anon map is locked. We lock
9082 9080 * anon the first time through this loop and each time we
9083 9081 * reach anon index that corresponds to a root of a large
9084 9082 * page.
9085 9083 */
9086 9084 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9087 9085 ASSERT(anlock == 0);
9088 9086 anon_array_enter(amp, anon_index, &cookie);
9089 9087 anlock = 1;
9090 9088 }
9091 9089 ap = anon_get_ptr(amp->ahp, anon_index);
9092 9090
9093 9091 /*
9094 9092 * We must never use seg_pcache for COW pages
9095 9093 * because we might end up with original page still
9096 9094 * lying in seg_pcache even after private page is
9097 9095 * created. This leads to data corruption as
9098 9096 * aio_write refers to the page still in cache
9099 9097 * while all other accesses refer to the private
9100 9098 * page.
9101 9099 */
9102 9100 if (ap == NULL || ap->an_refcnt != 1) {
9103 9101 struct vpage *vpage;
9104 9102
9105 9103 if (seg->s_szc) {
9106 9104 error = EFAULT;
9107 9105 break;
9108 9106 }
9109 9107 if (svd->vpage != NULL) {
9110 9108 vpage = &svd->vpage[seg_page(seg, a)];
9111 9109 } else {
9112 9110 vpage = NULL;
9113 9111 }
9114 9112 ASSERT(anlock);
9115 9113 anon_array_exit(&cookie);
9116 9114 anlock = 0;
9117 9115 pp = NULL;
9118 9116 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9119 9117 vpage, &pp, 0, F_INVAL, rw, 1);
9120 9118 if (error) {
9121 9119 error = fc_decode(error);
9122 9120 break;
9123 9121 }
9124 9122 anon_array_enter(amp, anon_index, &cookie);
9125 9123 anlock = 1;
9126 9124 ap = anon_get_ptr(amp->ahp, anon_index);
9127 9125 if (ap == NULL || ap->an_refcnt != 1) {
9128 9126 error = EFAULT;
9129 9127 break;
9130 9128 }
9131 9129 }
9132 9130 swap_xlate(ap, &vp, &off);
9133 9131 pp = page_lookup_nowait(vp, off, SE_SHARED);
9134 9132 if (pp == NULL) {
9135 9133 error = EFAULT;
9136 9134 break;
9137 9135 }
9138 9136 if (ap->an_pvp != NULL) {
9139 9137 anon_swap_free(ap, pp);
9140 9138 }
9141 9139 /*
9142 9140 * Unlock anon if this is the last slot in a large page.
9143 9141 */
9144 9142 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9145 9143 ASSERT(anlock);
9146 9144 anon_array_exit(&cookie);
9147 9145 anlock = 0;
9148 9146 }
9149 9147 *pplist++ = pp;
9150 9148 }
9151 9149 if (anlock) { /* Ensure the lock is dropped */
9152 9150 anon_array_exit(&cookie);
9153 9151 }
9154 9152 	ANON_LOCK_EXIT(&amp->a_rwlock);
9155 9153
9156 9154 if (a >= addr + len) {
9157 9155 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9158 9156 if (pamp != NULL) {
9159 9157 ASSERT(svd->type == MAP_SHARED);
9160 9158 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9161 9159 npages);
9162 9160 wlen = len;
9163 9161 }
9164 9162 if (sftlck_sbase) {
9165 9163 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9166 9164 }
9167 9165 if (sftlck_send) {
9168 9166 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9169 9167 }
9170 9168 if (use_pcache) {
9171 9169 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9172 9170 rw, pflags, preclaim_callback);
9173 9171 }
9174 9172 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9175 9173 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9176 9174 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9177 9175 return (0);
9178 9176 }
9179 9177
9180 9178 pplist = pl;
9181 9179 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9182 9180 while (np > (uint_t)0) {
9183 9181 ASSERT(PAGE_LOCKED(*pplist));
9184 9182 page_unlock(*pplist);
9185 9183 np--;
9186 9184 pplist++;
9187 9185 }
9188 9186 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9189 9187 out:
9190 9188 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9191 9189 *ppp = NULL;
9192 9190 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9193 9191 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9194 9192 return (error);
9195 9193 }
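
The shadow list is allocated with one extra trailing slot so that
L_PAGEUNLOCK can tell whether the list was inserted into pcache (and
therefore large page adjusted). A userland sketch of that sentinel
convention, with page_t as an opaque stand-in type:

#include <stdio.h>
#include <stdlib.h>

typedef struct page page_t;	/* opaque stand-in for the kernel type */

#define	PCACHE_SHWLIST		((page_t *)-2)
#define	NOPCACHE_SHWLIST	((page_t *)-1)

int
main(void)
{
	size_t npages = 8;
	int use_pcache = 0;	/* e.g. seg_pinsert_check() said no */
	page_t **pl = calloc(npages + 1, sizeof (page_t *));

	if (pl == NULL)
		return (1);

	/* The slot past the last page records how unlock must proceed. */
	pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;

	if (pl[npages] == NOPCACHE_SHWLIST)
		printf("unlock: call reclaim directly, no LP adjustment\n");
	else
		printf("unlock: seg_pinactive() on the adjusted range\n");

	free(pl);
	return (0);
}
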
9196 9194
9197 9195 /*
9198 9196 * purge any cached pages in the I/O page cache
9199 9197 */
9200 9198 static void
9201 9199 segvn_purge(struct seg *seg)
9202 9200 {
9203 9201 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9204 9202
9205 9203 /*
9206 9204 * pcache is only used by pure anon segments.
9207 9205 */
9208 9206 if (svd->amp == NULL || svd->vp != NULL) {
9209 9207 return;
9210 9208 }
9211 9209
9212 9210 /*
9213 9211 * For MAP_SHARED segments non 0 segment's softlockcnt means
9214 9212 * active IO is still in progress via this segment. So we only
9215 9213 * purge MAP_SHARED segments when their softlockcnt is 0.
9216 9214 */
9217 9215 if (svd->type == MAP_PRIVATE) {
9218 9216 if (svd->softlockcnt) {
9219 9217 seg_ppurge(seg, NULL, 0);
9220 9218 }
9221 9219 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9222 9220 seg_ppurge(seg, svd->amp, 0);
9223 9221 }
9224 9222 }
9225 9223
9226 9224 /*
9227 9225 * If async argument is not 0 we are called from pcache async thread and don't
9228 9226 * hold AS lock.
9229 9227 */
9230 9228
9231 9229 /*ARGSUSED*/
9232 9230 static int
9233 9231 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9234 9232 enum seg_rw rw, int async)
9235 9233 {
9236 9234 struct seg *seg = (struct seg *)ptag;
9237 9235 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9238 9236 pgcnt_t np, npages;
9239 9237 struct page **pl;
9240 9238
9241 9239 npages = np = btop(len);
9242 9240 ASSERT(npages);
9243 9241
9244 9242 ASSERT(svd->vp == NULL && svd->amp != NULL);
9245 9243 ASSERT(svd->softlockcnt >= npages);
9246 9244 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9247 9245
9248 9246 pl = pplist;
9249 9247
9250 9248 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9251 9249 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9252 9250
9253 9251 while (np > (uint_t)0) {
9254 9252 if (rw == S_WRITE) {
9255 9253 hat_setrefmod(*pplist);
9256 9254 } else {
9257 9255 hat_setref(*pplist);
9258 9256 }
9259 9257 page_unlock(*pplist);
9260 9258 np--;
9261 9259 pplist++;
9262 9260 }
9263 9261
9264 9262 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9265 9263
9266 9264 /*
9267 9265 * If we are pcache async thread we don't hold AS lock. This means if
9268 9266 * softlockcnt drops to 0 after the decrement below address space may
9269 9267 	 * get freed. We can't allow it since after softlock decrement to 0 we
9270 9268 * still need to access as structure for possible wakeup of unmap
9271 9269 * waiters. To prevent the disappearance of as we take this segment
9272 9270 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9273 9271 * make sure this routine completes before segment is freed.
9274 9272 *
9275 9273 * The second complication we have to deal with in async case is a
9276 9274 * possibility of missed wake up of unmap wait thread. When we don't
9277 9275 * hold as lock here we may take a_contents lock before unmap wait
9278 9276 * thread that was first to see softlockcnt was still not 0. As a
9279 9277 * result we'll fail to wake up an unmap wait thread. To avoid this
9280 9278 * race we set nounmapwait flag in as structure if we drop softlockcnt
9281 9279 * to 0 when we were called by pcache async thread. unmapwait thread
9282 9280 * will not block if this flag is set.
9283 9281 */
9284 9282 if (async) {
9285 9283 mutex_enter(&svd->segfree_syncmtx);
9286 9284 }
9287 9285
9288 9286 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9289 9287 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9290 9288 mutex_enter(&seg->s_as->a_contents);
9291 9289 if (async) {
9292 9290 AS_SETNOUNMAPWAIT(seg->s_as);
9293 9291 }
9294 9292 if (AS_ISUNMAPWAIT(seg->s_as)) {
9295 9293 AS_CLRUNMAPWAIT(seg->s_as);
9296 9294 cv_broadcast(&seg->s_as->a_cv);
9297 9295 }
9298 9296 mutex_exit(&seg->s_as->a_contents);
9299 9297 }
9300 9298 }
9301 9299
9302 9300 if (async) {
9303 9301 mutex_exit(&svd->segfree_syncmtx);
9304 9302 }
9305 9303 return (0);
9306 9304 }
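
The decrement-and-wake step at the end of segvn_reclaim() maps directly
onto C11 atomics. A userland sketch with stand-in names (pthreads in
place of the kernel mutex/condvar); the key property preserved is that
only the caller that drops the count to zero broadcasts:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong softlockcnt;	/* stand-in for svd->softlockcnt */
static pthread_mutex_t a_contents = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t a_cv = PTHREAD_COND_INITIALIZER;

static void
reclaim_pages(unsigned long npages)
{
	/* fetch_sub returns the old value; old == npages means now zero. */
	if (atomic_fetch_sub(&softlockcnt, npages) == npages) {
		pthread_mutex_lock(&a_contents);
		pthread_cond_broadcast(&a_cv);	/* wake unmap waiters */
		pthread_mutex_unlock(&a_contents);
	}
}

int
main(void)
{
	atomic_store(&softlockcnt, 8);
	reclaim_pages(8);		/* drops to 0 and broadcasts */
	printf("softlockcnt = %lu\n", atomic_load(&softlockcnt));
	return (0);
}
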
9307 9305
9308 9306 /*ARGSUSED*/
9309 9307 static int
9310 9308 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9311 9309 enum seg_rw rw, int async)
9312 9310 {
9313 9311 amp_t *amp = (amp_t *)ptag;
9314 9312 pgcnt_t np, npages;
9315 9313 struct page **pl;
9316 9314
9317 9315 npages = np = btop(len);
9318 9316 ASSERT(npages);
9319 9317 ASSERT(amp->a_softlockcnt >= npages);
9320 9318
9321 9319 pl = pplist;
9322 9320
9323 9321 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9324 9322 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9325 9323
9326 9324 while (np > (uint_t)0) {
9327 9325 if (rw == S_WRITE) {
9328 9326 hat_setrefmod(*pplist);
9329 9327 } else {
9330 9328 hat_setref(*pplist);
9331 9329 }
9332 9330 page_unlock(*pplist);
9333 9331 np--;
9334 9332 pplist++;
9335 9333 }
9336 9334
9337 9335 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9338 9336
9339 9337 /*
9340 9338 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9341 9339 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9342 9340 * and anonmap_purge() acquires a_purgemtx.
9343 9341 */
9344 9342 	mutex_enter(&amp->a_purgemtx);
9345 9343 	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9346 9344 	    amp->a_purgewait) {
9347 9345 		amp->a_purgewait = 0;
9348 9346 		cv_broadcast(&amp->a_purgecv);
9349 9347 	}
9350 9348 	mutex_exit(&amp->a_purgemtx);
9351 9349 return (0);
9352 9350 }
9353 9351
9354 9352 /*
9355 9353 * get a memory ID for an addr in a given segment
9356 9354 *
9357 9355 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9358 9356 * At fault time they will be relocated into larger pages.
9359 9357 */
9360 9358 static int
9361 9359 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9362 9360 {
9363 9361 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9364 9362 struct anon *ap = NULL;
9365 9363 ulong_t anon_index;
9366 9364 struct anon_map *amp;
9367 9365 anon_sync_obj_t cookie;
9368 9366
9369 9367 if (svd->type == MAP_PRIVATE) {
9370 9368 memidp->val[0] = (uintptr_t)seg->s_as;
9371 9369 memidp->val[1] = (uintptr_t)addr;
9372 9370 return (0);
9373 9371 }
9374 9372
9375 9373 if (svd->type == MAP_SHARED) {
9376 9374 if (svd->vp) {
9377 9375 memidp->val[0] = (uintptr_t)svd->vp;
9378 9376 memidp->val[1] = (u_longlong_t)svd->offset +
9379 9377 (uintptr_t)(addr - seg->s_base);
9380 9378 return (0);
9381 9379 } else {
9382 9380
9383 9381 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9384 9382 if ((amp = svd->amp) != NULL) {
9385 9383 anon_index = svd->anon_index +
9386 9384 seg_page(seg, addr);
9387 9385 }
9388 9386 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9389 9387
9390 9388 ASSERT(amp != NULL);
9391 9389
9392 9390 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9393 9391 anon_array_enter(amp, anon_index, &cookie);
9394 9392 ap = anon_get_ptr(amp->ahp, anon_index);
9395 9393 if (ap == NULL) {
9396 9394 page_t *pp;
9397 9395
9398 9396 pp = anon_zero(seg, addr, &ap, svd->cred);
9399 9397 if (pp == NULL) {
9400 9398 anon_array_exit(&cookie);
9401 9399 					ANON_LOCK_EXIT(&amp->a_rwlock);
9402 9400 return (ENOMEM);
9403 9401 }
9404 9402 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9405 9403 == NULL);
9406 9404 (void) anon_set_ptr(amp->ahp, anon_index,
9407 9405 ap, ANON_SLEEP);
9408 9406 page_unlock(pp);
9409 9407 }
9410 9408
9411 9409 anon_array_exit(&cookie);
9412 9410 			ANON_LOCK_EXIT(&amp->a_rwlock);
9413 9411
9414 9412 memidp->val[0] = (uintptr_t)ap;
9415 9413 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9416 9414 return (0);
9417 9415 }
9418 9416 }
9419 9417 return (EINVAL);
9420 9418 }
9421 9419
9422 9420 static int
9423 9421 sameprot(struct seg *seg, caddr_t a, size_t len)
9424 9422 {
9425 9423 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9426 9424 struct vpage *vpage;
9427 9425 spgcnt_t pages = btop(len);
9428 9426 uint_t prot;
9429 9427
9430 9428 if (svd->pageprot == 0)
9431 9429 return (1);
9432 9430
9433 9431 ASSERT(svd->vpage != NULL);
9434 9432
9435 9433 vpage = &svd->vpage[seg_page(seg, a)];
9436 9434 prot = VPP_PROT(vpage);
9437 9435 vpage++;
9438 9436 pages--;
9439 9437 while (pages-- > 0) {
9440 9438 if (prot != VPP_PROT(vpage))
9441 9439 return (0);
9442 9440 vpage++;
9443 9441 }
9444 9442 return (1);
9445 9443 }
9446 9444
9447 9445 /*
9448 9446 * Get memory allocation policy info for specified address in given segment
9449 9447 */
9450 9448 static lgrp_mem_policy_info_t *
9451 9449 segvn_getpolicy(struct seg *seg, caddr_t addr)
9452 9450 {
9453 9451 struct anon_map *amp;
9454 9452 ulong_t anon_index;
9455 9453 lgrp_mem_policy_info_t *policy_info;
9456 9454 struct segvn_data *svn_data;
9457 9455 u_offset_t vn_off;
9458 9456 vnode_t *vp;
9459 9457
9460 9458 ASSERT(seg != NULL);
9461 9459
9462 9460 svn_data = (struct segvn_data *)seg->s_data;
9463 9461 if (svn_data == NULL)
9464 9462 return (NULL);
9465 9463
9466 9464 /*
9467 9465 * Get policy info for private or shared memory
9468 9466 */
9469 9467 if (svn_data->type != MAP_SHARED) {
9470 9468 if (svn_data->tr_state != SEGVN_TR_ON) {
9471 9469 policy_info = &svn_data->policy_info;
9472 9470 } else {
9473 9471 policy_info = &svn_data->tr_policy_info;
9474 9472 ASSERT(policy_info->mem_policy ==
9475 9473 LGRP_MEM_POLICY_NEXT_SEG);
9476 9474 }
9477 9475 } else {
9478 9476 amp = svn_data->amp;
9479 9477 anon_index = svn_data->anon_index + seg_page(seg, addr);
9480 9478 vp = svn_data->vp;
9481 9479 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9482 9480 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9483 9481 }
9484 9482
9485 9483 return (policy_info);
9486 -}
9487 -
9488 -/*ARGSUSED*/
9489 -static int
9490 -segvn_capable(struct seg *seg, segcapability_t capability)
9491 -{
9492 - return (0);
9493 9484 }
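
With the dummy segvn_capable() deleted, segvn simply leaves the capable
slot in its ops vector NULL. A hedged sketch of the dispatch this
enables (stand-in types; the real caller in the tree, e.g. as_capable(),
may differ in detail):

#include <stdio.h>
#include <stddef.h>

typedef enum segcapability {
	S_CAPABILITY_NOMINFLT		/* stand-in capability value */
} segcapability_t;

struct seg;

struct seg_ops {
	int (*capable)(struct seg *, segcapability_t);
	/* ... other segment operations elided ... */
};

struct seg {
	const struct seg_ops *s_ops;
};

static int
seg_capable(struct seg *seg, segcapability_t capability)
{
	/* A NULL op is shorthand for "no capabilities supported". */
	if (seg->s_ops->capable == NULL)
		return (0);
	return (seg->s_ops->capable(seg, capability));
}

int
main(void)
{
	static const struct seg_ops ops = { NULL };	/* no capable op */
	struct seg s = { &ops };

	printf("capable = %d\n", seg_capable(&s, S_CAPABILITY_NOMINFLT));
	return (0);
}
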
9494 9485
9495 9486 /*
9496 9487 * Bind text vnode segment to an amp. If we bind successfully mappings will be
9497 9488 * established to per vnode mapping per lgroup amp pages instead of to vnode
9498 9489 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9499 9490 * may share the same text replication amp. If a suitable amp doesn't already
9500 9491 * exist in svntr hash table create a new one. We may fail to bind to amp if
9501 9492 * segment is not eligible for text replication. Code below first checks for
9502 9493 * these conditions. If binding is successful segment tr_state is set to on
9503 9494 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9504 9495 * svd->amp remains as NULL.
9505 9496 */
9506 9497 static void
9507 9498 segvn_textrepl(struct seg *seg)
9508 9499 {
9509 9500 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9510 9501 vnode_t *vp = svd->vp;
9511 9502 u_offset_t off = svd->offset;
9512 9503 size_t size = seg->s_size;
9513 9504 u_offset_t eoff = off + size;
9514 9505 uint_t szc = seg->s_szc;
9515 9506 ulong_t hash = SVNTR_HASH_FUNC(vp);
9516 9507 svntr_t *svntrp;
9517 9508 struct vattr va;
9518 9509 proc_t *p = seg->s_as->a_proc;
9519 9510 lgrp_id_t lgrp_id;
9520 9511 lgrp_id_t olid;
9521 9512 int first;
9522 9513 struct anon_map *amp;
9523 9514
9524 9515 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9525 9516 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9526 9517 ASSERT(p != NULL);
9527 9518 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9528 9519 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9529 9520 ASSERT(svd->flags & MAP_TEXT);
9530 9521 ASSERT(svd->type == MAP_PRIVATE);
9531 9522 ASSERT(vp != NULL && svd->amp == NULL);
9532 9523 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9533 9524 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9534 9525 ASSERT(seg->s_as != &kas);
9535 9526 ASSERT(off < eoff);
9536 9527 ASSERT(svntr_hashtab != NULL);
9537 9528
9538 9529 /*
9539 9530 * If numa optimizations are no longer desired bail out.
9540 9531 */
9541 9532 if (!lgrp_optimizations()) {
9542 9533 svd->tr_state = SEGVN_TR_OFF;
9543 9534 return;
9544 9535 }
9545 9536
9546 9537 /*
9547 9538 * Avoid creating anon maps with size bigger than the file size.
9548 9539 * If VOP_GETATTR() call fails bail out.
9549 9540 */
9550 9541 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9551 9542 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9552 9543 svd->tr_state = SEGVN_TR_OFF;
9553 9544 SEGVN_TR_ADDSTAT(gaerr);
9554 9545 return;
9555 9546 }
9556 9547 if (btopr(va.va_size) < btopr(eoff)) {
9557 9548 svd->tr_state = SEGVN_TR_OFF;
9558 9549 SEGVN_TR_ADDSTAT(overmap);
9559 9550 return;
9560 9551 }
9561 9552
9562 9553 /*
9563 9554 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
9564 9555 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
9565 9556 * mapping that checks if trcache for this vnode needs to be
9566 9557 * invalidated can't miss us.
9567 9558 */
9568 9559 if (!(vp->v_flag & VVMEXEC)) {
9569 9560 mutex_enter(&vp->v_lock);
9570 9561 vp->v_flag |= VVMEXEC;
9571 9562 mutex_exit(&vp->v_lock);
9572 9563 }
9573 9564 mutex_enter(&svntr_hashtab[hash].tr_lock);
9574 9565 /*
9575 9566 * Bail out if potentially MAP_SHARED writable mappings exist to this
9576 9567 * vnode. We don't want to use old file contents from existing
9577 9568 * replicas if this mapping was established after the original file
9578 9569 * was changed.
9579 9570 */
9580 9571 if (vn_is_mapped(vp, V_WRITE)) {
9581 9572 mutex_exit(&svntr_hashtab[hash].tr_lock);
9582 9573 svd->tr_state = SEGVN_TR_OFF;
9583 9574 SEGVN_TR_ADDSTAT(wrcnt);
9584 9575 return;
9585 9576 }
9586 9577 svntrp = svntr_hashtab[hash].tr_head;
9587 9578 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9588 9579 ASSERT(svntrp->tr_refcnt != 0);
9589 9580 if (svntrp->tr_vp != vp) {
9590 9581 continue;
9591 9582 }
9592 9583
9593 9584 /*
9594 9585 * Bail out if the file or its attributes were changed after
9595 9586 * this replication entry was created since we need to use the
9596 9587 * latest file contents. Note that mtime test alone is not
9597 9588 * sufficient because a user can explicitly change mtime via
9598 9589 		 * utimes(2) interfaces back to the old value after modifying
9599 9590 * the file contents. To detect this case we also have to test
9600 9591 * ctime which among other things records the time of the last
9601 9592 * mtime change by utimes(2). ctime is not changed when the file
9602 9593 * is only read or executed so we expect that typically existing
9603 9594 		 * replication amps can be used most of the time.
9604 9595 */
9605 9596 if (!svntrp->tr_valid ||
9606 9597 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9607 9598 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9608 9599 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9609 9600 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9610 9601 mutex_exit(&svntr_hashtab[hash].tr_lock);
9611 9602 svd->tr_state = SEGVN_TR_OFF;
9612 9603 SEGVN_TR_ADDSTAT(stale);
9613 9604 return;
9614 9605 }
9615 9606 /*
9616 9607 * if off, eoff and szc match current segment we found the
9617 9608 * existing entry we can use.
9618 9609 */
9619 9610 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9620 9611 svntrp->tr_szc == szc) {
9621 9612 break;
9622 9613 }
9623 9614 /*
9624 9615 * Don't create different but overlapping in file offsets
9625 9616 * entries to avoid replication of the same file pages more
9626 9617 * than once per lgroup.
9627 9618 */
9628 9619 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9629 9620 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9630 9621 mutex_exit(&svntr_hashtab[hash].tr_lock);
9631 9622 svd->tr_state = SEGVN_TR_OFF;
9632 9623 SEGVN_TR_ADDSTAT(overlap);
9633 9624 return;
9634 9625 }
9635 9626 }
9636 9627 /*
9637 9628 * If we didn't find existing entry create a new one.
9638 9629 */
9639 9630 if (svntrp == NULL) {
9640 9631 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9641 9632 if (svntrp == NULL) {
9642 9633 mutex_exit(&svntr_hashtab[hash].tr_lock);
9643 9634 svd->tr_state = SEGVN_TR_OFF;
9644 9635 SEGVN_TR_ADDSTAT(nokmem);
9645 9636 return;
9646 9637 }
9647 9638 #ifdef DEBUG
9648 9639 {
9649 9640 lgrp_id_t i;
9650 9641 for (i = 0; i < NLGRPS_MAX; i++) {
9651 9642 ASSERT(svntrp->tr_amp[i] == NULL);
9652 9643 }
9653 9644 }
9654 9645 #endif /* DEBUG */
9655 9646 svntrp->tr_vp = vp;
9656 9647 svntrp->tr_off = off;
9657 9648 svntrp->tr_eoff = eoff;
9658 9649 svntrp->tr_szc = szc;
9659 9650 svntrp->tr_valid = 1;
9660 9651 svntrp->tr_mtime = va.va_mtime;
9661 9652 svntrp->tr_ctime = va.va_ctime;
9662 9653 svntrp->tr_refcnt = 0;
9663 9654 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9664 9655 svntr_hashtab[hash].tr_head = svntrp;
9665 9656 }
9666 9657 first = 1;
9667 9658 again:
9668 9659 /*
9669 9660 * We want to pick a replica with pages on main thread's (t_tid = 1,
9670 9661 * aka T1) lgrp. Currently text replication is only optimized for
9671 9662 * workloads that either have all threads of a process on the same
9672 9663 * lgrp or execute their large text primarily on main thread.
9673 9664 */
9674 9665 lgrp_id = p->p_t1_lgrpid;
9675 9666 if (lgrp_id == LGRP_NONE) {
9676 9667 /*
9677 9668 * In case exec() prefaults text on non main thread use
9678 9669 * current thread lgrpid. It will become main thread anyway
9679 9670 * soon.
9680 9671 */
9681 9672 lgrp_id = lgrp_home_id(curthread);
9682 9673 }
9683 9674 /*
9684 9675 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
9685 9676 * just set it to NLGRPS_MAX if it's different from current process T1
9686 9677 * home lgrp. p_tr_lgrpid is used to detect if process uses text
9687 9678 * replication and T1 new home is different from lgrp used for text
9688 9679 	 * replication. When this happens the asynchronous segvn thread rechecks
9689 9680 	 * if segments should change lgrps used for text replication. If we fail
9690 9681 	 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX
9691 9682 	 * without cas if it's not already NLGRPS_MAX and not equal to lgrp_id
9692 9683 * we want to use. We don't need to use cas in this case because
9693 9684 * another thread that races in between our non atomic check and set
9694 9685 * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
9695 9686 */
9696 9687 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9697 9688 olid = p->p_tr_lgrpid;
9698 9689 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9699 9690 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9700 9691 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9701 9692 olid) {
9702 9693 olid = p->p_tr_lgrpid;
9703 9694 ASSERT(olid != LGRP_NONE);
9704 9695 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9705 9696 p->p_tr_lgrpid = NLGRPS_MAX;
9706 9697 }
9707 9698 }
9708 9699 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9709 9700 membar_producer();
9710 9701 /*
9711 9702 * lgrp_move_thread() won't schedule async recheck after
9712 9703 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9713 9704 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9714 9705 * is not LGRP_NONE.
9715 9706 */
9716 9707 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9717 9708 p->p_t1_lgrpid != lgrp_id) {
9718 9709 first = 0;
9719 9710 goto again;
9720 9711 }
9721 9712 }
9722 9713 /*
9723 9714 * If no amp was created yet for lgrp_id create a new one as long as
9724 9715 * we have enough memory to afford it.
9725 9716 */
9726 9717 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9727 9718 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9728 9719 if (trmem > segvn_textrepl_max_bytes) {
9729 9720 SEGVN_TR_ADDSTAT(normem);
9730 9721 goto fail;
9731 9722 }
9732 9723 if (anon_try_resv_zone(size, NULL) == 0) {
9733 9724 SEGVN_TR_ADDSTAT(noanon);
9734 9725 goto fail;
9735 9726 }
9736 9727 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9737 9728 if (amp == NULL) {
9738 9729 anon_unresv_zone(size, NULL);
9739 9730 SEGVN_TR_ADDSTAT(nokmem);
9740 9731 goto fail;
9741 9732 }
9742 9733 ASSERT(amp->refcnt == 1);
9743 9734 amp->a_szc = szc;
9744 9735 svntrp->tr_amp[lgrp_id] = amp;
9745 9736 SEGVN_TR_ADDSTAT(newamp);
9746 9737 }
9747 9738 svntrp->tr_refcnt++;
9748 9739 ASSERT(svd->svn_trnext == NULL);
9749 9740 ASSERT(svd->svn_trprev == NULL);
9750 9741 svd->svn_trnext = svntrp->tr_svnhead;
9751 9742 svd->svn_trprev = NULL;
9752 9743 if (svntrp->tr_svnhead != NULL) {
9753 9744 svntrp->tr_svnhead->svn_trprev = svd;
9754 9745 }
9755 9746 svntrp->tr_svnhead = svd;
9756 9747 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9757 9748 ASSERT(amp->refcnt >= 1);
9758 9749 svd->amp = amp;
9759 9750 svd->anon_index = 0;
9760 9751 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9761 9752 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9762 9753 svd->tr_state = SEGVN_TR_ON;
9763 9754 mutex_exit(&svntr_hashtab[hash].tr_lock);
9764 9755 SEGVN_TR_ADDSTAT(repl);
9765 9756 return;
9766 9757 fail:
9767 9758 ASSERT(segvn_textrepl_bytes >= size);
9768 9759 atomic_add_long(&segvn_textrepl_bytes, -size);
9769 9760 ASSERT(svntrp != NULL);
9770 9761 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9771 9762 if (svntrp->tr_refcnt == 0) {
9772 9763 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9773 9764 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9774 9765 mutex_exit(&svntr_hashtab[hash].tr_lock);
9775 9766 kmem_cache_free(svntr_cache, svntrp);
9776 9767 } else {
9777 9768 mutex_exit(&svntr_hashtab[hash].tr_lock);
9778 9769 }
9779 9770 svd->tr_state = SEGVN_TR_OFF;
9780 9771 }
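
The staleness test in the lookup loop above compares both timestamps
because utimes(2) can roll mtime back to an old value while ctime still
records that change. A small standalone restatement (stand-in types and
values):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool
tr_entry_stale(struct timespec tr_mtime, struct timespec tr_ctime,
    struct timespec va_mtime, struct timespec va_ctime)
{
	/* Reusable only if both mtime and ctime still match exactly. */
	return (tr_mtime.tv_sec != va_mtime.tv_sec ||
	    tr_mtime.tv_nsec != va_mtime.tv_nsec ||
	    tr_ctime.tv_sec != va_ctime.tv_sec ||
	    tr_ctime.tv_nsec != va_ctime.tv_nsec);
}

int
main(void)
{
	struct timespec m = { 100, 0 }, c = { 200, 0 };
	struct timespec same_m = { 100, 0 }, bumped_c = { 300, 0 };

	/* mtime matches but ctime moved: utimes(2) happened -> stale. */
	printf("stale = %d\n", tr_entry_stale(m, c, same_m, bumped_c));
	return (0);
}
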
9781 9772
9782 9773 /*
9783 9774 * Convert seg back to a regular vnode mapping seg by unbinding it from its
9784 9775 * text replication amp. This is most typically called when the segment is
9785 9776 * unmapped, but can also happen when the segment no longer qualifies for text
9786 9777 * replication (e.g. due to protection changes). If unload_unmap is set, use
9787 9778 * the HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user
9788 9779 * of the svntr entry, free all its anon maps and remove it from the hash table.
9789 9780 */
9790 9781 static void
9791 9782 segvn_textunrepl(struct seg *seg, int unload_unmap)
9792 9783 {
9793 9784 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9794 9785 vnode_t *vp = svd->vp;
9795 9786 u_offset_t off = svd->offset;
9796 9787 size_t size = seg->s_size;
9797 9788 u_offset_t eoff = off + size;
9798 9789 uint_t szc = seg->s_szc;
9799 9790 ulong_t hash = SVNTR_HASH_FUNC(vp);
9800 9791 svntr_t *svntrp;
9801 9792 svntr_t **prv_svntrp;
9802 9793 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
9803 9794 lgrp_id_t i;
9804 9795
9805 9796 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9806 9797 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
9807 9798 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9808 9799 ASSERT(svd->tr_state == SEGVN_TR_ON);
9809 9800 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9810 9801 ASSERT(svd->amp != NULL);
9811 9802 ASSERT(svd->amp->refcnt >= 1);
9812 9803 ASSERT(svd->anon_index == 0);
9813 9804 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9814 9805 ASSERT(svntr_hashtab != NULL);
9815 9806
9816 9807 mutex_enter(&svntr_hashtab[hash].tr_lock);
9817 9808 prv_svntrp = &svntr_hashtab[hash].tr_head;
9818 9809 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
9819 9810 ASSERT(svntrp->tr_refcnt != 0);
9820 9811 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
9821 9812 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
9822 9813 break;
9823 9814 }
9824 9815 }
9825 9816 if (svntrp == NULL) {
9826 9817 panic("segvn_textunrepl: svntr record not found");
9827 9818 }
9828 9819 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
9829 9820 panic("segvn_textunrepl: amp mismatch");
9830 9821 }
9831 9822 svd->tr_state = SEGVN_TR_OFF;
9832 9823 svd->amp = NULL;
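/*
 * Unlink svd from the svntr entry's list of segments that share its
 * replicas.
 */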
9833 9824 if (svd->svn_trprev == NULL) {
9834 9825 ASSERT(svntrp->tr_svnhead == svd);
9835 9826 svntrp->tr_svnhead = svd->svn_trnext;
9836 9827 if (svntrp->tr_svnhead != NULL) {
9837 9828 svntrp->tr_svnhead->svn_trprev = NULL;
9838 9829 }
9839 9830 svd->svn_trnext = NULL;
9840 9831 } else {
9841 9832 svd->svn_trprev->svn_trnext = svd->svn_trnext;
9842 9833 if (svd->svn_trnext != NULL) {
9843 9834 svd->svn_trnext->svn_trprev = svd->svn_trprev;
9844 9835 svd->svn_trnext = NULL;
9845 9836 }
9846 9837 svd->svn_trprev = NULL;
9847 9838 }
9848 9839 if (--svntrp->tr_refcnt) {
9849 9840 mutex_exit(&svntr_hashtab[hash].tr_lock);
9850 9841 goto done;
9851 9842 }
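/*
 * This was the last reference: remove the entry from the hash chain and
 * free every per-lgroup replica anon map before freeing the entry itself.
 */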
9852 9843 *prv_svntrp = svntrp->tr_next;
9853 9844 mutex_exit(&svntr_hashtab[hash].tr_lock);
9854 9845 for (i = 0; i < NLGRPS_MAX; i++) {
9855 9846 struct anon_map *amp = svntrp->tr_amp[i];
9856 9847 if (amp == NULL) {
9857 9848 continue;
9858 9849 }
9859 9850 ASSERT(amp->refcnt == 1);
9860 9851 ASSERT(amp->swresv == size);
9861 9852 ASSERT(amp->size == size);
9862 9853 ASSERT(amp->a_szc == szc);
9863 9854 if (amp->a_szc != 0) {
9864 9855 anon_free_pages(amp->ahp, 0, size, szc);
9865 9856 } else {
9866 9857 anon_free(amp->ahp, 0, size);
9867 9858 }
9868 9859 svntrp->tr_amp[i] = NULL;
9869 9860 ASSERT(segvn_textrepl_bytes >= size);
9870 9861 atomic_add_long(&segvn_textrepl_bytes, -size);
9871 9862 anon_unresv_zone(amp->swresv, NULL);
9872 9863 amp->refcnt = 0;
9873 9864 anonmap_free(amp);
9874 9865 }
9875 9866 kmem_cache_free(svntr_cache, svntrp);
9876 9867 done:
9877 9868 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
9878 9869 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
9879 9870 }
9880 9871
9881 9872 /*
9882 9873 * This is called when a MAP_SHARED writable mapping is created to a vnode
9883 9874 * that is currently used for execution (VVMEXEC flag is set). In this case we
9884 9875 * need to prevent further use of existing replicas.
9885 9876 */
9886 9877 static void
9887 9878 segvn_inval_trcache(vnode_t *vp)
9888 9879 {
9889 9880 ulong_t hash = SVNTR_HASH_FUNC(vp);
9890 9881 svntr_t *svntrp;
9891 9882
9892 9883 ASSERT(vp->v_flag & VVMEXEC);
9893 9884
9894 9885 if (svntr_hashtab == NULL) {
9895 9886 return;
9896 9887 }
9897 9888
9898 9889 mutex_enter(&svntr_hashtab[hash].tr_lock);
9899 9890 svntrp = svntr_hashtab[hash].tr_head;
9900 9891 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9901 9892 ASSERT(svntrp->tr_refcnt != 0);
9902 9893 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
9903 9894 svntrp->tr_valid = 0;
9904 9895 }
9905 9896 }
9906 9897 mutex_exit(&svntr_hashtab[hash].tr_lock);
9907 9898 }
9908 9899
9909 9900 static void
9910 9901 segvn_trasync_thread(void)
9911 9902 {
9912 9903 callb_cpr_t cpr_info;
9913 9904 kmutex_t cpr_lock; /* just for CPR stuff */
9914 9905
9915 9906 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
9916 9907
9917 9908 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
9918 9909 callb_generic_cpr, "segvn_async");
9919 9910
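/*
 * Convert the update interval from seconds to clock ticks and arm the
 * first wakeup timeout.
 */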
9920 9911 if (segvn_update_textrepl_interval == 0) {
9921 9912 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
9922 9913 } else {
9923 9914 segvn_update_textrepl_interval *= hz;
9924 9915 }
9925 9916 (void) timeout(segvn_trupdate_wakeup, NULL,
9926 9917 segvn_update_textrepl_interval);
9927 9918
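/*
 * Wait (CPR-safe) until segvn_trupdate_wakeup() posts the semaphore,
 * then scan for segments whose replicas should migrate.
 */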
9928 9919 for (;;) {
9929 9920 mutex_enter(&cpr_lock);
9930 9921 CALLB_CPR_SAFE_BEGIN(&cpr_info);
9931 9922 mutex_exit(&cpr_lock);
9932 9923 sema_p(&segvn_trasync_sem);
9933 9924 mutex_enter(&cpr_lock);
9934 9925 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
9935 9926 mutex_exit(&cpr_lock);
9936 9927 segvn_trupdate();
9937 9928 }
9938 9929 }
9939 9930
9940 9931 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
9941 9932
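/*
 * Timeout handler: post the async thread only when lgrp thread
 * migrations have occurred since the last snapshot, then rearm itself.
 */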
9942 9933 static void
9943 9934 segvn_trupdate_wakeup(void *dummy)
9944 9935 {
9945 9936 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
9946 9937
9947 9938 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
9948 9939 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
9949 9940 sema_v(&segvn_trasync_sem);
9950 9941 }
9951 9942
9952 9943 if (!segvn_disable_textrepl_update &&
9953 9944 segvn_update_textrepl_interval != 0) {
9954 9945 (void) timeout(segvn_trupdate_wakeup, dummy,
9955 9946 segvn_update_textrepl_interval);
9956 9947 }
9957 9948 }
9958 9949
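/*
 * Walk every svntr hash bucket and, within each entry, every attached
 * segment, giving each replicated segment a chance to migrate.
 */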
9959 9950 static void
9960 9951 segvn_trupdate(void)
9961 9952 {
9962 9953 ulong_t hash;
9963 9954 svntr_t *svntrp;
9964 9955 segvn_data_t *svd;
9965 9956
9966 9957 ASSERT(svntr_hashtab != NULL);
9967 9958
9968 9959 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
9969 9960 mutex_enter(&svntr_hashtab[hash].tr_lock);
9970 9961 svntrp = svntr_hashtab[hash].tr_head;
9971 9962 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9972 9963 ASSERT(svntrp->tr_refcnt != 0);
9973 9964 svd = svntrp->tr_svnhead;
9974 9965 for (; svd != NULL; svd = svd->svn_trnext) {
9975 9966 segvn_trupdate_seg(svd->seg, svd, svntrp,
9976 9967 hash);
9977 9968 }
9978 9969 }
9979 9970 mutex_exit(&svntr_hashtab[hash].tr_lock);
9980 9971 }
9981 9972 }
9982 9973
9983 9974 static void
9984 9975 segvn_trupdate_seg(struct seg *seg,
9985 9976 segvn_data_t *svd,
9986 9977 svntr_t *svntrp,
9987 9978 ulong_t hash)
9988 9979 {
9989 9980 proc_t *p;
9990 9981 lgrp_id_t lgrp_id;
9991 9982 struct as *as;
9992 9983 size_t size;
9993 9984 struct anon_map *amp;
9994 9985
9995 9986 ASSERT(svd->vp != NULL);
9996 9987 ASSERT(svd->vp == svntrp->tr_vp);
9997 9988 ASSERT(svd->offset == svntrp->tr_off);
9998 9989 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
9999 9990 ASSERT(seg != NULL);
10000 9991 ASSERT(svd->seg == seg);
10001 9992 ASSERT(seg->s_data == (void *)svd);
10002 9993 ASSERT(seg->s_szc == svntrp->tr_szc);
10003 9994 ASSERT(svd->tr_state == SEGVN_TR_ON);
10004 9995 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10005 9996 ASSERT(svd->amp != NULL);
10006 9997 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10007 9998 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10008 9999 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10009 10000 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10010 10001 ASSERT(svntrp->tr_refcnt != 0);
10011 10002 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10012 10003
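/*
 * Migrate this segment's replica to the lgroup where the process main
 * thread now runs; nothing to do if it hasn't moved.
 */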
10013 10004 as = seg->s_as;
10014 10005 ASSERT(as != NULL && as != &kas);
10015 10006 p = as->a_proc;
10016 10007 ASSERT(p != NULL);
10017 10008 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10018 10009 lgrp_id = p->p_t1_lgrpid;
10019 10010 if (lgrp_id == LGRP_NONE) {
10020 10011 return;
10021 10012 }
10022 10013 ASSERT(lgrp_id < NLGRPS_MAX);
10023 10014 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10024 10015 return;
10025 10016 }
10026 10017
10027 10018 /*
10028 10019 * Use tryenter locking since we acquire the as/seg locks and the svntr
10029 10020 * hash lock in the reverse order from synchronous threads.
10030 10021 */
10031 10022 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10032 10023 SEGVN_TR_ADDSTAT(nolock);
10033 10024 if (segvn_lgrp_trthr_migrs_snpsht) {
10034 10025 segvn_lgrp_trthr_migrs_snpsht = 0;
10035 10026 }
10036 10027 return;
10037 10028 }
10038 10029 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10039 10030 AS_LOCK_EXIT(as, &as->a_lock);
10040 10031 SEGVN_TR_ADDSTAT(nolock);
10041 10032 if (segvn_lgrp_trthr_migrs_snpsht) {
10042 10033 segvn_lgrp_trthr_migrs_snpsht = 0;
10043 10034 }
10044 10035 return;
10045 10036 }
10046 10037 size = seg->s_size;
10047 10038 if (svntrp->tr_amp[lgrp_id] == NULL) {
10048 10039 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10049 10040 if (trmem > segvn_textrepl_max_bytes) {
10050 10041 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10051 10042 AS_LOCK_EXIT(as, &as->a_lock);
10052 10043 atomic_add_long(&segvn_textrepl_bytes, -size);
10053 10044 SEGVN_TR_ADDSTAT(normem);
10054 10045 return;
10055 10046 }
10056 10047 if (anon_try_resv_zone(size, NULL) == 0) {
10057 10048 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10058 10049 AS_LOCK_EXIT(as, &as->a_lock);
10059 10050 atomic_add_long(&segvn_textrepl_bytes, -size);
10060 10051 SEGVN_TR_ADDSTAT(noanon);
10061 10052 return;
10062 10053 }
10063 10054 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
10064 10055 if (amp == NULL) {
10065 10056 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10066 10057 AS_LOCK_EXIT(as, &as->a_lock);
10067 10058 atomic_add_long(&segvn_textrepl_bytes, -size);
10068 10059 anon_unresv_zone(size, NULL);
10069 10060 SEGVN_TR_ADDSTAT(nokmem);
10070 10061 return;
10071 10062 }
10072 10063 ASSERT(amp->refcnt == 1);
10073 10064 amp->a_szc = seg->s_szc;
10074 10065 svntrp->tr_amp[lgrp_id] = amp;
10075 10066 }
10076 10067 /*
10077 10068 * We don't strictly need to drop the bucket lock, but we do it here to
10078 10069 * give other threads a chance. svntr and svd can't be unlinked as long
10079 10070 * as the segment lock is held as a writer and the AS lock is held as
10080 10071 * well. After we retake the bucket lock we'll continue from where we
10081 10072 * left off; we'll still reach the end of either list, since new
10082 10073 * entries are always added at the head of the lists.
10083 10074 */
10084 10075 mutex_exit(&svntr_hashtab[hash].tr_lock);
10085 10076 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10086 10077 mutex_enter(&svntr_hashtab[hash].tr_lock);
10087 10078
10088 10079 ASSERT(svd->tr_state == SEGVN_TR_ON);
10089 10080 ASSERT(svd->amp != NULL);
10090 10081 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10091 10082 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10092 10083 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10093 10084
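/* Switch the segment over to the replica amp for the new lgroup. */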
10094 10085 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10095 10086 svd->amp = svntrp->tr_amp[lgrp_id];
10096 10087 p->p_tr_lgrpid = NLGRPS_MAX;
10097 10088 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10098 10089 AS_LOCK_EXIT(as, &as->a_lock);
10099 10090
10100 10091 ASSERT(svntrp->tr_refcnt != 0);
10101 10092 ASSERT(svd->vp == svntrp->tr_vp);
10102 10093 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10103 10094 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10104 10095 ASSERT(svd->seg == seg);
10105 10096 ASSERT(svd->tr_state == SEGVN_TR_ON);
10106 10097
10107 10098 SEGVN_TR_ADDSTAT(asyncrepl);
10108 10099 }
606 lines elided