6154 const-ify segment ops structures
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2015, Joyent, Inc. All rights reserved.
24 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - shared or copy-on-write from a vnode/anonymous memory.
42 42 */
43 43
44 44 #include <sys/types.h>
45 45 #include <sys/param.h>
46 46 #include <sys/t_lock.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/debug.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/vmsystm.h>
53 53 #include <sys/tuneable.h>
54 54 #include <sys/bitmap.h>
55 55 #include <sys/swap.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/sysmacros.h>
58 58 #include <sys/vtrace.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/callb.h>
61 61 #include <sys/vm.h>
62 62 #include <sys/dumphdr.h>
63 63 #include <sys/lgrp.h>
64 64
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_vn.h>
69 69 #include <vm/pvn.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/vpage.h>
73 73 #include <sys/proc.h>
74 74 #include <sys/task.h>
75 75 #include <sys/project.h>
76 76 #include <sys/zone.h>
77 77 #include <sys/shm_impl.h>
78 78
79 79 /*
80 80 * segvn_fault needs a temporary page list array. To avoid calling kmem all
81 81 * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
82 82 * it can. In the rare case when this page list is not large enough, it
83 83 * goes and gets a large enough array from kmem.
84 84 *
85 85 * This small page list array covers either 8 pages or 64kB worth of pages -
86 86 * whichever is smaller.
87 87 */
88 88 #define PVN_MAX_GETPAGE_SZ 0x10000
89 89 #define PVN_MAX_GETPAGE_NUM 0x8
90 90
91 91 #if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
92 92 #define PVN_GETPAGE_SZ ptob(PVN_MAX_GETPAGE_NUM)
93 93 #define PVN_GETPAGE_NUM PVN_MAX_GETPAGE_NUM
94 94 #else
95 95 #define PVN_GETPAGE_SZ PVN_MAX_GETPAGE_SZ
96 96 #define PVN_GETPAGE_NUM btop(PVN_MAX_GETPAGE_SZ)
97 97 #endif
98 98
99 99 /*
100 100 * Private seg op routines.
101 101 */
102 102 static int segvn_dup(struct seg *seg, struct seg *newseg);
103 103 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
104 104 static void segvn_free(struct seg *seg);
105 105 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
106 106 caddr_t addr, size_t len, enum fault_type type,
107 107 enum seg_rw rw);
108 108 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
109 109 static int segvn_setprot(struct seg *seg, caddr_t addr,
110 110 size_t len, uint_t prot);
111 111 static int segvn_checkprot(struct seg *seg, caddr_t addr,
112 112 size_t len, uint_t prot);
113 113 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
114 114 static size_t segvn_swapout(struct seg *seg);
115 115 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
116 116 int attr, uint_t flags);
117 117 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
118 118 char *vec);
119 119 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
120 120 int attr, int op, ulong_t *lockmap, size_t pos);
121 121 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
122 122 uint_t *protv);
123 123 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
124 124 static int segvn_gettype(struct seg *seg, caddr_t addr);
125 125 static int segvn_getvp(struct seg *seg, caddr_t addr,
126 126 struct vnode **vpp);
127 127 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
128 128 uint_t behav);
129 129 static void segvn_dump(struct seg *seg);
130 130 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
131 131 struct page ***ppp, enum lock_type type, enum seg_rw rw);
132 132 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
133 133 uint_t szc);
134 134 static int segvn_getmemid(struct seg *seg, caddr_t addr,
135 135 memid_t *memidp);
136 136 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
137 137 static int segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
138 138
139 -struct seg_ops segvn_ops = {
139 +const struct seg_ops segvn_ops = {
140 140 .dup = segvn_dup,
141 141 .unmap = segvn_unmap,
142 142 .free = segvn_free,
143 143 .fault = segvn_fault,
144 144 .faulta = segvn_faulta,
145 145 .setprot = segvn_setprot,
146 146 .checkprot = segvn_checkprot,
147 147 .kluster = segvn_kluster,
148 148 .swapout = segvn_swapout,
149 149 .sync = segvn_sync,
150 150 .incore = segvn_incore,
151 151 .lockop = segvn_lockop,
152 152 .getprot = segvn_getprot,
153 153 .getoffset = segvn_getoffset,
154 154 .gettype = segvn_gettype,
155 155 .getvp = segvn_getvp,
156 156 .advise = segvn_advise,
157 157 .dump = segvn_dump,
158 158 .pagelock = segvn_pagelock,
159 159 .setpagesize = segvn_setpagesize,
160 160 .getmemid = segvn_getmemid,
161 161 .getpolicy = segvn_getpolicy,
162 162 .inherit = segvn_inherit,
163 163 };
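Since segment ops are only ever invoked through the s_ops pointer, nothing stores to this table after initialization; declaring it const lets it live in read-only data. A minimal sketch of the dispatch path (the macro shown follows the usual vm/seg.h pattern and is reproduced here only for illustration):

	/*
	 * Callers dispatch through the ops vector rather than writing to it,
	 * roughly:
	 *
	 *	#define	SEGOP_DUP(s, n)		(*(s)->s_ops->dup)((s), (n))
	 *
	 * so marking segvn_ops const is safe and lets the compiler reject
	 * any accidental store to the table.
	 */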
164 164
165 165 /*
166 166 * Common zfod structures, provided as a shorthand for others to use.
167 167 */
168 168 static segvn_crargs_t zfod_segvn_crargs =
169 169 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
170 170 static segvn_crargs_t kzfod_segvn_crargs =
171 171 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
172 172 PROT_ALL & ~PROT_USER);
173 173 static segvn_crargs_t stack_noexec_crargs =
174 174 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
175 175
176 176 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
177 177 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
178 178 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
179 179 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
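For context, a hedged sketch of how these shorthand argsp pointers are typically consumed by callers mapping anonymous zero-fill memory (assuming the usual as_map() calling convention; not part of this change):

	/*
	 *	error = as_map(as, addr, len, segvn_create, zfod_argsp);
	 *
	 * segvn_create() then interprets argsp as a segvn_crargs_t and
	 * builds a zero-fill-on-demand segment with the requested protections.
	 */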
180 180
181 181 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
182 182
183 183 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
184 184
185 185 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
186 186 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
187 187 uint_t segvn_pglock_comb_bshift;
188 188 size_t segvn_pglock_comb_palign;
189 189
190 190 static int segvn_concat(struct seg *, struct seg *, int);
191 191 static int segvn_extend_prev(struct seg *, struct seg *,
192 192 struct segvn_crargs *, size_t);
193 193 static int segvn_extend_next(struct seg *, struct seg *,
194 194 struct segvn_crargs *, size_t);
195 195 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
196 196 static void segvn_pagelist_rele(page_t **);
197 197 static void segvn_setvnode_mpss(vnode_t *);
198 198 static void segvn_relocate_pages(page_t **, page_t *);
199 199 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
200 200 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
201 201 uint_t, page_t **, page_t **, uint_t *, int *);
202 202 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
203 203 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
204 204 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
205 205 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
206 206 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
207 207 u_offset_t, struct vpage *, page_t **, uint_t,
208 208 enum fault_type, enum seg_rw, int);
209 209 static void segvn_vpage(struct seg *);
210 210 static size_t segvn_count_swap_by_vpages(struct seg *);
211 211
212 212 static void segvn_purge(struct seg *seg);
213 213 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
214 214 enum seg_rw, int);
215 215 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
216 216 enum seg_rw, int);
217 217
218 218 static int sameprot(struct seg *, caddr_t, size_t);
219 219
220 220 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
221 221 static int segvn_clrszc(struct seg *);
222 222 static struct seg *segvn_split_seg(struct seg *, caddr_t);
223 223 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
224 224 ulong_t, uint_t);
225 225
226 226 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
227 227 size_t, void *, u_offset_t);
228 228
229 229 static struct kmem_cache *segvn_cache;
230 230 static struct kmem_cache **segvn_szc_cache;
231 231
232 232 #ifdef VM_STATS
233 233 static struct segvnvmstats_str {
234 234 ulong_t fill_vp_pages[31];
235 235 ulong_t fltvnpages[49];
236 236 ulong_t fullszcpages[10];
237 237 ulong_t relocatepages[3];
238 238 ulong_t fltanpages[17];
239 239 ulong_t pagelock[2];
240 240 ulong_t demoterange[3];
241 241 } segvnvmstats;
242 242 #endif /* VM_STATS */
243 243
244 244 #define SDR_RANGE 1 /* demote entire range */
245 245 #define SDR_END 2 /* demote non aligned ends only */
246 246
247 247 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
248 248 if ((len) != 0) { \
249 249 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
250 250 ASSERT(lpgaddr >= (seg)->s_base); \
251 251 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
252 252 (len)), pgsz); \
253 253 ASSERT(lpgeaddr > lpgaddr); \
254 254 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
255 255 } else { \
256 256 lpgeaddr = lpgaddr = (addr); \
257 257 } \
258 258 }
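A worked example of what CALC_LPG_REGION computes, using hypothetical values:

	/*
	 * For pgsz = 0x400000 (4M) with addr = 0x10123000 and len = 0x2000:
	 *
	 *	lpgaddr  = P2ALIGN(0x10123000, 0x400000)            = 0x10000000
	 *	lpgeaddr = P2ROUNDUP(0x10123000 + 0x2000, 0x400000) = 0x10400000
	 *
	 * i.e. the smallest pgsz-aligned range covering [addr, addr + len),
	 * which the ASSERTs require to fall within the segment.
	 */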
259 259
260 260 /*ARGSUSED*/
261 261 static int
262 262 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
263 263 {
264 264 struct segvn_data *svd = buf;
265 265
266 266 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
267 267 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
268 268 svd->svn_trnext = svd->svn_trprev = NULL;
269 269 return (0);
270 270 }
271 271
272 272 /*ARGSUSED1*/
273 273 static void
274 274 segvn_cache_destructor(void *buf, void *cdrarg)
275 275 {
276 276 struct segvn_data *svd = buf;
277 277
278 278 rw_destroy(&svd->lock);
279 279 mutex_destroy(&svd->segfree_syncmtx);
280 280 }
281 281
282 282 /*ARGSUSED*/
283 283 static int
284 284 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
285 285 {
286 286 bzero(buf, sizeof (svntr_t));
287 287 return (0);
288 288 }
289 289
290 290 /*
291 291 * Patching this variable to non-zero allows the system to run with
292 292 * stacks marked as "not executable". It's a bit of a kludge, but is
293 293 * provided as a tweakable for platforms that export those ABIs
294 294 * (e.g. sparc V8) that have executable stacks enabled by default.
295 295 * There are also some restrictions for platforms that don't actually
296 296 * implement 'noexec' protections.
297 297 *
298 298 * Once enabled, the system is (therefore) unable to provide a fully
299 299 * ABI-compliant execution environment, though practically speaking,
300 300 * most everything works. The exceptions are generally some interpreters
301 301 * and debuggers that create executable code on the stack and jump
302 302 * into it (without explicitly mprotecting the address range to include
303 303 * PROT_EXEC).
304 304 *
305 305 * One important class of applications that this disables is those
306 306 * that have been transformed into malicious agents using one of the
307 307 * numerous "buffer overflow" attacks. See 4007890.
308 308 */
309 309 int noexec_user_stack = 0;
310 310 int noexec_user_stack_log = 1;
311 311
312 312 int segvn_lpg_disable = 0;
313 313 uint_t segvn_maxpgszc = 0;
314 314
315 315 ulong_t segvn_vmpss_clrszc_cnt;
316 316 ulong_t segvn_vmpss_clrszc_err;
317 317 ulong_t segvn_fltvnpages_clrszc_cnt;
318 318 ulong_t segvn_fltvnpages_clrszc_err;
319 319 ulong_t segvn_setpgsz_align_err;
320 320 ulong_t segvn_setpgsz_anon_align_err;
321 321 ulong_t segvn_setpgsz_getattr_err;
322 322 ulong_t segvn_setpgsz_eof_err;
323 323 ulong_t segvn_faultvnmpss_align_err1;
324 324 ulong_t segvn_faultvnmpss_align_err2;
325 325 ulong_t segvn_faultvnmpss_align_err3;
326 326 ulong_t segvn_faultvnmpss_align_err4;
327 327 ulong_t segvn_faultvnmpss_align_err5;
328 328 ulong_t segvn_vmpss_pageio_deadlk_err;
329 329
330 330 int segvn_use_regions = 1;
331 331
332 332 /*
333 333 * Segvn supports text replication optimization for NUMA platforms. Text
334 334 * replicas are represented by anon maps (amp). There's one amp per text file
335 335 * region per lgroup. A process chooses the amp for each of its text mappings
336 336 * based on the lgroup assignment of its main thread (t_tid = 1). All
337 337 * processes that want a replica on a particular lgroup for the same text file
338 338 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
339 339 * with vp,off,size,szc used as a key. Text replication segments are read only
340 340 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
341 341 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
342 342 * pages. Replication amp is assigned to a segment when it gets its first
343 343 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
344 344 * rechecks periodically if the process still maps an amp local to the main
345 345 * thread. If not, the async thread forces the process to remap to an amp in
346 346 * the new home lgroup of the main thread. The current text replication
347 347 * implementation only provides a benefit to workloads that do most of their
348 348 * work in the main thread of a process or whose threads all run in the same
349 349 * lgroup. To extend text replication benefit to different types of
350 350 * multithreaded workloads further work would be needed in the hat layer to
351 351 * allow the same virtual address in the same hat to simultaneously map
352 352 * different physical addresses (i.e. page table replication would be needed
353 353 * for x86).
354 354 *
355 355 * amp pages are used instead of vnode pages as long as segment has a very
356 356 * simple life cycle. It's created via segvn_create(), handles S_EXEC
357 357 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
358 358 * happens, such as a protection change, a real COW fault, a pagesize
359 359 * change, an MC_LOCK request or a partial unmap, we turn off
360 360 * text replication by converting the segment back to a vnode-only segment
361 361 * (unmap the segment's address range and set svd->amp to NULL).
362 362 *
363 363 * The original file can be changed after amp is inserted into
364 364 * svntr_hashtab. Processes that are launched after the file is already
365 365 * changed can't use the replicas created prior to the file change. To
366 366 * implement this, hash entries are timestamped. Replicas can
367 367 * only be used if the current file modification time is the same as the
368 368 * timestamp saved when the hash entry was created. However, timestamps alone are not
369 369 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
370 370 * deal with file changes via MAP_SHARED mappings differently. When writable
371 371 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
372 372 * existing replicas for this vnode as not usable for future text
373 373 * mappings. And we don't create new replicas for files that currently have
374 374 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
375 375 * true).
376 376 */
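To make the hash lookup described above concrete, a conceptual sketch of what an svntr_hashtab entry is keyed on (field names are illustrative only; the actual svntr_t definition lives elsewhere in the tree):

	/*
	 *	vnode_t		*vp;	text file being replicated
	 *	u_offset_t	off;	file offset of the mapping
	 *	size_t		size;	size of the mapped text
	 *	uint_t		szc;	page size code
	 *	timestruc_t	mtime;	file mod time when the entry was made
	 *
	 * plus one amp per lgroup; a replica is reused only while the file's
	 * modification time still matches mtime.
	 */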
377 377
378 378 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
379 379 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
380 380
381 381 static ulong_t svntr_hashtab_sz = 512;
382 382 static svntr_bucket_t *svntr_hashtab = NULL;
383 383 static struct kmem_cache *svntr_cache;
384 384 static svntr_stats_t *segvn_textrepl_stats;
385 385 static ksema_t segvn_trasync_sem;
386 386
387 387 int segvn_disable_textrepl = 1;
388 388 size_t textrepl_size_thresh = (size_t)-1;
389 389 size_t segvn_textrepl_bytes = 0;
390 390 size_t segvn_textrepl_max_bytes = 0;
391 391 clock_t segvn_update_textrepl_interval = 0;
392 392 int segvn_update_tr_time = 10;
393 393 int segvn_disable_textrepl_update = 0;
394 394
395 395 static void segvn_textrepl(struct seg *);
396 396 static void segvn_textunrepl(struct seg *, int);
397 397 static void segvn_inval_trcache(vnode_t *);
398 398 static void segvn_trasync_thread(void);
399 399 static void segvn_trupdate_wakeup(void *);
400 400 static void segvn_trupdate(void);
401 401 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
402 402 ulong_t);
403 403
404 404 /*
405 405 * Initialize segvn data structures
406 406 */
407 407 void
408 408 segvn_init(void)
409 409 {
410 410 uint_t maxszc;
411 411 uint_t szc;
412 412 size_t pgsz;
413 413
414 414 segvn_cache = kmem_cache_create("segvn_cache",
415 415 sizeof (struct segvn_data), 0,
416 416 segvn_cache_constructor, segvn_cache_destructor, NULL,
417 417 NULL, NULL, 0);
418 418
419 419 if (segvn_lpg_disable == 0) {
420 420 szc = maxszc = page_num_pagesizes() - 1;
421 421 if (szc == 0) {
422 422 segvn_lpg_disable = 1;
423 423 }
424 424 if (page_get_pagesize(0) != PAGESIZE) {
425 425 panic("segvn_init: bad szc 0");
426 426 /*NOTREACHED*/
427 427 }
428 428 while (szc != 0) {
429 429 pgsz = page_get_pagesize(szc);
430 430 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
431 431 panic("segvn_init: bad szc %d", szc);
432 432 /*NOTREACHED*/
433 433 }
434 434 szc--;
435 435 }
436 436 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
437 437 segvn_maxpgszc = maxszc;
438 438 }
439 439
440 440 if (segvn_maxpgszc) {
441 441 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
442 442 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
443 443 KM_SLEEP);
444 444 }
445 445
446 446 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
447 447 char str[32];
448 448
449 449 (void) sprintf(str, "segvn_szc_cache%d", szc);
450 450 segvn_szc_cache[szc] = kmem_cache_create(str,
451 451 page_get_pagecnt(szc) * sizeof (page_t *), 0,
452 452 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
453 453 }
454 454
455 455
456 456 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
457 457 segvn_use_regions = 0;
458 458
459 459 /*
460 460 * For now shared regions and text replication segvn support
461 461 * are mutually exclusive. This is acceptable because
462 462 * currently significant benefit from text replication was
463 463 * only observed on AMD64 NUMA platforms (due to relatively
464 464 * small L2$ size) and currently we don't support shared
465 465 * regions on x86.
466 466 */
467 467 if (segvn_use_regions && !segvn_disable_textrepl) {
468 468 segvn_disable_textrepl = 1;
469 469 }
470 470
471 471 #if defined(_LP64)
472 472 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
473 473 !segvn_disable_textrepl) {
474 474 ulong_t i;
475 475 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
476 476
477 477 svntr_cache = kmem_cache_create("svntr_cache",
478 478 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
479 479 NULL, NULL, NULL, 0);
480 480 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
481 481 for (i = 0; i < svntr_hashtab_sz; i++) {
482 482 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
483 483 MUTEX_DEFAULT, NULL);
484 484 }
485 485 segvn_textrepl_max_bytes = ptob(physmem) /
486 486 segvn_textrepl_max_bytes_factor;
487 487 segvn_textrepl_stats = kmem_zalloc(NCPU *
488 488 sizeof (svntr_stats_t), KM_SLEEP);
489 489 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
490 490 (void) thread_create(NULL, 0, segvn_trasync_thread,
491 491 NULL, 0, &p0, TS_RUN, minclsyspri);
492 492 }
493 493 #endif
494 494
495 495 if (!ISP2(segvn_pglock_comb_balign) ||
496 496 segvn_pglock_comb_balign < PAGESIZE) {
497 497 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
498 498 }
499 499 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
500 500 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
501 501 }
502 502
503 503 #define SEGVN_PAGEIO ((void *)0x1)
504 504 #define SEGVN_NOPAGEIO ((void *)0x2)
505 505
506 506 static void
507 507 segvn_setvnode_mpss(vnode_t *vp)
508 508 {
509 509 int err;
510 510
511 511 ASSERT(vp->v_mpssdata == NULL ||
512 512 vp->v_mpssdata == SEGVN_PAGEIO ||
513 513 vp->v_mpssdata == SEGVN_NOPAGEIO);
514 514
515 515 if (vp->v_mpssdata == NULL) {
516 516 if (vn_vmpss_usepageio(vp)) {
517 517 err = VOP_PAGEIO(vp, (page_t *)NULL,
518 518 (u_offset_t)0, 0, 0, CRED(), NULL);
519 519 } else {
520 520 err = ENOSYS;
521 521 }
522 522 /*
523 523 * set v_mpssdata just once per vnode life
524 524 * so that it never changes.
525 525 */
526 526 mutex_enter(&vp->v_lock);
527 527 if (vp->v_mpssdata == NULL) {
528 528 if (err == EINVAL) {
529 529 vp->v_mpssdata = SEGVN_PAGEIO;
530 530 } else {
531 531 vp->v_mpssdata = SEGVN_NOPAGEIO;
532 532 }
533 533 }
534 534 mutex_exit(&vp->v_lock);
535 535 }
536 536 }
537 537
538 538 int
539 539 segvn_create(struct seg *seg, void *argsp)
540 540 {
541 541 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
542 542 struct segvn_data *svd;
543 543 size_t swresv = 0;
544 544 struct cred *cred;
545 545 struct anon_map *amp;
546 546 int error = 0;
547 547 size_t pgsz;
548 548 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
549 549 int use_rgn = 0;
550 550 int trok = 0;
551 551
552 552 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
553 553
554 554 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
555 555 panic("segvn_create type");
556 556 /*NOTREACHED*/
557 557 }
558 558
559 559 /*
560 560 * Check arguments. If a shared anon structure is given then
561 561 * it is illegal to also specify a vp.
562 562 */
563 563 if (a->amp != NULL && a->vp != NULL) {
564 564 panic("segvn_create anon_map");
565 565 /*NOTREACHED*/
566 566 }
567 567
568 568 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
569 569 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
570 570 segvn_use_regions) {
571 571 use_rgn = 1;
572 572 }
573 573
574 574 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
575 575 if (a->type == MAP_SHARED)
576 576 a->flags &= ~MAP_NORESERVE;
577 577
578 578 if (a->szc != 0) {
579 579 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
580 580 (a->amp != NULL && a->type == MAP_PRIVATE) ||
581 581 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
582 582 a->szc = 0;
583 583 } else {
584 584 if (a->szc > segvn_maxpgszc)
585 585 a->szc = segvn_maxpgszc;
586 586 pgsz = page_get_pagesize(a->szc);
587 587 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
588 588 !IS_P2ALIGNED(seg->s_size, pgsz)) {
589 589 a->szc = 0;
590 590 } else if (a->vp != NULL) {
591 591 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
592 592 /*
593 593 * paranoid check.
594 594 * hat_page_demote() is not supported
595 595 * on swapfs pages.
596 596 */
597 597 a->szc = 0;
598 598 } else if (map_addr_vacalign_check(seg->s_base,
599 599 a->offset & PAGEMASK)) {
600 600 a->szc = 0;
601 601 }
602 602 } else if (a->amp != NULL) {
603 603 pgcnt_t anum = btopr(a->offset);
604 604 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
605 605 if (!IS_P2ALIGNED(anum, pgcnt)) {
606 606 a->szc = 0;
607 607 }
608 608 }
609 609 }
610 610 }
611 611
612 612 /*
613 613 * If segment may need private pages, reserve them now.
614 614 */
615 615 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
616 616 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
617 617 if (anon_resv_zone(seg->s_size,
618 618 seg->s_as->a_proc->p_zone) == 0)
619 619 return (EAGAIN);
620 620 swresv = seg->s_size;
621 621 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
622 622 seg, swresv, 1);
623 623 }
624 624
625 625 /*
626 626 * Reserve any mapping structures that may be required.
627 627 *
628 628 * Don't do it for segments that may use regions. It's currently a
629 629 * noop in the hat implementations anyway.
630 630 */
631 631 if (!use_rgn) {
632 632 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
633 633 }
634 634
635 635 if (a->cred) {
636 636 cred = a->cred;
637 637 crhold(cred);
638 638 } else {
639 639 crhold(cred = CRED());
640 640 }
641 641
642 642 /* Inform the vnode of the new mapping */
643 643 if (a->vp != NULL) {
644 644 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
645 645 seg->s_as, seg->s_base, seg->s_size, a->prot,
646 646 a->maxprot, a->type, cred, NULL);
647 647 if (error) {
648 648 if (swresv != 0) {
649 649 anon_unresv_zone(swresv,
650 650 seg->s_as->a_proc->p_zone);
651 651 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
652 652 "anon proc:%p %lu %u", seg, swresv, 0);
653 653 }
654 654 crfree(cred);
655 655 if (!use_rgn) {
656 656 hat_unload(seg->s_as->a_hat, seg->s_base,
657 657 seg->s_size, HAT_UNLOAD_UNMAP);
658 658 }
659 659 return (error);
660 660 }
661 661 /*
662 662 * svntr_hashtab will be NULL if we support shared regions.
663 663 */
664 664 trok = ((a->flags & MAP_TEXT) &&
665 665 (seg->s_size > textrepl_size_thresh ||
666 666 (a->flags & _MAP_TEXTREPL)) &&
667 667 lgrp_optimizations() && svntr_hashtab != NULL &&
668 668 a->type == MAP_PRIVATE && swresv == 0 &&
669 669 !(a->flags & MAP_NORESERVE) &&
670 670 seg->s_as != &kas && a->vp->v_type == VREG);
671 671
672 672 ASSERT(!trok || !use_rgn);
673 673 }
674 674
675 675 /*
676 676 * MAP_NORESERVE mappings don't count towards the VSZ of a process
677 677 * until we fault the pages in.
678 678 */
679 679 if ((a->vp == NULL || a->vp->v_type != VREG) &&
680 680 a->flags & MAP_NORESERVE) {
681 681 seg->s_as->a_resvsize -= seg->s_size;
682 682 }
683 683
684 684 /*
685 685 * If more than one segment in the address space, and they're adjacent
686 686 * virtually, try to concatenate them. Don't concatenate if an
687 687 * explicit anon_map structure was supplied (e.g., SystemV shared
688 688 * memory) or if we'll use text replication for this segment.
689 689 */
690 690 if (a->amp == NULL && !use_rgn && !trok) {
691 691 struct seg *pseg, *nseg;
692 692 struct segvn_data *psvd, *nsvd;
693 693 lgrp_mem_policy_t ppolicy, npolicy;
694 694 uint_t lgrp_mem_policy_flags = 0;
695 695 extern lgrp_mem_policy_t lgrp_mem_default_policy;
696 696
697 697 /*
698 698 * Memory policy flags (lgrp_mem_policy_flags) is valid when
699 699 * extending stack/heap segments.
700 700 */
701 701 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
702 702 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
703 703 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
704 704 } else {
705 705 /*
706 706 * Get policy when not extending it from another segment
707 707 */
708 708 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
709 709 }
710 710
711 711 /*
712 712 * First, try to concatenate the previous and new segments
713 713 */
714 714 pseg = AS_SEGPREV(seg->s_as, seg);
715 715 if (pseg != NULL &&
716 716 pseg->s_base + pseg->s_size == seg->s_base &&
717 717 pseg->s_ops == &segvn_ops) {
718 718 /*
719 719 * Get memory allocation policy from previous segment.
720 720 * When extension is specified (e.g. for heap) apply
721 721 * this policy to the new segment regardless of the
722 722 * outcome of segment concatenation. Extension occurs
723 723 * for a non-default policy; otherwise the default policy is
724 724 * used, based on the extended segment size.
725 725 */
726 726 psvd = (struct segvn_data *)pseg->s_data;
727 727 ppolicy = psvd->policy_info.mem_policy;
728 728 if (lgrp_mem_policy_flags ==
729 729 LGRP_MP_FLAG_EXTEND_UP) {
730 730 if (ppolicy != lgrp_mem_default_policy) {
731 731 mpolicy = ppolicy;
732 732 } else {
733 733 mpolicy = lgrp_mem_policy_default(
734 734 pseg->s_size + seg->s_size,
735 735 a->type);
736 736 }
737 737 }
738 738
739 739 if (mpolicy == ppolicy &&
740 740 (pseg->s_size + seg->s_size <=
741 741 segvn_comb_thrshld || psvd->amp == NULL) &&
742 742 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
743 743 /*
744 744 * success! now try to concatenate
745 745 * with following seg
746 746 */
747 747 crfree(cred);
748 748 nseg = AS_SEGNEXT(pseg->s_as, pseg);
749 749 if (nseg != NULL &&
750 750 nseg != pseg &&
751 751 nseg->s_ops == &segvn_ops &&
752 752 pseg->s_base + pseg->s_size ==
753 753 nseg->s_base)
754 754 (void) segvn_concat(pseg, nseg, 0);
755 755 ASSERT(pseg->s_szc == 0 ||
756 756 (a->szc == pseg->s_szc &&
757 757 IS_P2ALIGNED(pseg->s_base, pgsz) &&
758 758 IS_P2ALIGNED(pseg->s_size, pgsz)));
759 759 return (0);
760 760 }
761 761 }
762 762
763 763 /*
764 764 * Failed, so try to concatenate with following seg
765 765 */
766 766 nseg = AS_SEGNEXT(seg->s_as, seg);
767 767 if (nseg != NULL &&
768 768 seg->s_base + seg->s_size == nseg->s_base &&
769 769 nseg->s_ops == &segvn_ops) {
770 770 /*
771 771 * Get memory allocation policy from next segment.
772 772 * When extension is specified (e.g. for stack) apply
773 773 * this policy to the new segment regardless of the
774 774 * outcome of segment concatenation. Extension occurs
775 775 * for a non-default policy; otherwise the default policy is
776 776 * used, based on the extended segment size.
777 777 */
778 778 nsvd = (struct segvn_data *)nseg->s_data;
779 779 npolicy = nsvd->policy_info.mem_policy;
780 780 if (lgrp_mem_policy_flags ==
781 781 LGRP_MP_FLAG_EXTEND_DOWN) {
782 782 if (npolicy != lgrp_mem_default_policy) {
783 783 mpolicy = npolicy;
784 784 } else {
785 785 mpolicy = lgrp_mem_policy_default(
786 786 nseg->s_size + seg->s_size,
787 787 a->type);
788 788 }
789 789 }
790 790
791 791 if (mpolicy == npolicy &&
792 792 segvn_extend_next(seg, nseg, a, swresv) == 0) {
793 793 crfree(cred);
794 794 ASSERT(nseg->s_szc == 0 ||
795 795 (a->szc == nseg->s_szc &&
796 796 IS_P2ALIGNED(nseg->s_base, pgsz) &&
797 797 IS_P2ALIGNED(nseg->s_size, pgsz)));
798 798 return (0);
799 799 }
800 800 }
801 801 }
802 802
803 803 if (a->vp != NULL) {
804 804 VN_HOLD(a->vp);
805 805 if (a->type == MAP_SHARED)
806 806 lgrp_shm_policy_init(NULL, a->vp);
807 807 }
808 808 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
809 809
810 810 seg->s_ops = &segvn_ops;
811 811 seg->s_data = (void *)svd;
812 812 seg->s_szc = a->szc;
813 813
814 814 svd->seg = seg;
815 815 svd->vp = a->vp;
816 816 /*
817 817 * Anonymous mappings have no backing file so the offset is meaningless.
818 818 */
819 819 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
820 820 svd->prot = a->prot;
821 821 svd->maxprot = a->maxprot;
822 822 svd->pageprot = 0;
823 823 svd->type = a->type;
824 824 svd->vpage = NULL;
825 825 svd->cred = cred;
826 826 svd->advice = MADV_NORMAL;
827 827 svd->pageadvice = 0;
828 828 svd->flags = (ushort_t)a->flags;
829 829 svd->softlockcnt = 0;
830 830 svd->softlockcnt_sbase = 0;
831 831 svd->softlockcnt_send = 0;
832 832 svd->svn_inz = 0;
833 833 svd->rcookie = HAT_INVALID_REGION_COOKIE;
834 834 svd->pageswap = 0;
835 835
836 836 if (a->szc != 0 && a->vp != NULL) {
837 837 segvn_setvnode_mpss(a->vp);
838 838 }
839 839 if (svd->type == MAP_SHARED && svd->vp != NULL &&
840 840 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
841 841 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
842 842 segvn_inval_trcache(svd->vp);
843 843 }
844 844
845 845 amp = a->amp;
846 846 if ((svd->amp = amp) == NULL) {
847 847 svd->anon_index = 0;
848 848 if (svd->type == MAP_SHARED) {
849 849 svd->swresv = 0;
850 850 /*
851 851 * Shared mappings to a vp need no other setup.
852 852 * If we have a shared mapping to an anon_map object
853 853 * which hasn't been allocated yet, allocate the
854 854 * struct now so that it will be properly shared
855 855 * by remembering the swap reservation there.
856 856 */
857 857 if (a->vp == NULL) {
858 858 svd->amp = anonmap_alloc(seg->s_size, swresv,
859 859 ANON_SLEEP);
860 860 svd->amp->a_szc = seg->s_szc;
861 861 }
862 862 } else {
863 863 /*
864 864 * Private mapping (with or without a vp).
865 865 * Allocate anon_map when needed.
866 866 */
867 867 svd->swresv = swresv;
868 868 }
869 869 } else {
870 870 pgcnt_t anon_num;
871 871
872 872 /*
873 873 * Mapping to an existing anon_map structure without a vp.
874 874 * For now we will ensure that the segment size isn't larger
875 875 * than the size - offset gives us. Later on we may wish to
876 876 * have the anon array dynamically allocated itself so that
877 877 * we don't always have to allocate all the anon pointer slots.
878 878 * This of course involves adding extra code to check that we
879 879 * aren't trying to use an anon pointer slot beyond the end
880 880 * of the currently allocated anon array.
881 881 */
882 882 if ((amp->size - a->offset) < seg->s_size) {
883 883 panic("segvn_create anon_map size");
884 884 /*NOTREACHED*/
885 885 }
886 886
887 887 anon_num = btopr(a->offset);
888 888
889 889 if (a->type == MAP_SHARED) {
890 890 /*
891 891 * SHARED mapping to a given anon_map.
892 892 */
893 893 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
894 894 amp->refcnt++;
895 895 if (a->szc > amp->a_szc) {
896 896 amp->a_szc = a->szc;
897 897 }
898 898 ANON_LOCK_EXIT(&amp->a_rwlock);
899 899 svd->anon_index = anon_num;
900 900 svd->swresv = 0;
901 901 } else {
902 902 /*
903 903 * PRIVATE mapping to a given anon_map.
904 904 * Make sure that all the needed anon
905 905 * structures are created (so that we will
906 906 * share the underlying pages if nothing
907 907 * is written by this mapping) and then
908 908 * duplicate the anon array as is done
909 909 * when a privately mapped segment is dup'ed.
910 910 */
911 911 struct anon *ap;
912 912 caddr_t addr;
913 913 caddr_t eaddr;
914 914 ulong_t anon_idx;
915 915 int hat_flag = HAT_LOAD;
916 916
917 917 if (svd->flags & MAP_TEXT) {
918 918 hat_flag |= HAT_LOAD_TEXT;
919 919 }
920 920
921 921 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
922 922 svd->amp->a_szc = seg->s_szc;
923 923 svd->anon_index = 0;
924 924 svd->swresv = swresv;
925 925
926 926 /*
927 927 * Prevent 2 threads from allocating anon
928 928 * slots simultaneously.
929 929 */
930 930 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
931 931 eaddr = seg->s_base + seg->s_size;
932 932
933 933 for (anon_idx = anon_num, addr = seg->s_base;
934 934 addr < eaddr; addr += PAGESIZE, anon_idx++) {
935 935 page_t *pp;
936 936
937 937 if ((ap = anon_get_ptr(amp->ahp,
938 938 anon_idx)) != NULL)
939 939 continue;
940 940
941 941 /*
942 942 * Allocate the anon struct now.
943 943 * Might as well load up translation
944 944 * to the page while we're at it...
945 945 */
946 946 pp = anon_zero(seg, addr, &ap, cred);
947 947 if (ap == NULL || pp == NULL) {
948 948 panic("segvn_create anon_zero");
949 949 /*NOTREACHED*/
950 950 }
951 951
952 952 /*
953 953 * Re-acquire the anon_map lock and
954 954 * initialize the anon array entry.
955 955 */
956 956 ASSERT(anon_get_ptr(amp->ahp,
957 957 anon_idx) == NULL);
958 958 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
959 959 ANON_SLEEP);
960 960
961 961 ASSERT(seg->s_szc == 0);
962 962 ASSERT(!IS_VMODSORT(pp->p_vnode));
963 963
964 964 ASSERT(use_rgn == 0);
965 965 hat_memload(seg->s_as->a_hat, addr, pp,
966 966 svd->prot & ~PROT_WRITE, hat_flag);
967 967
968 968 page_unlock(pp);
969 969 }
970 970 ASSERT(seg->s_szc == 0);
971 971 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
972 972 0, seg->s_size);
973 973 ANON_LOCK_EXIT(&amp->a_rwlock);
974 974 }
975 975 }
976 976
977 977 /*
978 978 * Set default memory allocation policy for segment
979 979 *
980 980 * Always set policy for private memory at least for initialization
981 981 * even if this is a shared memory segment
982 982 */
983 983 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
984 984
985 985 if (svd->type == MAP_SHARED)
986 986 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
987 987 svd->vp, svd->offset, seg->s_size);
988 988
989 989 if (use_rgn) {
990 990 ASSERT(!trok);
991 991 ASSERT(svd->amp == NULL);
992 992 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
993 993 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
994 994 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
995 995 HAT_REGION_TEXT);
996 996 }
997 997
998 998 ASSERT(!trok || !(svd->prot & PROT_WRITE));
999 999 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
1000 1000
1001 1001 return (0);
1002 1002 }
1003 1003
1004 1004 /*
1005 1005 * Concatenate two existing segments, if possible.
1006 1006 * Return 0 on success, -1 if two segments are not compatible
1007 1007 * or -2 on memory allocation failure.
1008 1008 * If amp_cat == 1 then try and concat segments with anon maps
1009 1009 */
1010 1010 static int
1011 1011 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
1012 1012 {
1013 1013 struct segvn_data *svd1 = seg1->s_data;
1014 1014 struct segvn_data *svd2 = seg2->s_data;
1015 1015 struct anon_map *amp1 = svd1->amp;
1016 1016 struct anon_map *amp2 = svd2->amp;
1017 1017 struct vpage *vpage1 = svd1->vpage;
1018 1018 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
1019 1019 size_t size, nvpsize;
1020 1020 pgcnt_t npages1, npages2;
1021 1021
1022 1022 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
1023 1023 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1024 1024 ASSERT(seg1->s_ops == seg2->s_ops);
1025 1025
1026 1026 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1027 1027 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1028 1028 return (-1);
1029 1029 }
1030 1030
1031 1031 /* both segments exist, try to merge them */
1032 1032 #define incompat(x) (svd1->x != svd2->x)
1033 1033 if (incompat(vp) || incompat(maxprot) ||
1034 1034 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1035 1035 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1036 1036 incompat(type) || incompat(cred) || incompat(flags) ||
1037 1037 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1038 1038 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1039 1039 return (-1);
1040 1040 #undef incompat
1041 1041
1042 1042 /*
1043 1043 * vp == NULL implies zfod, offset doesn't matter
1044 1044 */
1045 1045 if (svd1->vp != NULL &&
1046 1046 svd1->offset + seg1->s_size != svd2->offset) {
1047 1047 return (-1);
1048 1048 }
1049 1049
1050 1050 /*
1051 1051 * Don't concatenate if either segment uses text replication.
1052 1052 */
1053 1053 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1054 1054 return (-1);
1055 1055 }
1056 1056
1057 1057 /*
1058 1058 * Fail early if we're not supposed to concatenate
1059 1059 * segments with non NULL amp.
1060 1060 */
1061 1061 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1062 1062 return (-1);
1063 1063 }
1064 1064
1065 1065 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1066 1066 if (amp1 != amp2) {
1067 1067 return (-1);
1068 1068 }
1069 1069 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1070 1070 svd2->anon_index) {
1071 1071 return (-1);
1072 1072 }
1073 1073 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1074 1074 }
1075 1075
1076 1076 /*
1077 1077 * If either seg has vpages, create a new merged vpage array.
1078 1078 */
1079 1079 if (vpage1 != NULL || vpage2 != NULL) {
1080 1080 struct vpage *vp, *evp;
1081 1081
1082 1082 npages1 = seg_pages(seg1);
1083 1083 npages2 = seg_pages(seg2);
1084 1084 nvpsize = vpgtob(npages1 + npages2);
1085 1085
1086 1086 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1087 1087 return (-2);
1088 1088 }
1089 1089
1090 1090 if (vpage1 != NULL) {
1091 1091 bcopy(vpage1, nvpage, vpgtob(npages1));
1092 1092 } else {
1093 1093 evp = nvpage + npages1;
1094 1094 for (vp = nvpage; vp < evp; vp++) {
1095 1095 VPP_SETPROT(vp, svd1->prot);
1096 1096 VPP_SETADVICE(vp, svd1->advice);
1097 1097 }
1098 1098 }
1099 1099
1100 1100 if (vpage2 != NULL) {
1101 1101 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1102 1102 } else {
1103 1103 evp = nvpage + npages1 + npages2;
1104 1104 for (vp = nvpage + npages1; vp < evp; vp++) {
1105 1105 VPP_SETPROT(vp, svd2->prot);
1106 1106 VPP_SETADVICE(vp, svd2->advice);
1107 1107 }
1108 1108 }
1109 1109
1110 1110 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1111 1111 ASSERT(svd1->swresv == seg1->s_size);
1112 1112 ASSERT(!(svd1->flags & MAP_NORESERVE));
1113 1113 ASSERT(!(svd2->flags & MAP_NORESERVE));
1114 1114 evp = nvpage + npages1;
1115 1115 for (vp = nvpage; vp < evp; vp++) {
1116 1116 VPP_SETSWAPRES(vp);
1117 1117 }
1118 1118 }
1119 1119
1120 1120 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1121 1121 ASSERT(svd2->swresv == seg2->s_size);
1122 1122 ASSERT(!(svd1->flags & MAP_NORESERVE));
1123 1123 ASSERT(!(svd2->flags & MAP_NORESERVE));
1124 1124 vp = nvpage + npages1;
1125 1125 evp = vp + npages2;
1126 1126 for (; vp < evp; vp++) {
1127 1127 VPP_SETSWAPRES(vp);
1128 1128 }
1129 1129 }
1130 1130 }
1131 1131 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1132 1132 (svd1->pageswap == 0 && svd2->pageswap == 0));
1133 1133
1134 1134 /*
1135 1135 * If either segment has private pages, create a new merged anon
1136 1136 * array. If merging shared anon segments, just decrement the anon map's
1137 1137 * refcnt.
1138 1138 */
1139 1139 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1140 1140 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1141 1141 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1142 1142 ASSERT(amp1->refcnt >= 2);
1143 1143 amp1->refcnt--;
1144 1144 ANON_LOCK_EXIT(&amp1->a_rwlock);
1145 1145 svd2->amp = NULL;
1146 1146 } else if (amp1 != NULL || amp2 != NULL) {
1147 1147 struct anon_hdr *nahp;
1148 1148 struct anon_map *namp = NULL;
1149 1149 size_t asize;
1150 1150
1151 1151 ASSERT(svd1->type == MAP_PRIVATE);
1152 1152
1153 1153 asize = seg1->s_size + seg2->s_size;
1154 1154 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1155 1155 if (nvpage != NULL) {
1156 1156 kmem_free(nvpage, nvpsize);
1157 1157 }
1158 1158 return (-2);
1159 1159 }
1160 1160 if (amp1 != NULL) {
1161 1161 /*
1162 1162 * XXX anon rwlock is not really needed because
1163 1163 * this is a private segment and we are writers.
1164 1164 */
1165 1165 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1166 1166 ASSERT(amp1->refcnt == 1);
1167 1167 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1168 1168 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1169 1169 anon_release(nahp, btop(asize));
1170 1170 ANON_LOCK_EXIT(&amp1->a_rwlock);
1171 1171 if (nvpage != NULL) {
1172 1172 kmem_free(nvpage, nvpsize);
1173 1173 }
1174 1174 return (-2);
1175 1175 }
1176 1176 }
1177 1177 if (amp2 != NULL) {
1178 1178 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1179 1179 ASSERT(amp2->refcnt == 1);
1180 1180 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1181 1181 nahp, btop(seg1->s_size), btop(seg2->s_size),
1182 1182 ANON_NOSLEEP)) {
1183 1183 anon_release(nahp, btop(asize));
1184 1184 ANON_LOCK_EXIT(&amp2->a_rwlock);
1185 1185 if (amp1 != NULL) {
1186 1186 ANON_LOCK_EXIT(&amp1->a_rwlock);
1187 1187 }
1188 1188 if (nvpage != NULL) {
1189 1189 kmem_free(nvpage, nvpsize);
1190 1190 }
1191 1191 return (-2);
1192 1192 }
1193 1193 }
1194 1194 if (amp1 != NULL) {
1195 1195 namp = amp1;
1196 1196 anon_release(amp1->ahp, btop(amp1->size));
1197 1197 }
1198 1198 if (amp2 != NULL) {
1199 1199 if (namp == NULL) {
1200 1200 ASSERT(amp1 == NULL);
1201 1201 namp = amp2;
1202 1202 anon_release(amp2->ahp, btop(amp2->size));
1203 1203 } else {
1204 1204 amp2->refcnt--;
1205 1205 ANON_LOCK_EXIT(&amp2->a_rwlock);
1206 1206 anonmap_free(amp2);
1207 1207 }
1208 1208 svd2->amp = NULL; /* needed for seg_free */
1209 1209 }
1210 1210 namp->ahp = nahp;
1211 1211 namp->size = asize;
1212 1212 svd1->amp = namp;
1213 1213 svd1->anon_index = 0;
1214 1214 ANON_LOCK_EXIT(&namp->a_rwlock);
1215 1215 }
1216 1216 /*
1217 1217 * Now free the old vpage structures.
1218 1218 */
1219 1219 if (nvpage != NULL) {
1220 1220 if (vpage1 != NULL) {
1221 1221 kmem_free(vpage1, vpgtob(npages1));
1222 1222 }
1223 1223 if (vpage2 != NULL) {
1224 1224 svd2->vpage = NULL;
1225 1225 kmem_free(vpage2, vpgtob(npages2));
1226 1226 }
1227 1227 if (svd2->pageprot) {
1228 1228 svd1->pageprot = 1;
1229 1229 }
1230 1230 if (svd2->pageadvice) {
1231 1231 svd1->pageadvice = 1;
1232 1232 }
1233 1233 if (svd2->pageswap) {
1234 1234 svd1->pageswap = 1;
1235 1235 }
1236 1236 svd1->vpage = nvpage;
1237 1237 }
1238 1238
1239 1239 /* all looks ok, merge segments */
1240 1240 svd1->swresv += svd2->swresv;
1241 1241 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1242 1242 size = seg2->s_size;
1243 1243 seg_free(seg2);
1244 1244 seg1->s_size += size;
1245 1245 return (0);
1246 1246 }
1247 1247
1248 1248 /*
1249 1249 * Extend the previous segment (seg1) to include the
1250 1250 * new segment (seg2 + a), if possible.
1251 1251 * Return 0 on success.
1252 1252 */
1253 1253 static int
1254 1254 segvn_extend_prev(
1255 1255     struct seg *seg1, struct seg *seg2,
1256 1256     struct segvn_crargs *a,
1257 1257     size_t swresv)
1258 1258 {
1259 1259 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1260 1260 size_t size;
1261 1261 struct anon_map *amp1;
1262 1262 struct vpage *new_vpage;
1263 1263
1264 1264 /*
1265 1265 * We don't need any segment level locks for "segvn" data
1266 1266 * since the address space is "write" locked.
1267 1267 */
1268 1268 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1269 1269
1270 1270 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1271 1271 return (-1);
1272 1272 }
1273 1273
1274 1274 /* second segment is new, try to extend first */
1275 1275 /* XXX - should also check cred */
1276 1276 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1277 1277 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1278 1278 svd1->type != a->type || svd1->flags != a->flags ||
1279 1279 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1280 1280 return (-1);
1281 1281
1282 1282 /* vp == NULL implies zfod, offset doesn't matter */
1283 1283 if (svd1->vp != NULL &&
1284 1284 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1285 1285 return (-1);
1286 1286
1287 1287 if (svd1->tr_state != SEGVN_TR_OFF) {
1288 1288 return (-1);
1289 1289 }
1290 1290
1291 1291 amp1 = svd1->amp;
1292 1292 if (amp1) {
1293 1293 pgcnt_t newpgs;
1294 1294
1295 1295 /*
1296 1296 * Segment has private pages, can data structures
1297 1297 * be expanded?
1298 1298 *
1299 1299 * Acquire the anon_map lock to prevent it from changing,
1300 1300 * if it is shared. This ensures that the anon_map
1301 1301 * will not change while a thread which has a read/write
1302 1302 * lock on an address space references it.
1303 1303 * XXX - Don't need the anon_map lock at all if "refcnt"
1304 1304 * is 1.
1305 1305 *
1306 1306 * Can't grow a MAP_SHARED segment with an anonmap because
1307 1307 * there may be existing anon slots where we want to extend
1308 1308 * the segment and we wouldn't know what to do with them
1309 1309 * (e.g., for tmpfs right thing is to just leave them there,
1310 1310 * for /dev/zero they should be cleared out).
1311 1311 */
1312 1312 if (svd1->type == MAP_SHARED)
1313 1313 return (-1);
1314 1314
1315 1315 ANON_LOCK_ENTER(&amp1->a_rwlock, RW_WRITER);
1316 1316 if (amp1->refcnt > 1) {
1317 1317 ANON_LOCK_EXIT(&amp1->a_rwlock);
1318 1318 return (-1);
1319 1319 }
1320 1320 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1321 1321 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1322 1322
1323 1323 if (newpgs == 0) {
1324 1324 ANON_LOCK_EXIT(&amp1->a_rwlock);
1325 1325 return (-1);
1326 1326 }
1327 1327 amp1->size = ptob(newpgs);
1328 1328 ANON_LOCK_EXIT(&amp1->a_rwlock);
1329 1329 }
1330 1330 if (svd1->vpage != NULL) {
1331 1331 struct vpage *vp, *evp;
1332 1332 new_vpage =
1333 1333 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1334 1334 KM_NOSLEEP);
1335 1335 if (new_vpage == NULL)
1336 1336 return (-1);
1337 1337 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1338 1338 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1339 1339 svd1->vpage = new_vpage;
1340 1340
1341 1341 vp = new_vpage + seg_pages(seg1);
1342 1342 evp = vp + seg_pages(seg2);
1343 1343 for (; vp < evp; vp++)
1344 1344 VPP_SETPROT(vp, a->prot);
1345 1345 if (svd1->pageswap && swresv) {
1346 1346 ASSERT(!(svd1->flags & MAP_NORESERVE));
1347 1347 ASSERT(swresv == seg2->s_size);
1348 1348 vp = new_vpage + seg_pages(seg1);
1349 1349 for (; vp < evp; vp++) {
1350 1350 VPP_SETSWAPRES(vp);
1351 1351 }
1352 1352 }
1353 1353 }
1354 1354 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1355 1355 size = seg2->s_size;
1356 1356 seg_free(seg2);
1357 1357 seg1->s_size += size;
1358 1358 svd1->swresv += swresv;
1359 1359 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1360 1360 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1361 1361 (svd1->vp->v_flag & VVMEXEC)) {
1362 1362 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1363 1363 segvn_inval_trcache(svd1->vp);
1364 1364 }
1365 1365 return (0);
1366 1366 }
1367 1367
1368 1368 /*
1369 1369 * Extend the next segment (seg2) to include the
1370 1370 * new segment (seg1 + a), if possible.
1371 1371 * Return 0 on success.
1372 1372 */
1373 1373 static int
1374 1374 segvn_extend_next(
1375 1375 struct seg *seg1,
1376 1376 struct seg *seg2,
1377 1377 struct segvn_crargs *a,
1378 1378 size_t swresv)
1379 1379 {
1380 1380 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1381 1381 size_t size;
1382 1382 struct anon_map *amp2;
1383 1383 struct vpage *new_vpage;
1384 1384
1385 1385 /*
1386 1386 * We don't need any segment level locks for "segvn" data
1387 1387 * since the address space is "write" locked.
1388 1388 */
1389 1389 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1390 1390
1391 1391 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1392 1392 return (-1);
1393 1393 }
1394 1394
1395 1395 /* first segment is new, try to extend second */
1396 1396 /* XXX - should also check cred */
1397 1397 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1398 1398 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1399 1399 svd2->type != a->type || svd2->flags != a->flags ||
1400 1400 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1401 1401 return (-1);
1402 1402 /* vp == NULL implies zfod, offset doesn't matter */
1403 1403 if (svd2->vp != NULL &&
1404 1404 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1405 1405 return (-1);
1406 1406
1407 1407 if (svd2->tr_state != SEGVN_TR_OFF) {
1408 1408 return (-1);
1409 1409 }
1410 1410
1411 1411 amp2 = svd2->amp;
1412 1412 if (amp2) {
1413 1413 pgcnt_t newpgs;
1414 1414
1415 1415 /*
1416 1416 * Segment has private pages, can data structures
1417 1417 * be expanded?
1418 1418 *
1419 1419 * Acquire the anon_map lock to prevent it from changing,
1420 1420 * if it is shared. This ensures that the anon_map
1421 1421 * will not change while a thread which has a read/write
1422 1422 * lock on an address space references it.
1423 1423 *
1424 1424 * XXX - Don't need the anon_map lock at all if "refcnt"
1425 1425 * is 1.
1426 1426 */
1427 1427 if (svd2->type == MAP_SHARED)
1428 1428 return (-1);
1429 1429
1430 1430 ANON_LOCK_ENTER(&amp2->a_rwlock, RW_WRITER);
1431 1431 if (amp2->refcnt > 1) {
1432 1432 ANON_LOCK_EXIT(&amp2->a_rwlock);
1433 1433 return (-1);
1434 1434 }
1435 1435 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1436 1436 btop(seg2->s_size), btop(seg1->s_size),
1437 1437 ANON_NOSLEEP | ANON_GROWDOWN);
1438 1438
1439 1439 if (newpgs == 0) {
1440 1440 ANON_LOCK_EXIT(&amp2->a_rwlock);
1441 1441 return (-1);
1442 1442 }
1443 1443 amp2->size = ptob(newpgs);
1444 1444 ANON_LOCK_EXIT(&amp2->a_rwlock);
1445 1445 }
1446 1446 if (svd2->vpage != NULL) {
1447 1447 struct vpage *vp, *evp;
1448 1448 new_vpage =
1449 1449 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1450 1450 KM_NOSLEEP);
1451 1451 if (new_vpage == NULL) {
1452 1452 /* Not merging segments so adjust anon_index back */
1453 1453 if (amp2)
1454 1454 svd2->anon_index += seg_pages(seg1);
1455 1455 return (-1);
1456 1456 }
1457 1457 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1458 1458 vpgtob(seg_pages(seg2)));
1459 1459 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1460 1460 svd2->vpage = new_vpage;
1461 1461
1462 1462 vp = new_vpage;
1463 1463 evp = vp + seg_pages(seg1);
1464 1464 for (; vp < evp; vp++)
1465 1465 VPP_SETPROT(vp, a->prot);
1466 1466 if (svd2->pageswap && swresv) {
1467 1467 ASSERT(!(svd2->flags & MAP_NORESERVE));
1468 1468 ASSERT(swresv == seg1->s_size);
1469 1469 vp = new_vpage;
1470 1470 for (; vp < evp; vp++) {
1471 1471 VPP_SETSWAPRES(vp);
1472 1472 }
1473 1473 }
1474 1474 }
1475 1475 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1476 1476 size = seg1->s_size;
1477 1477 seg_free(seg1);
1478 1478 seg2->s_size += size;
1479 1479 seg2->s_base -= size;
1480 1480 svd2->offset -= size;
1481 1481 svd2->swresv += swresv;
1482 1482 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1483 1483 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1484 1484 (svd2->vp->v_flag & VVMEXEC)) {
1485 1485 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1486 1486 segvn_inval_trcache(svd2->vp);
1487 1487 }
1488 1488 return (0);
1489 1489 }
1490 1490
1491 1491 /*
1492 1492 * Duplicate all the pages in the segment. This may break COW sharing for a
1493 1493 * given page. If the page is marked with inherit zero set, then instead of
1494 1494 * duplicating the page, we zero the page.
1495 1495 */
1496 1496 static int
1497 1497 segvn_dup_pages(struct seg *seg, struct seg *newseg)
1498 1498 {
1499 1499 int error;
1500 1500 uint_t prot;
1501 1501 page_t *pp;
1502 1502 struct anon *ap, *newap;
1503 1503 size_t i;
1504 1504 caddr_t addr;
1505 1505
1506 1506 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1507 1507 struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
1508 1508 ulong_t old_idx = svd->anon_index;
1509 1509 ulong_t new_idx = 0;
1510 1510
1511 1511 i = btopr(seg->s_size);
1512 1512 addr = seg->s_base;
1513 1513
1514 1514 /*
1515 1515 * XXX break cow sharing using PAGESIZE
1516 1516 * pages. They will be relocated into larger
1517 1517 * pages at fault time.
1518 1518 */
1519 1519 while (i-- > 0) {
1520 1520 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
1521 1521 struct vpage *vpp;
1522 1522
1523 1523 vpp = &svd->vpage[seg_page(seg, addr)];
1524 1524
1525 1525 /*
1526 1526 * prot need not be computed below 'cause anon_private
1527 1527 * is going to ignore it anyway as child doesn't inherit
1528 1528 * pagelock from parent.
1529 1529 */
1530 1530 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;
1531 1531
1532 1532 /*
1533 1533 * Check whether we should zero this or dup it.
1534 1534 */
1535 1535 if (svd->svn_inz == SEGVN_INZ_ALL ||
1536 1536 (svd->svn_inz == SEGVN_INZ_VPP &&
1537 1537 VPP_ISINHZERO(vpp))) {
1538 1538 pp = anon_zero(newseg, addr, &newap,
1539 1539 newsvd->cred);
1540 1540 } else {
1541 1541 page_t *anon_pl[1+1];
1542 1542 uint_t vpprot;
1543 1543 error = anon_getpage(&ap, &vpprot, anon_pl,
1544 1544 PAGESIZE, seg, addr, S_READ, svd->cred);
1545 1545 if (error != 0)
1546 1546 return (error);
1547 1547
1548 1548 pp = anon_private(&newap, newseg, addr, prot,
1549 1549 anon_pl[0], 0, newsvd->cred);
1550 1550 }
1551 1551 if (pp == NULL) {
1552 1552 return (ENOMEM);
1553 1553 }
1554 1554 (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
1555 1555 ANON_SLEEP);
1556 1556 page_unlock(pp);
1557 1557 }
1558 1558 addr += PAGESIZE;
1559 1559 old_idx++;
1560 1560 new_idx++;
1561 1561 }
1562 1562
1563 1563 return (0);
1564 1564 }
1565 1565
1566 1566 static int
1567 1567 segvn_dup(struct seg *seg, struct seg *newseg)
1568 1568 {
1569 1569 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1570 1570 struct segvn_data *newsvd;
1571 1571 pgcnt_t npages = seg_pages(seg);
1572 1572 int error = 0;
1573 1573 size_t len;
1574 1574 struct anon_map *amp;
1575 1575
1576 1576 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1577 1577 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1578 1578
1579 1579 /*
1580 1580 * If segment has anon reserved, reserve more for the new seg.
1581 1581 * For a MAP_NORESERVE segment swresv will be a count of all the
1582 1582 * allocated anon slots; thus we reserve for the child as many slots
1583 1583 * as the parent has allocated. This semantic prevents the child or
1584 1584 	 * parent from dying during a copy-on-write fault caused by trying
1585 1585 * to write a shared pre-existing anon page.
1586 1586 */
1587 1587 if ((len = svd->swresv) != 0) {
1588 1588 if (anon_resv(svd->swresv) == 0)
1589 1589 return (ENOMEM);
1590 1590
1591 1591 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1592 1592 seg, len, 0);
1593 1593 }
1594 1594
1595 1595 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1596 1596
1597 1597 newseg->s_ops = &segvn_ops;
1598 1598 newseg->s_data = (void *)newsvd;
1599 1599 newseg->s_szc = seg->s_szc;
1600 1600
1601 1601 newsvd->seg = newseg;
1602 1602 if ((newsvd->vp = svd->vp) != NULL) {
1603 1603 VN_HOLD(svd->vp);
1604 1604 if (svd->type == MAP_SHARED)
1605 1605 lgrp_shm_policy_init(NULL, svd->vp);
1606 1606 }
1607 1607 newsvd->offset = svd->offset;
1608 1608 newsvd->prot = svd->prot;
1609 1609 newsvd->maxprot = svd->maxprot;
1610 1610 newsvd->pageprot = svd->pageprot;
1611 1611 newsvd->type = svd->type;
1612 1612 newsvd->cred = svd->cred;
1613 1613 crhold(newsvd->cred);
1614 1614 newsvd->advice = svd->advice;
1615 1615 newsvd->pageadvice = svd->pageadvice;
1616 1616 newsvd->svn_inz = svd->svn_inz;
1617 1617 newsvd->swresv = svd->swresv;
1618 1618 newsvd->pageswap = svd->pageswap;
1619 1619 newsvd->flags = svd->flags;
1620 1620 newsvd->softlockcnt = 0;
1621 1621 newsvd->softlockcnt_sbase = 0;
1622 1622 newsvd->softlockcnt_send = 0;
1623 1623 newsvd->policy_info = svd->policy_info;
1624 1624 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1625 1625
1626 1626 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1627 1627 /*
1628 1628 * Not attaching to a shared anon object.
1629 1629 */
1630 1630 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1631 1631 svd->tr_state == SEGVN_TR_OFF);
1632 1632 if (svd->tr_state == SEGVN_TR_ON) {
1633 1633 ASSERT(newsvd->vp != NULL && amp != NULL);
1634 1634 newsvd->tr_state = SEGVN_TR_INIT;
1635 1635 } else {
1636 1636 newsvd->tr_state = svd->tr_state;
1637 1637 }
1638 1638 newsvd->amp = NULL;
1639 1639 newsvd->anon_index = 0;
1640 1640 } else {
1641 1641 /* regions for now are only used on pure vnode segments */
1642 1642 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1643 1643 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1644 1644 newsvd->tr_state = SEGVN_TR_OFF;
1645 1645 if (svd->type == MAP_SHARED) {
1646 1646 ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
1647 1647 newsvd->amp = amp;
1648 1648 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1649 1649 amp->refcnt++;
1650 1650 			ANON_LOCK_EXIT(&amp->a_rwlock);
1651 1651 newsvd->anon_index = svd->anon_index;
1652 1652 } else {
1653 1653 int reclaim = 1;
1654 1654
1655 1655 /*
1656 1656 * Allocate and initialize new anon_map structure.
1657 1657 */
1658 1658 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1659 1659 ANON_SLEEP);
1660 1660 newsvd->amp->a_szc = newseg->s_szc;
1661 1661 newsvd->anon_index = 0;
1662 1662 ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
1663 1663 svd->svn_inz == SEGVN_INZ_ALL ||
1664 1664 svd->svn_inz == SEGVN_INZ_VPP);
1665 1665
1666 1666 /*
1667 1667 * We don't have to acquire the anon_map lock
1668 1668 * for the new segment (since it belongs to an
1669 1669 * address space that is still not associated
1670 1670 * with any process), or the segment in the old
1671 1671 * address space (since all threads in it
1672 1672 * are stopped while duplicating the address space).
1673 1673 */
1674 1674
1675 1675 /*
1676 1676 * The goal of the following code is to make sure that
1677 1677 * softlocked pages do not end up as copy on write
1678 1678 * pages. This would cause problems where one
1679 1679 * thread writes to a page that is COW and a different
1680 1680 * thread in the same process has softlocked it. The
1681 1681 * softlock lock would move away from this process
1682 1682 * because the write would cause this process to get
1683 1683 * a copy (without the softlock).
1684 1684 *
1685 1685 * The strategy here is to just break the
1686 1686 * sharing on pages that could possibly be
1687 1687 * softlocked.
1688 1688 *
1689 1689 * In addition, if any pages have been marked that they
1690 1690 * should be inherited as zero, then we immediately go
1691 1691 * ahead and break COW and zero them. In the case of a
1692 1692 * softlocked page that should be inherited zero, we
1693 1693 * break COW and just get a zero page.
1694 1694 */
1695 1695 retry:
1696 1696 if (svd->softlockcnt ||
1697 1697 svd->svn_inz != SEGVN_INZ_NONE) {
1698 1698 /*
1699 1699 				 * The softlock count might be non-zero
1700 1700 * because some pages are still stuck in the
1701 1701 * cache for lazy reclaim. Flush the cache
1702 1702 * now. This should drop the count to zero.
1703 1703 * [or there is really I/O going on to these
1704 1704 * pages]. Note, we have the writers lock so
1705 1705 * nothing gets inserted during the flush.
1706 1706 */
1707 1707 if (svd->softlockcnt && reclaim == 1) {
1708 1708 segvn_purge(seg);
1709 1709 reclaim = 0;
1710 1710 goto retry;
1711 1711 }
1712 1712
1713 1713 error = segvn_dup_pages(seg, newseg);
1714 1714 if (error != 0) {
1715 1715 newsvd->vpage = NULL;
1716 1716 goto out;
1717 1717 }
1718 1718 } else { /* common case */
1719 1719 if (seg->s_szc != 0) {
1720 1720 /*
1721 1721 * If at least one of anon slots of a
1722 1722 * large page exists then make sure
1723 1723 * all anon slots of a large page
1724 1724 * exist to avoid partial cow sharing
1725 1725 * of a large page in the future.
1726 1726 */
1727 1727 anon_dup_fill_holes(amp->ahp,
1728 1728 svd->anon_index, newsvd->amp->ahp,
1729 1729 0, seg->s_size, seg->s_szc,
1730 1730 svd->vp != NULL);
1731 1731 } else {
1732 1732 anon_dup(amp->ahp, svd->anon_index,
1733 1733 newsvd->amp->ahp, 0, seg->s_size);
1734 1734 }
1735 1735
1736 1736 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1737 1737 seg->s_size, PROT_WRITE);
1738 1738 }
1739 1739 }
1740 1740 }
1741 1741 /*
1742 1742 * If necessary, create a vpage structure for the new segment.
1743 1743 * Do not copy any page lock indications.
1744 1744 */
1745 1745 if (svd->vpage != NULL) {
1746 1746 uint_t i;
1747 1747 struct vpage *ovp = svd->vpage;
1748 1748 struct vpage *nvp;
1749 1749
1750 1750 nvp = newsvd->vpage =
1751 1751 kmem_alloc(vpgtob(npages), KM_SLEEP);
1752 1752 for (i = 0; i < npages; i++) {
1753 1753 *nvp = *ovp++;
1754 1754 VPP_CLRPPLOCK(nvp++);
1755 1755 }
1756 1756 } else
1757 1757 newsvd->vpage = NULL;
1758 1758
1759 1759 /* Inform the vnode of the new mapping */
1760 1760 if (newsvd->vp != NULL) {
1761 1761 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1762 1762 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1763 1763 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1764 1764 }
1765 1765 out:
1766 1766 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1767 1767 ASSERT(newsvd->amp == NULL);
1768 1768 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1769 1769 newsvd->rcookie = svd->rcookie;
1770 1770 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1771 1771 }
1772 1772 return (error);
1773 1773 }
1774 1774
1775 1775
1776 1776 /*
1777 1777 * callback function to invoke free_vp_pages() for only those pages actually
1778 1778 * processed by the HAT when a shared region is destroyed.
1779 1779 */
1780 1780 extern int free_pages;
1781 1781
1782 1782 static void
1783 1783 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1784 1784 size_t r_size, void *r_obj, u_offset_t r_objoff)
1785 1785 {
1786 1786 u_offset_t off;
1787 1787 size_t len;
1788 1788 vnode_t *vp = (vnode_t *)r_obj;
1789 1789
1790 1790 ASSERT(eaddr > saddr);
1791 1791 ASSERT(saddr >= r_saddr);
1792 1792 ASSERT(saddr < r_saddr + r_size);
1793 1793 ASSERT(eaddr > r_saddr);
1794 1794 ASSERT(eaddr <= r_saddr + r_size);
1795 1795 ASSERT(vp != NULL);
1796 1796
1797 1797 if (!free_pages) {
1798 1798 return;
1799 1799 }
1800 1800
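	/*
	 * Map the unloaded address range back onto the underlying vnode.
	 * For example (hypothetical values, 4K pages): with r_objoff 0x2000
	 * and saddr one page past r_saddr, free_vp_pages() is called for
	 * vnode offset 0x3000.
	 */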
1801 1801 len = eaddr - saddr;
1802 1802 off = (saddr - r_saddr) + r_objoff;
1803 1803 free_vp_pages(vp, off, len);
1804 1804 }
1805 1805
1806 1806 /*
1807 1807 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1808 1808 * those pages actually processed by the HAT
1809 1809 */
1810 1810 static void
1811 1811 segvn_hat_unload_callback(hat_callback_t *cb)
1812 1812 {
1813 1813 struct seg *seg = cb->hcb_data;
1814 1814 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1815 1815 size_t len;
1816 1816 u_offset_t off;
1817 1817
1818 1818 ASSERT(svd->vp != NULL);
1819 1819 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1820 1820 ASSERT(cb->hcb_start_addr >= seg->s_base);
1821 1821
1822 1822 len = cb->hcb_end_addr - cb->hcb_start_addr;
1823 1823 off = cb->hcb_start_addr - seg->s_base;
1824 1824 free_vp_pages(svd->vp, svd->offset + off, len);
1825 1825 }
1826 1826
1827 1827 /*
1828 1828 * This function determines the number of bytes of swap reserved by
1829 1829 * a segment for which per-page accounting is present. It is used to
1830 1830 * calculate the correct value of a segvn_data's swresv.
1831 1831 */
1832 1832 static size_t
1833 1833 segvn_count_swap_by_vpages(struct seg *seg)
1834 1834 {
1835 1835 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1836 1836 struct vpage *vp, *evp;
1837 1837 size_t nswappages = 0;
1838 1838
1839 1839 ASSERT(svd->pageswap);
1840 1840 ASSERT(svd->vpage != NULL);
1841 1841
1842 1842 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1843 1843
1844 1844 for (vp = svd->vpage; vp < evp; vp++) {
1845 1845 if (VPP_ISSWAPRES(vp))
1846 1846 nswappages++;
1847 1847 }
1848 1848
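	/*
	 * Convert the page count to a byte count; nswappages << PAGESHIFT
	 * is equivalent to ptob(nswappages), e.g. 3 reserved pages map to
	 * 12K assuming 4K pages.
	 */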
1849 1849 return (nswappages << PAGESHIFT);
1850 1850 }
1851 1851
1852 1852 static int
1853 1853 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1854 1854 {
1855 1855 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1856 1856 struct segvn_data *nsvd;
1857 1857 struct seg *nseg;
1858 1858 struct anon_map *amp;
1859 1859 pgcnt_t opages; /* old segment size in pages */
1860 1860 pgcnt_t npages; /* new segment size in pages */
1861 1861 pgcnt_t dpages; /* pages being deleted (unmapped) */
1862 1862 hat_callback_t callback; /* used for free_vp_pages() */
1863 1863 hat_callback_t *cbp = NULL;
1864 1864 caddr_t nbase;
1865 1865 size_t nsize;
1866 1866 size_t oswresv;
1867 1867 int reclaim = 1;
1868 1868
1869 1869 /*
1870 1870 * We don't need any segment level locks for "segvn" data
1871 1871 * since the address space is "write" locked.
1872 1872 */
1873 1873 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1874 1874
1875 1875 /*
1876 1876 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1877 1877 * softlockcnt is protected from change by the as write lock.
1878 1878 */
1879 1879 retry:
1880 1880 if (svd->softlockcnt > 0) {
1881 1881 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1882 1882
1883 1883 /*
1884 1884 		 * If this is a shared segment, a non-zero softlockcnt
1885 1885 		 * means locked pages are still in use.
1886 1886 */
1887 1887 if (svd->type == MAP_SHARED) {
1888 1888 return (EAGAIN);
1889 1889 }
1890 1890
1891 1891 /*
1892 1892 * since we do have the writers lock nobody can fill
1893 1893 * the cache during the purge. The flush either succeeds
1894 1894 * or we still have pending I/Os.
1895 1895 */
1896 1896 if (reclaim == 1) {
1897 1897 segvn_purge(seg);
1898 1898 reclaim = 0;
1899 1899 goto retry;
1900 1900 }
1901 1901 return (EAGAIN);
1902 1902 }
1903 1903
1904 1904 /*
1905 1905 * Check for bad sizes
1906 1906 */
1907 1907 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1908 1908 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1909 1909 panic("segvn_unmap");
1910 1910 /*NOTREACHED*/
1911 1911 }
1912 1912
1913 1913 if (seg->s_szc != 0) {
1914 1914 size_t pgsz = page_get_pagesize(seg->s_szc);
1915 1915 int err;
1916 1916 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1917 1917 ASSERT(seg->s_base != addr || seg->s_size != len);
1918 1918 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1919 1919 ASSERT(svd->amp == NULL);
1920 1920 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1921 1921 hat_leave_region(seg->s_as->a_hat,
1922 1922 svd->rcookie, HAT_REGION_TEXT);
1923 1923 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1924 1924 /*
1925 1925 * could pass a flag to segvn_demote_range()
1926 1926 * below to tell it not to do any unloads but
1927 1927 * this case is rare enough to not bother for
1928 1928 * now.
1929 1929 */
1930 1930 } else if (svd->tr_state == SEGVN_TR_INIT) {
1931 1931 svd->tr_state = SEGVN_TR_OFF;
1932 1932 } else if (svd->tr_state == SEGVN_TR_ON) {
1933 1933 ASSERT(svd->amp != NULL);
1934 1934 segvn_textunrepl(seg, 1);
1935 1935 ASSERT(svd->amp == NULL);
1936 1936 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1937 1937 }
1938 1938 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1939 1939 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1940 1940 if (err == 0) {
1941 1941 return (IE_RETRY);
1942 1942 }
1943 1943 return (err);
1944 1944 }
1945 1945 }
1946 1946
1947 1947 /* Inform the vnode of the unmapping. */
1948 1948 if (svd->vp) {
1949 1949 int error;
1950 1950
1951 1951 error = VOP_DELMAP(svd->vp,
1952 1952 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1953 1953 seg->s_as, addr, len, svd->prot, svd->maxprot,
1954 1954 svd->type, svd->cred, NULL);
1955 1955
1956 1956 if (error == EAGAIN)
1957 1957 return (error);
1958 1958 }
1959 1959
1960 1960 /*
1961 1961 * Remove any page locks set through this mapping.
1962 1962 	 * If text replication is not off, no page locks could have been
1963 1963 * established via this mapping.
1964 1964 */
1965 1965 if (svd->tr_state == SEGVN_TR_OFF) {
1966 1966 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1967 1967 }
1968 1968
1969 1969 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1970 1970 ASSERT(svd->amp == NULL);
1971 1971 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1972 1972 ASSERT(svd->type == MAP_PRIVATE);
1973 1973 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1974 1974 HAT_REGION_TEXT);
1975 1975 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1976 1976 } else if (svd->tr_state == SEGVN_TR_ON) {
1977 1977 ASSERT(svd->amp != NULL);
1978 1978 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1979 1979 segvn_textunrepl(seg, 1);
1980 1980 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1981 1981 } else {
1982 1982 if (svd->tr_state != SEGVN_TR_OFF) {
1983 1983 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1984 1984 svd->tr_state = SEGVN_TR_OFF;
1985 1985 }
1986 1986 /*
1987 1987 * Unload any hardware translations in the range to be taken
1988 1988 * out. Use a callback to invoke free_vp_pages() effectively.
1989 1989 */
1990 1990 if (svd->vp != NULL && free_pages != 0) {
1991 1991 callback.hcb_data = seg;
1992 1992 callback.hcb_function = segvn_hat_unload_callback;
1993 1993 cbp = &callback;
1994 1994 }
1995 1995 hat_unload_callback(seg->s_as->a_hat, addr, len,
1996 1996 HAT_UNLOAD_UNMAP, cbp);
1997 1997
1998 1998 if (svd->type == MAP_SHARED && svd->vp != NULL &&
1999 1999 (svd->vp->v_flag & VVMEXEC) &&
2000 2000 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
2001 2001 segvn_inval_trcache(svd->vp);
2002 2002 }
2003 2003 }
2004 2004
2005 2005 /*
2006 2006 * Check for entire segment
2007 2007 */
2008 2008 if (addr == seg->s_base && len == seg->s_size) {
2009 2009 seg_free(seg);
2010 2010 return (0);
2011 2011 }
2012 2012
2013 2013 opages = seg_pages(seg);
2014 2014 dpages = btop(len);
2015 2015 npages = opages - dpages;
2016 2016 amp = svd->amp;
2017 2017 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
2018 2018
2019 2019 /*
2020 2020 * Check for beginning of segment
2021 2021 */
2022 2022 if (addr == seg->s_base) {
2023 2023 if (svd->vpage != NULL) {
2024 2024 size_t nbytes;
2025 2025 struct vpage *ovpage;
2026 2026
2027 2027 ovpage = svd->vpage; /* keep pointer to vpage */
2028 2028
2029 2029 nbytes = vpgtob(npages);
2030 2030 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2031 2031 bcopy(&ovpage[dpages], svd->vpage, nbytes);
2032 2032
2033 2033 /* free up old vpage */
2034 2034 kmem_free(ovpage, vpgtob(opages));
2035 2035 }
2036 2036 if (amp != NULL) {
2037 2037 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2038 2038 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2039 2039 /*
2040 2040 * Shared anon map is no longer in use. Before
2041 2041 * freeing its pages purge all entries from
2042 2042 * pcache that belong to this amp.
2043 2043 */
2044 2044 if (svd->type == MAP_SHARED) {
2045 2045 ASSERT(amp->refcnt == 1);
2046 2046 ASSERT(svd->softlockcnt == 0);
2047 2047 anonmap_purge(amp);
2048 2048 }
2049 2049 /*
2050 2050 * Free up now unused parts of anon_map array.
2051 2051 */
2052 2052 if (amp->a_szc == seg->s_szc) {
2053 2053 if (seg->s_szc != 0) {
2054 2054 anon_free_pages(amp->ahp,
2055 2055 svd->anon_index, len,
2056 2056 seg->s_szc);
2057 2057 } else {
2058 2058 anon_free(amp->ahp,
2059 2059 svd->anon_index,
2060 2060 len);
2061 2061 }
2062 2062 } else {
2063 2063 ASSERT(svd->type == MAP_SHARED);
2064 2064 ASSERT(amp->a_szc > seg->s_szc);
2065 2065 anon_shmap_free_pages(amp,
2066 2066 svd->anon_index, len);
2067 2067 }
2068 2068
2069 2069 /*
2070 2070 * Unreserve swap space for the
2071 2071 * unmapped chunk of this segment in
2072 2072 * case it's MAP_SHARED
2073 2073 */
2074 2074 if (svd->type == MAP_SHARED) {
2075 2075 anon_unresv_zone(len,
2076 2076 seg->s_as->a_proc->p_zone);
2077 2077 amp->swresv -= len;
2078 2078 }
2079 2079 }
2080 2080 			ANON_LOCK_EXIT(&amp->a_rwlock);
2081 2081 svd->anon_index += dpages;
2082 2082 }
2083 2083 if (svd->vp != NULL)
2084 2084 svd->offset += len;
2085 2085
2086 2086 seg->s_base += len;
2087 2087 seg->s_size -= len;
2088 2088
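		/*
		 * Adjust the swap reservation for the part that was unmapped.
		 * For MAP_NORESERVE, swresv tracks only the anon slots
		 * actually allocated, so it is recomputed over the remaining
		 * pages (e.g. 10 allocated slots left means swresv becomes
		 * ptob(10)) and the difference is returned to the zone.
		 */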
2089 2089 if (svd->swresv) {
2090 2090 if (svd->flags & MAP_NORESERVE) {
2091 2091 ASSERT(amp);
2092 2092 oswresv = svd->swresv;
2093 2093
2094 2094 svd->swresv = ptob(anon_pages(amp->ahp,
2095 2095 svd->anon_index, npages));
2096 2096 anon_unresv_zone(oswresv - svd->swresv,
2097 2097 seg->s_as->a_proc->p_zone);
2098 2098 if (SEG_IS_PARTIAL_RESV(seg))
2099 2099 seg->s_as->a_resvsize -= oswresv -
2100 2100 svd->swresv;
2101 2101 } else {
2102 2102 size_t unlen;
2103 2103
2104 2104 if (svd->pageswap) {
2105 2105 oswresv = svd->swresv;
2106 2106 svd->swresv =
2107 2107 segvn_count_swap_by_vpages(seg);
2108 2108 ASSERT(oswresv >= svd->swresv);
2109 2109 unlen = oswresv - svd->swresv;
2110 2110 } else {
2111 2111 svd->swresv -= len;
2112 2112 ASSERT(svd->swresv == seg->s_size);
2113 2113 unlen = len;
2114 2114 }
2115 2115 anon_unresv_zone(unlen,
2116 2116 seg->s_as->a_proc->p_zone);
2117 2117 }
2118 2118 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2119 2119 seg, len, 0);
2120 2120 }
2121 2121
2122 2122 return (0);
2123 2123 }
2124 2124
2125 2125 /*
2126 2126 * Check for end of segment
2127 2127 */
2128 2128 if (addr + len == seg->s_base + seg->s_size) {
2129 2129 if (svd->vpage != NULL) {
2130 2130 size_t nbytes;
2131 2131 struct vpage *ovpage;
2132 2132
2133 2133 ovpage = svd->vpage; /* keep pointer to vpage */
2134 2134
2135 2135 nbytes = vpgtob(npages);
2136 2136 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2137 2137 bcopy(ovpage, svd->vpage, nbytes);
2138 2138
2139 2139 /* free up old vpage */
2140 2140 kmem_free(ovpage, vpgtob(opages));
2141 2141
2142 2142 }
2143 2143 if (amp != NULL) {
2144 2144 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2145 2145 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2146 2146 /*
2147 2147 * Free up now unused parts of anon_map array.
2148 2148 */
2149 2149 ulong_t an_idx = svd->anon_index + npages;
2150 2150
2151 2151 /*
2152 2152 * Shared anon map is no longer in use. Before
2153 2153 * freeing its pages purge all entries from
2154 2154 * pcache that belong to this amp.
2155 2155 */
2156 2156 if (svd->type == MAP_SHARED) {
2157 2157 ASSERT(amp->refcnt == 1);
2158 2158 ASSERT(svd->softlockcnt == 0);
2159 2159 anonmap_purge(amp);
2160 2160 }
2161 2161
2162 2162 if (amp->a_szc == seg->s_szc) {
2163 2163 if (seg->s_szc != 0) {
2164 2164 anon_free_pages(amp->ahp,
2165 2165 an_idx, len,
2166 2166 seg->s_szc);
2167 2167 } else {
2168 2168 anon_free(amp->ahp, an_idx,
2169 2169 len);
2170 2170 }
2171 2171 } else {
2172 2172 ASSERT(svd->type == MAP_SHARED);
2173 2173 ASSERT(amp->a_szc > seg->s_szc);
2174 2174 anon_shmap_free_pages(amp,
2175 2175 an_idx, len);
2176 2176 }
2177 2177
2178 2178 /*
2179 2179 * Unreserve swap space for the
2180 2180 * unmapped chunk of this segment in
2181 2181 * case it's MAP_SHARED
2182 2182 */
2183 2183 if (svd->type == MAP_SHARED) {
2184 2184 anon_unresv_zone(len,
2185 2185 seg->s_as->a_proc->p_zone);
2186 2186 amp->swresv -= len;
2187 2187 }
2188 2188 }
2189 2189 			ANON_LOCK_EXIT(&amp->a_rwlock);
2190 2190 }
2191 2191
2192 2192 seg->s_size -= len;
2193 2193
2194 2194 if (svd->swresv) {
2195 2195 if (svd->flags & MAP_NORESERVE) {
2196 2196 ASSERT(amp);
2197 2197 oswresv = svd->swresv;
2198 2198 svd->swresv = ptob(anon_pages(amp->ahp,
2199 2199 svd->anon_index, npages));
2200 2200 anon_unresv_zone(oswresv - svd->swresv,
2201 2201 seg->s_as->a_proc->p_zone);
2202 2202 if (SEG_IS_PARTIAL_RESV(seg))
2203 2203 seg->s_as->a_resvsize -= oswresv -
2204 2204 svd->swresv;
2205 2205 } else {
2206 2206 size_t unlen;
2207 2207
2208 2208 if (svd->pageswap) {
2209 2209 oswresv = svd->swresv;
2210 2210 svd->swresv =
2211 2211 segvn_count_swap_by_vpages(seg);
2212 2212 ASSERT(oswresv >= svd->swresv);
2213 2213 unlen = oswresv - svd->swresv;
2214 2214 } else {
2215 2215 svd->swresv -= len;
2216 2216 ASSERT(svd->swresv == seg->s_size);
2217 2217 unlen = len;
2218 2218 }
2219 2219 anon_unresv_zone(unlen,
2220 2220 seg->s_as->a_proc->p_zone);
2221 2221 }
2222 2222 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2223 2223 "anon proc:%p %lu %u", seg, len, 0);
2224 2224 }
2225 2225
2226 2226 return (0);
2227 2227 }
2228 2228
2229 2229 /*
2230 2230 	 * The section to go is in the middle of the segment, so we
2231 2231 	 * have to make it into two segments.  nseg is made for
2232 2232 * the high end while seg is cut down at the low end.
2233 2233 */
2234 2234 nbase = addr + len; /* new seg base */
2235 2235 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2236 2236 seg->s_size = addr - seg->s_base; /* shrink old seg */
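	/*
	 * For example (hypothetical addresses): unmapping [0x30000, 0x40000)
	 * from a segment covering [0x20000, 0x60000) shrinks seg to
	 * [0x20000, 0x30000) and allocates nseg for [0x40000, 0x60000).
	 */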
2237 2237 nseg = seg_alloc(seg->s_as, nbase, nsize);
2238 2238 if (nseg == NULL) {
2239 2239 panic("segvn_unmap seg_alloc");
2240 2240 /*NOTREACHED*/
2241 2241 }
2242 2242 nseg->s_ops = seg->s_ops;
2243 2243 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2244 2244 nseg->s_data = (void *)nsvd;
2245 2245 nseg->s_szc = seg->s_szc;
2246 2246 *nsvd = *svd;
2247 2247 nsvd->seg = nseg;
2248 2248 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2249 2249 nsvd->swresv = 0;
2250 2250 nsvd->softlockcnt = 0;
2251 2251 nsvd->softlockcnt_sbase = 0;
2252 2252 nsvd->softlockcnt_send = 0;
2253 2253 nsvd->svn_inz = svd->svn_inz;
2254 2254 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2255 2255
2256 2256 if (svd->vp != NULL) {
2257 2257 VN_HOLD(nsvd->vp);
2258 2258 if (nsvd->type == MAP_SHARED)
2259 2259 lgrp_shm_policy_init(NULL, nsvd->vp);
2260 2260 }
2261 2261 crhold(svd->cred);
2262 2262
2263 2263 if (svd->vpage == NULL) {
2264 2264 nsvd->vpage = NULL;
2265 2265 } else {
2266 2266 /* need to split vpage into two arrays */
2267 2267 size_t nbytes;
2268 2268 struct vpage *ovpage;
2269 2269
2270 2270 ovpage = svd->vpage; /* keep pointer to vpage */
2271 2271
2272 2272 npages = seg_pages(seg); /* seg has shrunk */
2273 2273 nbytes = vpgtob(npages);
2274 2274 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2275 2275
2276 2276 bcopy(ovpage, svd->vpage, nbytes);
2277 2277
2278 2278 npages = seg_pages(nseg);
2279 2279 nbytes = vpgtob(npages);
2280 2280 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2281 2281
2282 2282 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2283 2283
2284 2284 /* free up old vpage */
2285 2285 kmem_free(ovpage, vpgtob(opages));
2286 2286 }
2287 2287
2288 2288 if (amp == NULL) {
2289 2289 nsvd->amp = NULL;
2290 2290 nsvd->anon_index = 0;
2291 2291 } else {
2292 2292 /*
2293 2293 * Need to create a new anon map for the new segment.
2294 2294 * We'll also allocate a new smaller array for the old
2295 2295 * smaller segment to save space.
2296 2296 */
2297 2297 opages = btop((uintptr_t)(addr - seg->s_base));
2298 2298 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2299 2299 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2300 2300 /*
2301 2301 * Free up now unused parts of anon_map array.
2302 2302 */
2303 2303 ulong_t an_idx = svd->anon_index + opages;
2304 2304
2305 2305 /*
2306 2306 * Shared anon map is no longer in use. Before
2307 2307 * freeing its pages purge all entries from
2308 2308 * pcache that belong to this amp.
2309 2309 */
2310 2310 if (svd->type == MAP_SHARED) {
2311 2311 ASSERT(amp->refcnt == 1);
2312 2312 ASSERT(svd->softlockcnt == 0);
2313 2313 anonmap_purge(amp);
2314 2314 }
2315 2315
2316 2316 if (amp->a_szc == seg->s_szc) {
2317 2317 if (seg->s_szc != 0) {
2318 2318 anon_free_pages(amp->ahp, an_idx, len,
2319 2319 seg->s_szc);
2320 2320 } else {
2321 2321 anon_free(amp->ahp, an_idx,
2322 2322 len);
2323 2323 }
2324 2324 } else {
2325 2325 ASSERT(svd->type == MAP_SHARED);
2326 2326 ASSERT(amp->a_szc > seg->s_szc);
2327 2327 anon_shmap_free_pages(amp, an_idx, len);
2328 2328 }
2329 2329
2330 2330 /*
2331 2331 * Unreserve swap space for the
2332 2332 * unmapped chunk of this segment in
2333 2333 * case it's MAP_SHARED
2334 2334 */
2335 2335 if (svd->type == MAP_SHARED) {
2336 2336 anon_unresv_zone(len,
2337 2337 seg->s_as->a_proc->p_zone);
2338 2338 amp->swresv -= len;
2339 2339 }
2340 2340 }
2341 2341 nsvd->anon_index = svd->anon_index +
2342 2342 btop((uintptr_t)(nseg->s_base - seg->s_base));
2343 2343 if (svd->type == MAP_SHARED) {
2344 2344 amp->refcnt++;
2345 2345 nsvd->amp = amp;
2346 2346 } else {
2347 2347 struct anon_map *namp;
2348 2348 struct anon_hdr *nahp;
2349 2349
2350 2350 ASSERT(svd->type == MAP_PRIVATE);
2351 2351 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2352 2352 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2353 2353 namp->a_szc = seg->s_szc;
2354 2354 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2355 2355 0, btop(seg->s_size), ANON_SLEEP);
2356 2356 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2357 2357 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2358 2358 anon_release(amp->ahp, btop(amp->size));
2359 2359 svd->anon_index = 0;
2360 2360 nsvd->anon_index = 0;
2361 2361 amp->ahp = nahp;
2362 2362 amp->size = seg->s_size;
2363 2363 nsvd->amp = namp;
2364 2364 }
2365 2365 		ANON_LOCK_EXIT(&amp->a_rwlock);
2366 2366 }
2367 2367 if (svd->swresv) {
2368 2368 if (svd->flags & MAP_NORESERVE) {
2369 2369 ASSERT(amp);
2370 2370 oswresv = svd->swresv;
2371 2371 svd->swresv = ptob(anon_pages(amp->ahp,
2372 2372 svd->anon_index, btop(seg->s_size)));
2373 2373 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2374 2374 nsvd->anon_index, btop(nseg->s_size)));
2375 2375 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2376 2376 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2377 2377 seg->s_as->a_proc->p_zone);
2378 2378 if (SEG_IS_PARTIAL_RESV(seg))
2379 2379 seg->s_as->a_resvsize -= oswresv -
2380 2380 (svd->swresv + nsvd->swresv);
2381 2381 } else {
2382 2382 size_t unlen;
2383 2383
2384 2384 if (svd->pageswap) {
2385 2385 oswresv = svd->swresv;
2386 2386 svd->swresv = segvn_count_swap_by_vpages(seg);
2387 2387 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2388 2388 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2389 2389 unlen = oswresv - (svd->swresv + nsvd->swresv);
2390 2390 } else {
2391 2391 if (seg->s_size + nseg->s_size + len !=
2392 2392 svd->swresv) {
2393 2393 panic("segvn_unmap: cannot split "
2394 2394 "swap reservation");
2395 2395 /*NOTREACHED*/
2396 2396 }
2397 2397 svd->swresv = seg->s_size;
2398 2398 nsvd->swresv = nseg->s_size;
2399 2399 unlen = len;
2400 2400 }
2401 2401 anon_unresv_zone(unlen,
2402 2402 seg->s_as->a_proc->p_zone);
2403 2403 }
2404 2404 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2405 2405 seg, len, 0);
2406 2406 }
2407 2407
2408 2408 return (0); /* I'm glad that's all over with! */
2409 2409 }
2410 2410
2411 2411 static void
2412 2412 segvn_free(struct seg *seg)
2413 2413 {
2414 2414 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2415 2415 pgcnt_t npages = seg_pages(seg);
2416 2416 struct anon_map *amp;
2417 2417 size_t len;
2418 2418
2419 2419 /*
2420 2420 * We don't need any segment level locks for "segvn" data
2421 2421 * since the address space is "write" locked.
2422 2422 */
2423 2423 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2424 2424 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2425 2425
2426 2426 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2427 2427
2428 2428 /*
2429 2429 * Be sure to unlock pages. XXX Why do things get free'ed instead
2430 2430 * of unmapped? XXX
2431 2431 */
2432 2432 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2433 2433 0, MC_UNLOCK, NULL, 0);
2434 2434
2435 2435 /*
2436 2436 * Deallocate the vpage and anon pointers if necessary and possible.
2437 2437 */
2438 2438 if (svd->vpage != NULL) {
2439 2439 kmem_free(svd->vpage, vpgtob(npages));
2440 2440 svd->vpage = NULL;
2441 2441 }
2442 2442 if ((amp = svd->amp) != NULL) {
2443 2443 /*
2444 2444 * If there are no more references to this anon_map
2445 2445 * structure, then deallocate the structure after freeing
2446 2446 * up all the anon slot pointers that we can.
2447 2447 */
2448 2448 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2449 2449 ASSERT(amp->a_szc >= seg->s_szc);
2450 2450 if (--amp->refcnt == 0) {
2451 2451 if (svd->type == MAP_PRIVATE) {
2452 2452 /*
2453 2453 * Private - we only need to anon_free
2454 2454 * the part that this segment refers to.
2455 2455 */
2456 2456 if (seg->s_szc != 0) {
2457 2457 anon_free_pages(amp->ahp,
2458 2458 svd->anon_index, seg->s_size,
2459 2459 seg->s_szc);
2460 2460 } else {
2461 2461 anon_free(amp->ahp, svd->anon_index,
2462 2462 seg->s_size);
2463 2463 }
2464 2464 } else {
2465 2465
2466 2466 /*
2467 2467 * Shared anon map is no longer in use. Before
2468 2468 * freeing its pages purge all entries from
2469 2469 * pcache that belong to this amp.
2470 2470 */
2471 2471 ASSERT(svd->softlockcnt == 0);
2472 2472 anonmap_purge(amp);
2473 2473
2474 2474 /*
2475 2475 * Shared - anon_free the entire
2476 2476 * anon_map's worth of stuff and
2477 2477 * release any swap reservation.
2478 2478 */
2479 2479 if (amp->a_szc != 0) {
2480 2480 anon_shmap_free_pages(amp, 0,
2481 2481 amp->size);
2482 2482 } else {
2483 2483 anon_free(amp->ahp, 0, amp->size);
2484 2484 }
2485 2485 if ((len = amp->swresv) != 0) {
2486 2486 anon_unresv_zone(len,
2487 2487 seg->s_as->a_proc->p_zone);
2488 2488 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2489 2489 "anon proc:%p %lu %u", seg, len, 0);
2490 2490 }
2491 2491 }
2492 2492 svd->amp = NULL;
2493 2493 		ANON_LOCK_EXIT(&amp->a_rwlock);
2494 2494 anonmap_free(amp);
2495 2495 } else if (svd->type == MAP_PRIVATE) {
2496 2496 /*
2497 2497 * We had a private mapping which still has
2498 2498 * a held anon_map so just free up all the
2499 2499 * anon slot pointers that we were using.
2500 2500 */
2501 2501 if (seg->s_szc != 0) {
2502 2502 anon_free_pages(amp->ahp, svd->anon_index,
2503 2503 seg->s_size, seg->s_szc);
2504 2504 } else {
2505 2505 anon_free(amp->ahp, svd->anon_index,
2506 2506 seg->s_size);
2507 2507 }
2508 2508 			ANON_LOCK_EXIT(&amp->a_rwlock);
2509 2509 } else {
2510 2510 			ANON_LOCK_EXIT(&amp->a_rwlock);
2511 2511 }
2512 2512 }
2513 2513
2514 2514 /*
2515 2515 * Release swap reservation.
2516 2516 */
2517 2517 if ((len = svd->swresv) != 0) {
2518 2518 anon_unresv_zone(svd->swresv,
2519 2519 seg->s_as->a_proc->p_zone);
2520 2520 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2521 2521 seg, len, 0);
2522 2522 if (SEG_IS_PARTIAL_RESV(seg))
2523 2523 seg->s_as->a_resvsize -= svd->swresv;
2524 2524 svd->swresv = 0;
2525 2525 }
2526 2526 /*
2527 2527 * Release claim on vnode, credentials, and finally free the
2528 2528 * private data.
2529 2529 */
2530 2530 if (svd->vp != NULL) {
2531 2531 if (svd->type == MAP_SHARED)
2532 2532 lgrp_shm_policy_fini(NULL, svd->vp);
2533 2533 VN_RELE(svd->vp);
2534 2534 svd->vp = NULL;
2535 2535 }
2536 2536 crfree(svd->cred);
2537 2537 svd->pageprot = 0;
2538 2538 svd->pageadvice = 0;
2539 2539 svd->pageswap = 0;
2540 2540 svd->cred = NULL;
2541 2541
2542 2542 /*
2543 2543 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2544 2544 * still working with this segment without holding as lock (in case
2545 2545 * it's called by pcache async thread).
2546 2546 */
2547 2547 ASSERT(svd->softlockcnt == 0);
2548 2548 mutex_enter(&svd->segfree_syncmtx);
2549 2549 mutex_exit(&svd->segfree_syncmtx);
2550 2550
2551 2551 seg->s_data = NULL;
2552 2552 kmem_cache_free(segvn_cache, svd);
2553 2553 }
2554 2554
2555 2555 /*
2556 2556 * Do a F_SOFTUNLOCK call over the range requested. The range must have
2557 2557 * already been F_SOFTLOCK'ed.
2558 2558 * Caller must always match addr and len of a softunlock with a previous
2559 2559 * softlock with exactly the same addr and len.
2560 2560 */
2561 2561 static void
2562 2562 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2563 2563 {
2564 2564 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2565 2565 page_t *pp;
2566 2566 caddr_t adr;
2567 2567 struct vnode *vp;
2568 2568 u_offset_t offset;
2569 2569 ulong_t anon_index;
2570 2570 struct anon_map *amp;
2571 2571 struct anon *ap = NULL;
2572 2572
2573 2573 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2574 2574 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2575 2575
2576 2576 if ((amp = svd->amp) != NULL)
2577 2577 anon_index = svd->anon_index + seg_page(seg, addr);
2578 2578
2579 2579 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2580 2580 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2581 2581 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2582 2582 } else {
2583 2583 hat_unlock(seg->s_as->a_hat, addr, len);
2584 2584 }
2585 2585 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2586 2586 if (amp != NULL) {
2587 2587 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2588 2588 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2589 2589 != NULL) {
2590 2590 swap_xlate(ap, &vp, &offset);
2591 2591 } else {
2592 2592 vp = svd->vp;
2593 2593 offset = svd->offset +
2594 2594 (uintptr_t)(adr - seg->s_base);
2595 2595 }
2596 2596 			ANON_LOCK_EXIT(&amp->a_rwlock);
2597 2597 } else {
2598 2598 vp = svd->vp;
2599 2599 offset = svd->offset +
2600 2600 (uintptr_t)(adr - seg->s_base);
2601 2601 }
2602 2602
2603 2603 /*
2604 2604 * Use page_find() instead of page_lookup() to
2605 2605 * find the page since we know that it is locked.
2606 2606 */
2607 2607 pp = page_find(vp, offset);
2608 2608 if (pp == NULL) {
2609 2609 panic(
2610 2610 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2611 2611 (void *)adr, (void *)ap, (void *)vp, offset);
2612 2612 /*NOTREACHED*/
2613 2613 }
2614 2614
2615 2615 if (rw == S_WRITE) {
2616 2616 hat_setrefmod(pp);
2617 2617 if (seg->s_as->a_vbits)
2618 2618 hat_setstat(seg->s_as, adr, PAGESIZE,
2619 2619 P_REF | P_MOD);
2620 2620 } else if (rw != S_OTHER) {
2621 2621 hat_setref(pp);
2622 2622 if (seg->s_as->a_vbits)
2623 2623 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2624 2624 }
2625 2625 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2626 2626 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2627 2627 page_unlock(pp);
2628 2628 }
2629 2629 ASSERT(svd->softlockcnt >= btop(len));
2630 2630 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2631 2631 /*
2632 2632 * All SOFTLOCKS are gone. Wakeup any waiting
2633 2633 * unmappers so they can try again to unmap.
2634 2634 * Check for waiters first without the mutex
2635 2635 * held so we don't always grab the mutex on
2636 2636 * softunlocks.
2637 2637 */
2638 2638 if (AS_ISUNMAPWAIT(seg->s_as)) {
2639 2639 mutex_enter(&seg->s_as->a_contents);
2640 2640 if (AS_ISUNMAPWAIT(seg->s_as)) {
2641 2641 AS_CLRUNMAPWAIT(seg->s_as);
2642 2642 cv_broadcast(&seg->s_as->a_cv);
2643 2643 }
2644 2644 mutex_exit(&seg->s_as->a_contents);
2645 2645 }
2646 2646 }
2647 2647 }
2648 2648
2649 2649 #define PAGE_HANDLED ((page_t *)-1)
2650 2650
2651 2651 /*
2652 2652 * Release all the pages in the NULL terminated ppp list
2653 2653 * which haven't already been converted to PAGE_HANDLED.
2654 2654 */
2655 2655 static void
2656 2656 segvn_pagelist_rele(page_t **ppp)
2657 2657 {
2658 2658 for (; *ppp != NULL; ppp++) {
2659 2659 if (*ppp != PAGE_HANDLED)
2660 2660 page_unlock(*ppp);
2661 2661 }
2662 2662 }
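/*
 * A rough sketch of the intended usage (see segvn_faultpage() below): the
 * fault path walks the NULL terminated pl[] array and, once a page has been
 * consumed, overwrites its slot with PAGE_HANDLED so that a later
 * segvn_pagelist_rele() only unlocks the pages that were never used.
 */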
2663 2663
2664 2664 static int stealcow = 1;
2665 2665
2666 2666 /*
2667 2667 * Workaround for viking chip bug. See bug id 1220902.
2668 2668 * To fix this down in pagefault() would require importing so
2669 2669  * Fixing this down in pagefault() would require importing so much
2670 2670  * of the as and segvn code as to be unmaintainable.
2671 2671 int enable_mbit_wa = 0;
2672 2672
2673 2673 /*
2674 2674 * Handles all the dirty work of getting the right
2675 2675 * anonymous pages and loading up the translations.
2676 2676 * This routine is called only from segvn_fault()
2677 2677 * when looping over the range of addresses requested.
2678 2678 *
2679 2679 * The basic algorithm here is:
2680 2680 * If this is an anon_zero case
2681 2681 * Call anon_zero to allocate page
2682 2682 * Load up translation
2683 2683 * Return
2684 2684 * endif
2685 2685 * If this is an anon page
2686 2686 * Use anon_getpage to get the page
2687 2687 * else
2688 2688 * Find page in pl[] list passed in
2689 2689 * endif
2690 2690 * If not a cow
2691 2691 * Load up the translation to the page
2692 2692 * return
2693 2693 * endif
2694 2694 * Call anon_private to handle cow
2695 2695 * Load up (writable) translation to new page
2696 2696 */
2697 2697 static faultcode_t
2698 2698 segvn_faultpage(
2699 2699 struct hat *hat, /* the hat to use for mapping */
2700 2700 struct seg *seg, /* seg_vn of interest */
2701 2701 caddr_t addr, /* address in as */
2702 2702 u_offset_t off, /* offset in vp */
2703 2703 struct vpage *vpage, /* pointer to vpage for vp, off */
2704 2704 page_t *pl[], /* object source page pointer */
2705 2705 uint_t vpprot, /* access allowed to object pages */
2706 2706 enum fault_type type, /* type of fault */
2707 2707 enum seg_rw rw, /* type of access at fault */
2708 2708 int brkcow) /* we may need to break cow */
2709 2709 {
2710 2710 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2711 2711 page_t *pp, **ppp;
2712 2712 uint_t pageflags = 0;
2713 2713 page_t *anon_pl[1 + 1];
2714 2714 page_t *opp = NULL; /* original page */
2715 2715 uint_t prot;
2716 2716 int err;
2717 2717 int cow;
2718 2718 int claim;
2719 2719 int steal = 0;
2720 2720 ulong_t anon_index;
2721 2721 struct anon *ap, *oldap;
2722 2722 struct anon_map *amp;
2723 2723 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2724 2724 int anon_lock = 0;
2725 2725 anon_sync_obj_t cookie;
2726 2726
2727 2727 if (svd->flags & MAP_TEXT) {
2728 2728 hat_flag |= HAT_LOAD_TEXT;
2729 2729 }
2730 2730
2731 2731 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2732 2732 ASSERT(seg->s_szc == 0);
2733 2733 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2734 2734
2735 2735 /*
2736 2736 * Initialize protection value for this page.
2737 2737 * If we have per page protection values check it now.
2738 2738 */
2739 2739 if (svd->pageprot) {
2740 2740 uint_t protchk;
2741 2741
2742 2742 switch (rw) {
2743 2743 case S_READ:
2744 2744 protchk = PROT_READ;
2745 2745 break;
2746 2746 case S_WRITE:
2747 2747 protchk = PROT_WRITE;
2748 2748 break;
2749 2749 case S_EXEC:
2750 2750 protchk = PROT_EXEC;
2751 2751 break;
2752 2752 case S_OTHER:
2753 2753 default:
2754 2754 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2755 2755 break;
2756 2756 }
2757 2757
2758 2758 prot = VPP_PROT(vpage);
2759 2759 if ((prot & protchk) == 0)
2760 2760 return (FC_PROT); /* illegal access type */
2761 2761 } else {
2762 2762 prot = svd->prot;
2763 2763 }
2764 2764
2765 2765 if (type == F_SOFTLOCK) {
2766 2766 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2767 2767 }
2768 2768
2769 2769 /*
2770 2770 * Always acquire the anon array lock to prevent 2 threads from
2771 2771 * allocating separate anon slots for the same "addr".
2772 2772 */
2773 2773
2774 2774 if ((amp = svd->amp) != NULL) {
2775 2775 		ASSERT(RW_READ_HELD(&amp->a_rwlock));
2776 2776 anon_index = svd->anon_index + seg_page(seg, addr);
2777 2777 anon_array_enter(amp, anon_index, &cookie);
2778 2778 anon_lock = 1;
2779 2779 }
2780 2780
2781 2781 if (svd->vp == NULL && amp != NULL) {
2782 2782 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2783 2783 /*
2784 2784 * Allocate a (normally) writable anonymous page of
2785 2785 * zeroes. If no advance reservations, reserve now.
2786 2786 */
2787 2787 if (svd->flags & MAP_NORESERVE) {
2788 2788 if (anon_resv_zone(ptob(1),
2789 2789 seg->s_as->a_proc->p_zone)) {
2790 2790 atomic_add_long(&svd->swresv, ptob(1));
2791 2791 atomic_add_long(&seg->s_as->a_resvsize,
2792 2792 ptob(1));
2793 2793 } else {
2794 2794 err = ENOMEM;
2795 2795 goto out;
2796 2796 }
2797 2797 }
2798 2798 if ((pp = anon_zero(seg, addr, &ap,
2799 2799 svd->cred)) == NULL) {
2800 2800 err = ENOMEM;
2801 2801 goto out; /* out of swap space */
2802 2802 }
2803 2803 /*
2804 2804 * Re-acquire the anon_map lock and
2805 2805 * initialize the anon array entry.
2806 2806 */
2807 2807 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2808 2808 ANON_SLEEP);
2809 2809
2810 2810 ASSERT(pp->p_szc == 0);
2811 2811
2812 2812 /*
2813 2813 * Handle pages that have been marked for migration
2814 2814 */
2815 2815 if (lgrp_optimizations())
2816 2816 page_migrate(seg, addr, &pp, 1);
2817 2817
2818 2818 if (enable_mbit_wa) {
2819 2819 if (rw == S_WRITE)
2820 2820 hat_setmod(pp);
2821 2821 else if (!hat_ismod(pp))
2822 2822 prot &= ~PROT_WRITE;
2823 2823 }
2824 2824 /*
2825 2825 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2826 2826 * with MC_LOCKAS, MCL_FUTURE) and this is a
2827 2827 * MAP_NORESERVE segment, we may need to
2828 2828 * permanently lock the page as it is being faulted
2829 2829 * for the first time. The following text applies
2830 2830 * only to MAP_NORESERVE segments:
2831 2831 *
2832 2832 * As per memcntl(2), if this segment was created
2833 2833 * after MCL_FUTURE was applied (a "future"
2834 2834 * segment), its pages must be locked. If this
2835 2835 * segment existed at MCL_FUTURE application (a
2836 2836 * "past" segment), the interface is unclear.
2837 2837 *
2838 2838 * We decide to lock only if vpage is present:
2839 2839 *
2840 2840 * - "future" segments will have a vpage array (see
2841 2841 * as_map), and so will be locked as required
2842 2842 *
2843 2843 * - "past" segments may not have a vpage array,
2844 2844 * depending on whether events (such as
2845 2845 * mprotect) have occurred. Locking if vpage
2846 2846 * exists will preserve legacy behavior. Not
2847 2847 * locking if vpage is absent, will not break
2848 2848 * the interface or legacy behavior. Note that
2849 2849 * allocating vpage here if it's absent requires
2850 2850 * upgrading the segvn reader lock, the cost of
2851 2851 * which does not seem worthwhile.
2852 2852 *
2853 2853 * Usually testing and setting VPP_ISPPLOCK and
2854 2854 * VPP_SETPPLOCK requires holding the segvn lock as
2855 2855 * writer, but in this case all readers are
2856 2856 * serializing on the anon array lock.
2857 2857 */
2858 2858 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2859 2859 (svd->flags & MAP_NORESERVE) &&
2860 2860 !VPP_ISPPLOCK(vpage)) {
2861 2861 proc_t *p = seg->s_as->a_proc;
2862 2862 ASSERT(svd->type == MAP_PRIVATE);
2863 2863 mutex_enter(&p->p_lock);
2864 2864 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2865 2865 1) == 0) {
2866 2866 claim = VPP_PROT(vpage) & PROT_WRITE;
2867 2867 if (page_pp_lock(pp, claim, 0)) {
2868 2868 VPP_SETPPLOCK(vpage);
2869 2869 } else {
2870 2870 rctl_decr_locked_mem(p, NULL,
2871 2871 PAGESIZE, 1);
2872 2872 }
2873 2873 }
2874 2874 mutex_exit(&p->p_lock);
2875 2875 }
2876 2876
2877 2877 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2878 2878 hat_memload(hat, addr, pp, prot, hat_flag);
2879 2879
2880 2880 if (!(hat_flag & HAT_LOAD_LOCK))
2881 2881 page_unlock(pp);
2882 2882
2883 2883 anon_array_exit(&cookie);
2884 2884 return (0);
2885 2885 }
2886 2886 }
2887 2887
2888 2888 /*
2889 2889 * Obtain the page structure via anon_getpage() if it is
2890 2890 * a private copy of an object (the result of a previous
2891 2891 * copy-on-write).
2892 2892 */
2893 2893 if (amp != NULL) {
2894 2894 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2895 2895 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2896 2896 seg, addr, rw, svd->cred);
2897 2897 if (err)
2898 2898 goto out;
2899 2899
2900 2900 if (svd->type == MAP_SHARED) {
2901 2901 /*
2902 2902 * If this is a shared mapping to an
2903 2903 * anon_map, then ignore the write
2904 2904 * permissions returned by anon_getpage().
2905 2905 * They apply to the private mappings
2906 2906 * of this anon_map.
2907 2907 */
2908 2908 vpprot |= PROT_WRITE;
2909 2909 }
2910 2910 opp = anon_pl[0];
2911 2911 }
2912 2912 }
2913 2913
2914 2914 /*
2915 2915 * Search the pl[] list passed in if it is from the
2916 2916 * original object (i.e., not a private copy).
2917 2917 */
2918 2918 if (opp == NULL) {
2919 2919 /*
2920 2920 * Find original page. We must be bringing it in
2921 2921 * from the list in pl[].
2922 2922 */
2923 2923 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2924 2924 if (opp == PAGE_HANDLED)
2925 2925 continue;
2926 2926 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2927 2927 if (opp->p_offset == off)
2928 2928 break;
2929 2929 }
2930 2930 if (opp == NULL) {
2931 2931 panic("segvn_faultpage not found");
2932 2932 /*NOTREACHED*/
2933 2933 }
2934 2934 *ppp = PAGE_HANDLED;
2935 2935
2936 2936 }
2937 2937
2938 2938 ASSERT(PAGE_LOCKED(opp));
2939 2939
2940 2940 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2941 2941 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2942 2942
2943 2943 /*
2944 2944 * The fault is treated as a copy-on-write fault if a
2945 2945 * write occurs on a private segment and the object
2946 2946 * page (i.e., mapping) is write protected. We assume
2947 2947 * that fatal protection checks have already been made.
2948 2948 */
2949 2949
2950 2950 if (brkcow) {
2951 2951 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2952 2952 cow = !(vpprot & PROT_WRITE);
2953 2953 } else if (svd->tr_state == SEGVN_TR_ON) {
2954 2954 /*
2955 2955 		 * If we are doing text replication, COW on first touch.
2956 2956 */
2957 2957 ASSERT(amp != NULL);
2958 2958 ASSERT(svd->vp != NULL);
2959 2959 ASSERT(rw != S_WRITE);
2960 2960 cow = (ap == NULL);
2961 2961 } else {
2962 2962 cow = 0;
2963 2963 }
2964 2964
2965 2965 /*
2966 2966 * If not a copy-on-write case load the translation
2967 2967 * and return.
2968 2968 */
2969 2969 if (cow == 0) {
2970 2970
2971 2971 /*
2972 2972 * Handle pages that have been marked for migration
2973 2973 */
2974 2974 if (lgrp_optimizations())
2975 2975 page_migrate(seg, addr, &opp, 1);
2976 2976
2977 2977 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2978 2978 if (rw == S_WRITE)
2979 2979 hat_setmod(opp);
2980 2980 else if (rw != S_OTHER && !hat_ismod(opp))
2981 2981 prot &= ~PROT_WRITE;
2982 2982 }
2983 2983
2984 2984 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2985 2985 (!svd->pageprot && svd->prot == (prot & vpprot)));
2986 2986 ASSERT(amp == NULL ||
2987 2987 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2988 2988 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2989 2989 svd->rcookie);
2990 2990
2991 2991 if (!(hat_flag & HAT_LOAD_LOCK))
2992 2992 page_unlock(opp);
2993 2993
2994 2994 if (anon_lock) {
2995 2995 anon_array_exit(&cookie);
2996 2996 }
2997 2997 return (0);
2998 2998 }
2999 2999
3000 3000 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3001 3001
3002 3002 hat_setref(opp);
3003 3003
3004 3004 ASSERT(amp != NULL && anon_lock);
3005 3005
3006 3006 /*
3007 3007 * Steal the page only if it isn't a private page
3008 3008 * since stealing a private page is not worth the effort.
3009 3009 */
3010 3010 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
3011 3011 steal = 1;
3012 3012
3013 3013 /*
3014 3014 * Steal the original page if the following conditions are true:
3015 3015 *
3016 3016 * We are low on memory, the page is not private, page is not large,
3017 3017 * not shared, not modified, not `locked' or if we have it `locked'
3018 3018 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3019 3019 * that the page is not shared) and if it doesn't have any
3020 3020 * translations. page_struct_lock isn't needed to look at p_cowcnt
3021 3021 * and p_lckcnt because we first get exclusive lock on page.
3022 3022 */
3023 3023 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
3024 3024
3025 3025 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
3026 3026 page_tryupgrade(opp) && !hat_ismod(opp) &&
3027 3027 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
3028 3028 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
3029 3029 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
3030 3030 /*
3031 3031 * Check if this page has other translations
3032 3032 * after unloading our translation.
3033 3033 */
3034 3034 if (hat_page_is_mapped(opp)) {
3035 3035 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3036 3036 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
3037 3037 HAT_UNLOAD);
3038 3038 }
3039 3039
3040 3040 /*
3041 3041 * hat_unload() might sync back someone else's recent
3042 3042 * modification, so check again.
3043 3043 */
3044 3044 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
3045 3045 pageflags |= STEAL_PAGE;
3046 3046 }
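	/*
	 * When STEAL_PAGE is set, the expectation is that anon_private()
	 * below can reuse opp directly for the private copy instead of
	 * allocating a fresh page and copying into it.
	 */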
3047 3047
3048 3048 /*
3049 3049 * If we have a vpage pointer, see if it indicates that we have
3050 3050 * ``locked'' the page we map -- if so, tell anon_private to
3051 3051 * transfer the locking resource to the new page.
3052 3052 *
3053 3053 * See Statement at the beginning of segvn_lockop regarding
3054 3054 * the way lockcnts/cowcnts are handled during COW.
3055 3055 *
3056 3056 */
3057 3057 if (vpage != NULL && VPP_ISPPLOCK(vpage))
3058 3058 pageflags |= LOCK_PAGE;
3059 3059
3060 3060 /*
3061 3061 * Allocate a private page and perform the copy.
3062 3062 * For MAP_NORESERVE reserve swap space now, unless this
3063 3063 * is a cow fault on an existing anon page in which case
3064 3064 * MAP_NORESERVE will have made advance reservations.
3065 3065 */
3066 3066 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3067 3067 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3068 3068 atomic_add_long(&svd->swresv, ptob(1));
3069 3069 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3070 3070 } else {
3071 3071 page_unlock(opp);
3072 3072 err = ENOMEM;
3073 3073 goto out;
3074 3074 }
3075 3075 }
3076 3076 oldap = ap;
3077 3077 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3078 3078 if (pp == NULL) {
3079 3079 err = ENOMEM; /* out of swap space */
3080 3080 goto out;
3081 3081 }
3082 3082
3083 3083 /*
3084 3084 * If we copied away from an anonymous page, then
3085 3085 * we are one step closer to freeing up an anon slot.
3086 3086 *
3087 3087 * NOTE: The original anon slot must be released while
3088 3088 * holding the "anon_map" lock. This is necessary to prevent
3089 3089 * other threads from obtaining a pointer to the anon slot
3090 3090 * which may be freed if its "refcnt" is 1.
3091 3091 */
3092 3092 if (oldap != NULL)
3093 3093 anon_decref(oldap);
3094 3094
3095 3095 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3096 3096
3097 3097 /*
3098 3098 * Handle pages that have been marked for migration
3099 3099 */
3100 3100 if (lgrp_optimizations())
3101 3101 page_migrate(seg, addr, &pp, 1);
3102 3102
3103 3103 ASSERT(pp->p_szc == 0);
3104 3104
3105 3105 ASSERT(!IS_VMODSORT(pp->p_vnode));
3106 3106 if (enable_mbit_wa) {
3107 3107 if (rw == S_WRITE)
3108 3108 hat_setmod(pp);
3109 3109 else if (!hat_ismod(pp))
3110 3110 prot &= ~PROT_WRITE;
3111 3111 }
3112 3112
3113 3113 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3114 3114 hat_memload(hat, addr, pp, prot, hat_flag);
3115 3115
3116 3116 if (!(hat_flag & HAT_LOAD_LOCK))
3117 3117 page_unlock(pp);
3118 3118
3119 3119 ASSERT(anon_lock);
3120 3120 anon_array_exit(&cookie);
3121 3121 return (0);
3122 3122 out:
3123 3123 if (anon_lock)
3124 3124 anon_array_exit(&cookie);
3125 3125
3126 3126 if (type == F_SOFTLOCK) {
3127 3127 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3128 3128 }
3129 3129 return (FC_MAKE_ERR(err));
3130 3130 }
3131 3131
3132 3132 /*
3133 3133  * Relocate a bunch of smaller targ pages into one large repl page. All targ
3134 3134  * pages must be complete pages smaller than the replacement page.
3135 3135  * It's assumed that no page's szc can change since they are all PAGESIZE or
3136 3136  * complete large pages locked SHARED.
3137 3137 */
3138 3138 static void
3139 3139 segvn_relocate_pages(page_t **targ, page_t *replacement)
3140 3140 {
3141 3141 page_t *pp;
3142 3142 pgcnt_t repl_npgs, curnpgs;
3143 3143 pgcnt_t i;
3144 3144 uint_t repl_szc = replacement->p_szc;
3145 3145 page_t *first_repl = replacement;
3146 3146 page_t *repl;
3147 3147 spgcnt_t npgs;
3148 3148
3149 3149 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3150 3150
3151 3151 ASSERT(repl_szc != 0);
3152 3152 npgs = repl_npgs = page_get_pagecnt(repl_szc);
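	/*
	 * For example, assuming 4K base pages and a 64K replacement page,
	 * page_get_pagecnt(repl_szc) yields 16 constituent pages to fill.
	 */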
3153 3153
3154 3154 i = 0;
3155 3155 while (repl_npgs) {
3156 3156 spgcnt_t nreloc;
3157 3157 int err;
3158 3158 ASSERT(replacement != NULL);
3159 3159 pp = targ[i];
3160 3160 ASSERT(pp->p_szc < repl_szc);
3161 3161 ASSERT(PAGE_EXCL(pp));
3162 3162 ASSERT(!PP_ISFREE(pp));
3163 3163 curnpgs = page_get_pagecnt(pp->p_szc);
3164 3164 if (curnpgs == 1) {
3165 3165 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3166 3166 repl = replacement;
3167 3167 page_sub(&replacement, repl);
3168 3168 ASSERT(PAGE_EXCL(repl));
3169 3169 ASSERT(!PP_ISFREE(repl));
3170 3170 ASSERT(repl->p_szc == repl_szc);
3171 3171 } else {
3172 3172 page_t *repl_savepp;
3173 3173 int j;
3174 3174 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3175 3175 repl_savepp = replacement;
3176 3176 for (j = 0; j < curnpgs; j++) {
3177 3177 repl = replacement;
3178 3178 page_sub(&replacement, repl);
3179 3179 ASSERT(PAGE_EXCL(repl));
3180 3180 ASSERT(!PP_ISFREE(repl));
3181 3181 ASSERT(repl->p_szc == repl_szc);
3182 3182 ASSERT(page_pptonum(targ[i + j]) ==
3183 3183 page_pptonum(targ[i]) + j);
3184 3184 }
3185 3185 repl = repl_savepp;
3186 3186 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3187 3187 }
3188 3188 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3189 3189 if (err || nreloc != curnpgs) {
3190 3190 panic("segvn_relocate_pages: "
3191 3191 "page_relocate failed err=%d curnpgs=%ld "
3192 3192 "nreloc=%ld", err, curnpgs, nreloc);
3193 3193 }
3194 3194 ASSERT(curnpgs <= repl_npgs);
3195 3195 repl_npgs -= curnpgs;
3196 3196 i += curnpgs;
3197 3197 }
3198 3198 ASSERT(replacement == NULL);
3199 3199
3200 3200 repl = first_repl;
3201 3201 repl_npgs = npgs;
3202 3202 for (i = 0; i < repl_npgs; i++) {
3203 3203 ASSERT(PAGE_EXCL(repl));
3204 3204 ASSERT(!PP_ISFREE(repl));
3205 3205 targ[i] = repl;
3206 3206 page_downgrade(targ[i]);
3207 3207 repl++;
3208 3208 }
3209 3209 }
3210 3210
3211 3211 /*
3212 3212 * Check if all pages in ppa array are complete smaller than szc pages and
3213 3213 * their roots will still be aligned relative to their current size if the
3214 3214 * entire ppa array is relocated into one szc page. If these conditions are
3215 3215 * not met return 0.
3216 3216 *
3217 3217 * If all pages are properly aligned attempt to upgrade their locks
3218 3218 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3219 3219 * upgrdfail was set to 0 by caller.
3220 3220 *
3221 3221 * Return 1 if all pages are aligned and locked exclusively.
3222 3222 *
3223 3223 * If all pages in ppa array happen to be physically contiguous to make one
3224 3224 * szc page and all exclusive locks are successfully obtained promote the page
3225 3225 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3226 3226 */
3227 3227 static int
3228 3228 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3229 3229 {
3230 3230 page_t *pp;
3231 3231 pfn_t pfn;
3232 3232 pgcnt_t totnpgs = page_get_pagecnt(szc);
3233 3233 pfn_t first_pfn;
3234 3234 int contig = 1;
3235 3235 pgcnt_t i;
3236 3236 pgcnt_t j;
3237 3237 uint_t curszc;
3238 3238 pgcnt_t curnpgs;
3239 3239 int root = 0;
3240 3240
3241 3241 ASSERT(szc > 0);
3242 3242
3243 3243 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3244 3244
3245 3245 for (i = 0; i < totnpgs; i++) {
3246 3246 pp = ppa[i];
3247 3247 ASSERT(PAGE_SHARED(pp));
3248 3248 ASSERT(!PP_ISFREE(pp));
3249 3249 pfn = page_pptonum(pp);
3250 3250 if (i == 0) {
3251 3251 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3252 3252 contig = 0;
3253 3253 } else {
3254 3254 first_pfn = pfn;
3255 3255 }
3256 3256 } else if (contig && pfn != first_pfn + i) {
3257 3257 contig = 0;
3258 3258 }
3259 3259 if (pp->p_szc == 0) {
3260 3260 if (root) {
3261 3261 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3262 3262 return (0);
3263 3263 }
3264 3264 } else if (!root) {
3265 3265 if ((curszc = pp->p_szc) >= szc) {
3266 3266 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3267 3267 return (0);
3268 3268 }
3269 3269 if (curszc == 0) {
3270 3270 /*
3271 3271 * p_szc changed means we don't have all pages
3272 3272 * locked. return failure.
3273 3273 */
3274 3274 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3275 3275 return (0);
3276 3276 }
3277 3277 curnpgs = page_get_pagecnt(curszc);
3278 3278 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3279 3279 !IS_P2ALIGNED(i, curnpgs)) {
3280 3280 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3281 3281 return (0);
3282 3282 }
3283 3283 root = 1;
3284 3284 } else {
3285 3285 ASSERT(i > 0);
3286 3286 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3287 3287 if (pp->p_szc != curszc) {
3288 3288 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3289 3289 return (0);
3290 3290 }
3291 3291 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3292 3292 panic("segvn_full_szcpages: "
3293 3293 "large page not physically contiguous");
3294 3294 }
3295 3295 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3296 3296 root = 0;
3297 3297 }
3298 3298 }
3299 3299 }
3300 3300
3301 3301 for (i = 0; i < totnpgs; i++) {
3302 3302 ASSERT(ppa[i]->p_szc < szc);
3303 3303 if (!page_tryupgrade(ppa[i])) {
3304 3304 for (j = 0; j < i; j++) {
3305 3305 page_downgrade(ppa[j]);
3306 3306 }
3307 3307 *pszc = ppa[i]->p_szc;
3308 3308 *upgrdfail = 1;
3309 3309 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3310 3310 return (0);
3311 3311 }
3312 3312 }
3313 3313
3314 3314 /*
3315 3315  * When a page is put on a free cachelist its szc is set to 0. If the file
3316 3316  * system reclaimed pages from the cachelist, targ pages will be physically
3317 3317  * contiguous with 0 p_szc. In this case just upgrade the szc of the targ
3318 3318 * pages without any relocations.
3319 3319 * To avoid any hat issues with previous small mappings
3320 3320 * hat_pageunload() the target pages first.
3321 3321 */
3322 3322 if (contig) {
3323 3323 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3324 3324 for (i = 0; i < totnpgs; i++) {
3325 3325 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3326 3326 }
3327 3327 for (i = 0; i < totnpgs; i++) {
3328 3328 ppa[i]->p_szc = szc;
3329 3329 }
3330 3330 for (i = 0; i < totnpgs; i++) {
3331 3331 ASSERT(PAGE_EXCL(ppa[i]));
3332 3332 page_downgrade(ppa[i]);
3333 3333 }
3334 3334 if (pszc != NULL) {
3335 3335 *pszc = szc;
3336 3336 }
3337 3337 }
3338 3338 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3339 3339 return (1);
3340 3340 }
3341 3341
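Editorial note on the promotion path above: the "contig" case in segvn_full_szcpages() can upgrade p_szc in place only when the pfn run already looks like one szc page, i.e. the first pfn is aligned to the target page count and every later pfn follows it consecutively. Below is a minimal user-space sketch of that test (not kernel code, not part of this webrev); the pfn values are made up, and in the kernel they come from page_pptonum() while the count comes from page_get_pagecnt(szc).

	/*
	 * Editorial sketch, not kernel code: model of the physical-contiguity
	 * test that lets segvn_full_szcpages() promote pages in place.
	 */
	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	/*
	 * A run can become one large page iff its start pfn is aligned to
	 * totnpgs (a power of two) and the run is physically consecutive.
	 */
	static int
	is_promotable_run(const uint64_t *pfn, size_t totnpgs)
	{
		size_t i;

		if ((pfn[0] & (totnpgs - 1)) != 0)	/* IS_P2ALIGNED(pfn[0], totnpgs) */
			return (0);
		for (i = 1; i < totnpgs; i++) {
			if (pfn[i] != pfn[0] + i)	/* hole: not contiguous */
				return (0);
		}
		return (1);
	}

	int
	main(void)
	{
		uint64_t ok[8]   = { 0x800, 0x801, 0x802, 0x803, 0x804, 0x805, 0x806, 0x807 };
		uint64_t hole[8] = { 0x800, 0x801, 0x802, 0x803, 0x900, 0x901, 0x902, 0x903 };

		printf("aligned run: %d\n", is_promotable_run(ok, 8));	/* 1 */
		printf("broken run:  %d\n", is_promotable_run(hole, 8));	/* 0 */
		return (0);
	}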
3342 3342 /*
3343 3343 * Create physically contiguous pages for [vp, off] - [vp, off +
3344 3344 * page_size(szc)) range and for private segment return them in ppa array.
3345 3345 * Pages are created either via IO or relocations.
3346 3346 *
3347 3347 * Return 1 on success and 0 on failure.
3348 3348 *
3349 3349 * If physically contiguous pages already exist for this range return 1 without
3350 3350 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3351 3351 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
3352 3352 */
3353 3353
3354 3354 static int
3355 3355 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3356 3356 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3357 3357 int *downsize)
3358 3358
3359 3359 {
3360 3360 page_t *pplist = *ppplist;
3361 3361 size_t pgsz = page_get_pagesize(szc);
3362 3362 pgcnt_t pages = btop(pgsz);
3363 3363 ulong_t start_off = off;
3364 3364 u_offset_t eoff = off + pgsz;
3365 3365 spgcnt_t nreloc;
3366 3366 u_offset_t io_off = off;
3367 3367 size_t io_len;
3368 3368 page_t *io_pplist = NULL;
3369 3369 page_t *done_pplist = NULL;
3370 3370 pgcnt_t pgidx = 0;
3371 3371 page_t *pp;
3372 3372 page_t *newpp;
3373 3373 page_t *targpp;
3374 3374 int io_err = 0;
3375 3375 int i;
3376 3376 pfn_t pfn;
3377 3377 ulong_t ppages;
3378 3378 page_t *targ_pplist = NULL;
3379 3379 page_t *repl_pplist = NULL;
3380 3380 page_t *tmp_pplist;
3381 3381 int nios = 0;
3382 3382 uint_t pszc;
3383 3383 struct vattr va;
3384 3384
3385 3385 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3386 3386
3387 3387 ASSERT(szc != 0);
3388 3388 ASSERT(pplist->p_szc == szc);
3389 3389
3390 3390 /*
3391 3391  * downsize will be set to 1 only if we fail to lock pages. This will
3392 3392  * allow subsequent faults to try to relocate the page again. If we
3393 3393  * fail due to misalignment, don't downsize and let the caller map the
3394 3394 * whole region with small mappings to avoid more faults into the area
3395 3395 * where we can't get large pages anyway.
3396 3396 */
3397 3397 *downsize = 0;
3398 3398
3399 3399 while (off < eoff) {
3400 3400 newpp = pplist;
3401 3401 ASSERT(newpp != NULL);
3402 3402 ASSERT(PAGE_EXCL(newpp));
3403 3403 ASSERT(!PP_ISFREE(newpp));
3404 3404 /*
3405 3405 * we pass NULL for nrelocp to page_lookup_create()
3406 3406 * so that it doesn't relocate. We relocate here
3407 3407 * later only after we make sure we can lock all
3408 3408 * pages in the range we handle and they are all
3409 3409 * aligned.
3410 3410 */
3411 3411 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3412 3412 ASSERT(pp != NULL);
3413 3413 ASSERT(!PP_ISFREE(pp));
3414 3414 ASSERT(pp->p_vnode == vp);
3415 3415 ASSERT(pp->p_offset == off);
3416 3416 if (pp == newpp) {
3417 3417 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3418 3418 page_sub(&pplist, pp);
3419 3419 ASSERT(PAGE_EXCL(pp));
3420 3420 ASSERT(page_iolock_assert(pp));
3421 3421 page_list_concat(&io_pplist, &pp);
3422 3422 off += PAGESIZE;
3423 3423 continue;
3424 3424 }
3425 3425 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3426 3426 pfn = page_pptonum(pp);
3427 3427 pszc = pp->p_szc;
3428 3428 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3429 3429 IS_P2ALIGNED(pfn, pages)) {
3430 3430 ASSERT(repl_pplist == NULL);
3431 3431 ASSERT(done_pplist == NULL);
3432 3432 ASSERT(pplist == *ppplist);
3433 3433 page_unlock(pp);
3434 3434 page_free_replacement_page(pplist);
3435 3435 page_create_putback(pages);
3436 3436 *ppplist = NULL;
3437 3437 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3438 3438 return (1);
3439 3439 }
3440 3440 if (pszc >= szc) {
3441 3441 page_unlock(pp);
3442 3442 segvn_faultvnmpss_align_err1++;
3443 3443 goto out;
3444 3444 }
3445 3445 ppages = page_get_pagecnt(pszc);
3446 3446 if (!IS_P2ALIGNED(pfn, ppages)) {
3447 3447 ASSERT(pszc > 0);
3448 3448 /*
3449 3449 * sizing down to pszc won't help.
3450 3450 */
3451 3451 page_unlock(pp);
3452 3452 segvn_faultvnmpss_align_err2++;
3453 3453 goto out;
3454 3454 }
3455 3455 pfn = page_pptonum(newpp);
3456 3456 if (!IS_P2ALIGNED(pfn, ppages)) {
3457 3457 ASSERT(pszc > 0);
3458 3458 /*
3459 3459 * sizing down to pszc won't help.
3460 3460 */
3461 3461 page_unlock(pp);
3462 3462 segvn_faultvnmpss_align_err3++;
3463 3463 goto out;
3464 3464 }
3465 3465 if (!PAGE_EXCL(pp)) {
3466 3466 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3467 3467 page_unlock(pp);
3468 3468 *downsize = 1;
3469 3469 *ret_pszc = pp->p_szc;
3470 3470 goto out;
3471 3471 }
3472 3472 targpp = pp;
3473 3473 if (io_pplist != NULL) {
3474 3474 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3475 3475 io_len = off - io_off;
3476 3476 /*
3477 3477 * Some file systems like NFS don't check EOF
3478 3478 * conditions in VOP_PAGEIO(). Check it here
3479 3479 * now that pages are locked SE_EXCL. Any file
3480 3480 * truncation will wait until the pages are
3481 3481 * unlocked so no need to worry that file will
3482 3482 * be truncated after we check its size here.
3483 3483 * XXX fix NFS to remove this check.
3484 3484 */
3485 3485 va.va_mask = AT_SIZE;
3486 3486 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3487 3487 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3488 3488 page_unlock(targpp);
3489 3489 goto out;
3490 3490 }
3491 3491 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3492 3492 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3493 3493 *downsize = 1;
3494 3494 *ret_pszc = 0;
3495 3495 page_unlock(targpp);
3496 3496 goto out;
3497 3497 }
3498 3498 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3499 3499 B_READ, svd->cred, NULL);
3500 3500 if (io_err) {
3501 3501 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3502 3502 page_unlock(targpp);
3503 3503 if (io_err == EDEADLK) {
3504 3504 segvn_vmpss_pageio_deadlk_err++;
3505 3505 }
3506 3506 goto out;
3507 3507 }
3508 3508 nios++;
3509 3509 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3510 3510 while (io_pplist != NULL) {
3511 3511 pp = io_pplist;
3512 3512 page_sub(&io_pplist, pp);
3513 3513 ASSERT(page_iolock_assert(pp));
3514 3514 page_io_unlock(pp);
3515 3515 pgidx = (pp->p_offset - start_off) >>
3516 3516 PAGESHIFT;
3517 3517 ASSERT(pgidx < pages);
3518 3518 ppa[pgidx] = pp;
3519 3519 page_list_concat(&done_pplist, &pp);
3520 3520 }
3521 3521 }
3522 3522 pp = targpp;
3523 3523 ASSERT(PAGE_EXCL(pp));
3524 3524 ASSERT(pp->p_szc <= pszc);
3525 3525 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3526 3526 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3527 3527 page_unlock(pp);
3528 3528 *downsize = 1;
3529 3529 *ret_pszc = pp->p_szc;
3530 3530 goto out;
3531 3531 }
3532 3532 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3533 3533 /*
3534 3534 		 * page szc could have changed before the entire group was
3535 3535 		 * locked. Reread page szc.
3536 3536 */
3537 3537 pszc = pp->p_szc;
3538 3538 ppages = page_get_pagecnt(pszc);
3539 3539
3540 3540 /* link just the roots */
3541 3541 page_list_concat(&targ_pplist, &pp);
3542 3542 page_sub(&pplist, newpp);
3543 3543 page_list_concat(&repl_pplist, &newpp);
3544 3544 off += PAGESIZE;
3545 3545 while (--ppages != 0) {
3546 3546 newpp = pplist;
3547 3547 page_sub(&pplist, newpp);
3548 3548 off += PAGESIZE;
3549 3549 }
3550 3550 io_off = off;
3551 3551 }
3552 3552 if (io_pplist != NULL) {
3553 3553 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3554 3554 io_len = eoff - io_off;
3555 3555 va.va_mask = AT_SIZE;
3556 3556 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3557 3557 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3558 3558 goto out;
3559 3559 }
3560 3560 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3561 3561 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3562 3562 *downsize = 1;
3563 3563 *ret_pszc = 0;
3564 3564 goto out;
3565 3565 }
3566 3566 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3567 3567 B_READ, svd->cred, NULL);
3568 3568 if (io_err) {
3569 3569 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3570 3570 if (io_err == EDEADLK) {
3571 3571 segvn_vmpss_pageio_deadlk_err++;
3572 3572 }
3573 3573 goto out;
3574 3574 }
3575 3575 nios++;
3576 3576 while (io_pplist != NULL) {
3577 3577 pp = io_pplist;
3578 3578 page_sub(&io_pplist, pp);
3579 3579 ASSERT(page_iolock_assert(pp));
3580 3580 page_io_unlock(pp);
3581 3581 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3582 3582 ASSERT(pgidx < pages);
3583 3583 ppa[pgidx] = pp;
3584 3584 }
3585 3585 }
3586 3586 /*
3587 3587 	 * We're now bound to succeed or panic.
3588 3588 	 * Remove pages from done_pplist; it's not needed anymore.
3589 3589 */
3590 3590 while (done_pplist != NULL) {
3591 3591 pp = done_pplist;
3592 3592 page_sub(&done_pplist, pp);
3593 3593 }
3594 3594 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3595 3595 ASSERT(pplist == NULL);
3596 3596 *ppplist = NULL;
3597 3597 while (targ_pplist != NULL) {
3598 3598 int ret;
3599 3599 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3600 3600 ASSERT(repl_pplist);
3601 3601 pp = targ_pplist;
3602 3602 page_sub(&targ_pplist, pp);
3603 3603 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3604 3604 newpp = repl_pplist;
3605 3605 page_sub(&repl_pplist, newpp);
3606 3606 #ifdef DEBUG
3607 3607 pfn = page_pptonum(pp);
3608 3608 pszc = pp->p_szc;
3609 3609 ppages = page_get_pagecnt(pszc);
3610 3610 ASSERT(IS_P2ALIGNED(pfn, ppages));
3611 3611 pfn = page_pptonum(newpp);
3612 3612 ASSERT(IS_P2ALIGNED(pfn, ppages));
3613 3613 ASSERT(P2PHASE(pfn, pages) == pgidx);
3614 3614 #endif
3615 3615 nreloc = 0;
3616 3616 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3617 3617 if (ret != 0 || nreloc == 0) {
3618 3618 panic("segvn_fill_vp_pages: "
3619 3619 "page_relocate failed");
3620 3620 }
3621 3621 pp = newpp;
3622 3622 while (nreloc-- != 0) {
3623 3623 ASSERT(PAGE_EXCL(pp));
3624 3624 ASSERT(pp->p_vnode == vp);
3625 3625 ASSERT(pgidx ==
3626 3626 ((pp->p_offset - start_off) >> PAGESHIFT));
3627 3627 ppa[pgidx++] = pp;
3628 3628 pp++;
3629 3629 }
3630 3630 }
3631 3631
3632 3632 if (svd->type == MAP_PRIVATE) {
3633 3633 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3634 3634 for (i = 0; i < pages; i++) {
3635 3635 ASSERT(ppa[i] != NULL);
3636 3636 ASSERT(PAGE_EXCL(ppa[i]));
3637 3637 ASSERT(ppa[i]->p_vnode == vp);
3638 3638 ASSERT(ppa[i]->p_offset ==
3639 3639 start_off + (i << PAGESHIFT));
3640 3640 page_downgrade(ppa[i]);
3641 3641 }
3642 3642 ppa[pages] = NULL;
3643 3643 } else {
3644 3644 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3645 3645 /*
3646 3646 		 * The caller will still call VOP_GETPAGE() for shared segments
3647 3647 		 * to check FS write permissions. For private segments we map the
3648 3648 		 * file read-only anyway, so no VOP_GETPAGE() is needed.
3649 3649 */
3650 3650 for (i = 0; i < pages; i++) {
3651 3651 ASSERT(ppa[i] != NULL);
3652 3652 ASSERT(PAGE_EXCL(ppa[i]));
3653 3653 ASSERT(ppa[i]->p_vnode == vp);
3654 3654 ASSERT(ppa[i]->p_offset ==
3655 3655 start_off + (i << PAGESHIFT));
3656 3656 page_unlock(ppa[i]);
3657 3657 }
3658 3658 ppa[0] = NULL;
3659 3659 }
3660 3660
3661 3661 return (1);
3662 3662 out:
3663 3663 /*
3664 3664 * Do the cleanup. Unlock target pages we didn't relocate. They are
3665 3665 	 * linked on targ_pplist by root pages. Reassemble unused replacement
3666 3666 * and io pages back to pplist.
3667 3667 */
3668 3668 if (io_pplist != NULL) {
3669 3669 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3670 3670 pp = io_pplist;
3671 3671 do {
3672 3672 ASSERT(pp->p_vnode == vp);
3673 3673 ASSERT(pp->p_offset == io_off);
3674 3674 ASSERT(page_iolock_assert(pp));
3675 3675 page_io_unlock(pp);
3676 3676 page_hashout(pp, NULL);
3677 3677 io_off += PAGESIZE;
3678 3678 } while ((pp = pp->p_next) != io_pplist);
3679 3679 page_list_concat(&io_pplist, &pplist);
3680 3680 pplist = io_pplist;
3681 3681 }
3682 3682 tmp_pplist = NULL;
3683 3683 while (targ_pplist != NULL) {
3684 3684 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3685 3685 pp = targ_pplist;
3686 3686 ASSERT(PAGE_EXCL(pp));
3687 3687 page_sub(&targ_pplist, pp);
3688 3688
3689 3689 pszc = pp->p_szc;
3690 3690 ppages = page_get_pagecnt(pszc);
3691 3691 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3692 3692
3693 3693 if (pszc != 0) {
3694 3694 group_page_unlock(pp);
3695 3695 }
3696 3696 page_unlock(pp);
3697 3697
3698 3698 pp = repl_pplist;
3699 3699 ASSERT(pp != NULL);
3700 3700 ASSERT(PAGE_EXCL(pp));
3701 3701 ASSERT(pp->p_szc == szc);
3702 3702 page_sub(&repl_pplist, pp);
3703 3703
3704 3704 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3705 3705
3706 3706 /* relink replacement page */
3707 3707 page_list_concat(&tmp_pplist, &pp);
3708 3708 while (--ppages != 0) {
3709 3709 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3710 3710 pp++;
3711 3711 ASSERT(PAGE_EXCL(pp));
3712 3712 ASSERT(pp->p_szc == szc);
3713 3713 page_list_concat(&tmp_pplist, &pp);
3714 3714 }
3715 3715 }
3716 3716 if (tmp_pplist != NULL) {
3717 3717 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3718 3718 page_list_concat(&tmp_pplist, &pplist);
3719 3719 pplist = tmp_pplist;
3720 3720 }
3721 3721 /*
3722 3722 	 * At this point all pages are either on done_pplist or
3723 3723 	 * pplist. They can't all be on done_pplist, otherwise
3724 3724 	 * we would already be done.
3725 3725 */
3726 3726 ASSERT(pplist != NULL);
3727 3727 if (nios != 0) {
3728 3728 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3729 3729 pp = pplist;
3730 3730 do {
3731 3731 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3732 3732 ASSERT(pp->p_szc == szc);
3733 3733 ASSERT(PAGE_EXCL(pp));
3734 3734 ASSERT(pp->p_vnode != vp);
3735 3735 pp->p_szc = 0;
3736 3736 } while ((pp = pp->p_next) != pplist);
3737 3737
3738 3738 pp = done_pplist;
3739 3739 do {
3740 3740 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3741 3741 ASSERT(pp->p_szc == szc);
3742 3742 ASSERT(PAGE_EXCL(pp));
3743 3743 ASSERT(pp->p_vnode == vp);
3744 3744 pp->p_szc = 0;
3745 3745 } while ((pp = pp->p_next) != done_pplist);
3746 3746
3747 3747 while (pplist != NULL) {
3748 3748 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3749 3749 pp = pplist;
3750 3750 page_sub(&pplist, pp);
3751 3751 page_free(pp, 0);
3752 3752 }
3753 3753
3754 3754 while (done_pplist != NULL) {
3755 3755 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3756 3756 pp = done_pplist;
3757 3757 page_sub(&done_pplist, pp);
3758 3758 page_unlock(pp);
3759 3759 }
3760 3760 *ppplist = NULL;
3761 3761 return (0);
3762 3762 }
3763 3763 ASSERT(pplist == *ppplist);
3764 3764 if (io_err) {
3765 3765 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3766 3766 /*
3767 3767 		 * Don't downsize on an I/O error;
3768 3768 		 * see if VOP_GETPAGE() succeeds.
3769 3769 * pplist may still be used in this case
3770 3770 * for relocations.
3771 3771 */
3772 3772 return (0);
3773 3773 }
3774 3774 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3775 3775 page_free_replacement_page(pplist);
3776 3776 page_create_putback(pages);
3777 3777 *ppplist = NULL;
3778 3778 return (0);
3779 3779 }
3780 3780
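Editorial note on the EOF guards above: both VOP_PAGEIO() call sites in segvn_fill_vp_pages() first compare btopr(va.va_size) against btopr(io_off + io_len) and downsize when the read would run past the last page the file can back. A small sketch of that arithmetic follows (not kernel code, not part of this webrev); it assumes btopr() rounds a byte count up to whole pages, and the 8K page size is only an example.

	/*
	 * Editorial sketch, not kernel code: the EOF guard used before
	 * VOP_PAGEIO() in segvn_fill_vp_pages().  BTOPR models btopr() as a
	 * bytes-to-pages round-up; the page size is only an example value.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define EX_PAGESIZE	8192ULL
	#define BTOPR(bytes)	(((bytes) + EX_PAGESIZE - 1) / EX_PAGESIZE)

	/* Return 1 when the planned I/O would extend past the file's last page. */
	static int
	io_past_eof(uint64_t va_size, uint64_t io_off, uint64_t io_len)
	{
		return (BTOPR(va_size) < BTOPR(io_off + io_len));
	}

	int
	main(void)
	{
		/* File ends mid-page at 20000 bytes: pages 0..2 are backed. */
		printf("%d\n", io_past_eof(20000, 0, 2 * EX_PAGESIZE));		/* 0: within file */
		printf("%d\n", io_past_eof(20000, 2 * EX_PAGESIZE, EX_PAGESIZE));	/* 0: last partial page */
		printf("%d\n", io_past_eof(20000, 3 * EX_PAGESIZE, EX_PAGESIZE));	/* 1: past EOF, downsize */
		return (0);
	}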
3781 3781 int segvn_anypgsz = 0;
3782 3782
3783 3783 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3784 3784 if ((type) == F_SOFTLOCK) { \
3785 3785 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3786 3786 -(pages)); \
3787 3787 }
3788 3788
3789 3789 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3790 3790 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3791 3791 if ((rw) == S_WRITE) { \
3792 3792 for (i = 0; i < (pages); i++) { \
3793 3793 ASSERT((ppa)[i]->p_vnode == \
3794 3794 (ppa)[0]->p_vnode); \
3795 3795 hat_setmod((ppa)[i]); \
3796 3796 } \
3797 3797 } else if ((rw) != S_OTHER && \
3798 3798 ((prot) & (vpprot) & PROT_WRITE)) { \
3799 3799 for (i = 0; i < (pages); i++) { \
3800 3800 ASSERT((ppa)[i]->p_vnode == \
3801 3801 (ppa)[0]->p_vnode); \
3802 3802 if (!hat_ismod((ppa)[i])) { \
3803 3803 prot &= ~PROT_WRITE; \
3804 3804 break; \
3805 3805 } \
3806 3806 } \
3807 3807 } \
3808 3808 }
3809 3809
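Editorial note on SEGVN_UPDATE_MODBITS() above: for VMODSORT vnodes a write fault marks every constituent page modified, while any other access that would hand out write permission first checks that every page is already modified and otherwise strips PROT_WRITE, so the first real store faults and records the modification. The sketch below (not kernel code, not part of this webrev) writes that policy out as a plain function; the mod[] array stands in for per-page hat_ismod()/hat_setmod() state.

	/*
	 * Editorial sketch, not kernel code: the modified-bit policy encoded
	 * by the SEGVN_UPDATE_MODBITS() macro, written out as a function.
	 */
	#include <stdio.h>

	#define PROT_WRITE	0x2
	enum rw { S_OTHER, S_READ, S_WRITE };

	/* Returns the protections to load; may clear PROT_WRITE from prot. */
	static unsigned
	update_modbits(int *mod, int pages, enum rw rw, unsigned prot, unsigned vpprot)
	{
		int i;

		if (rw == S_WRITE) {
			/* A write fault: every constituent page becomes modified. */
			for (i = 0; i < pages; i++)
				mod[i] = 1;
		} else if (rw != S_OTHER && (prot & vpprot & PROT_WRITE)) {
			/*
			 * We would hand out write permission without a write
			 * fault.  Unless every page is already marked modified,
			 * withhold PROT_WRITE so the first store faults and
			 * sets the bit.
			 */
			for (i = 0; i < pages; i++) {
				if (!mod[i]) {
					prot &= ~PROT_WRITE;
					break;
				}
			}
		}
		return (prot);
	}

	int
	main(void)
	{
		int mod[4] = { 1, 0, 1, 1 };

		printf("%x\n", update_modbits(mod, 4, S_READ, PROT_WRITE, PROT_WRITE));	/* 0: withheld */
		printf("%x\n", update_modbits(mod, 4, S_WRITE, PROT_WRITE, PROT_WRITE));	/* 2: all marked */
		return (0);
	}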
3810 3810 #ifdef VM_STATS
3811 3811
3812 3812 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3813 3813 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3814 3814
3815 3815 #else /* VM_STATS */
3816 3816
3817 3817 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3818 3818
3819 3819 #endif
3820 3820
3821 3821 static faultcode_t
3822 3822 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3823 3823 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3824 3824 caddr_t eaddr, int brkcow)
3825 3825 {
3826 3826 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3827 3827 struct anon_map *amp = svd->amp;
3828 3828 uchar_t segtype = svd->type;
3829 3829 uint_t szc = seg->s_szc;
3830 3830 size_t pgsz = page_get_pagesize(szc);
3831 3831 size_t maxpgsz = pgsz;
3832 3832 pgcnt_t pages = btop(pgsz);
3833 3833 pgcnt_t maxpages = pages;
3834 3834 size_t ppasize = (pages + 1) * sizeof (page_t *);
3835 3835 caddr_t a = lpgaddr;
3836 3836 caddr_t maxlpgeaddr = lpgeaddr;
3837 3837 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3838 3838 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3839 3839 struct vpage *vpage = (svd->vpage != NULL) ?
3840 3840 &svd->vpage[seg_page(seg, a)] : NULL;
3841 3841 vnode_t *vp = svd->vp;
3842 3842 page_t **ppa;
3843 3843 uint_t pszc;
3844 3844 size_t ppgsz;
3845 3845 pgcnt_t ppages;
3846 3846 faultcode_t err = 0;
3847 3847 int ierr;
3848 3848 int vop_size_err = 0;
3849 3849 uint_t protchk, prot, vpprot;
3850 3850 ulong_t i;
3851 3851 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3852 3852 anon_sync_obj_t an_cookie;
3853 3853 enum seg_rw arw;
3854 3854 int alloc_failed = 0;
3855 3855 int adjszc_chk;
3856 3856 struct vattr va;
3857 3857 int xhat = 0;
3858 3858 page_t *pplist;
3859 3859 pfn_t pfn;
3860 3860 int physcontig;
3861 3861 int upgrdfail;
3862 3862 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3863 3863 int tron = (svd->tr_state == SEGVN_TR_ON);
3864 3864
3865 3865 ASSERT(szc != 0);
3866 3866 ASSERT(vp != NULL);
3867 3867 ASSERT(brkcow == 0 || amp != NULL);
3868 3868 ASSERT(tron == 0 || amp != NULL);
3869 3869 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3870 3870 ASSERT(!(svd->flags & MAP_NORESERVE));
3871 3871 ASSERT(type != F_SOFTUNLOCK);
3872 3872 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3873 3873 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3874 3874 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3875 3875 ASSERT(seg->s_szc < NBBY * sizeof (int));
3876 3876 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3877 3877 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3878 3878
3879 3879 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3880 3880 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3881 3881
3882 3882 if (svd->flags & MAP_TEXT) {
3883 3883 hat_flag |= HAT_LOAD_TEXT;
3884 3884 }
3885 3885
3886 3886 if (svd->pageprot) {
3887 3887 switch (rw) {
3888 3888 case S_READ:
3889 3889 protchk = PROT_READ;
3890 3890 break;
3891 3891 case S_WRITE:
3892 3892 protchk = PROT_WRITE;
3893 3893 break;
3894 3894 case S_EXEC:
3895 3895 protchk = PROT_EXEC;
3896 3896 break;
3897 3897 case S_OTHER:
3898 3898 default:
3899 3899 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3900 3900 break;
3901 3901 }
3902 3902 } else {
3903 3903 prot = svd->prot;
3904 3904 /* caller has already done segment level protection check. */
3905 3905 }
3906 3906
3907 3907 if (seg->s_as->a_hat != hat) {
3908 3908 xhat = 1;
3909 3909 }
3910 3910
3911 3911 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3912 3912 SEGVN_VMSTAT_FLTVNPAGES(2);
3913 3913 arw = S_READ;
3914 3914 } else {
3915 3915 arw = rw;
3916 3916 }
3917 3917
3918 3918 ppa = kmem_alloc(ppasize, KM_SLEEP);
3919 3919
3920 3920 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3921 3921
3922 3922 for (;;) {
3923 3923 adjszc_chk = 0;
3924 3924 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3925 3925 if (adjszc_chk) {
3926 3926 while (szc < seg->s_szc) {
3927 3927 uintptr_t e;
3928 3928 uint_t tszc;
3929 3929 tszc = segvn_anypgsz_vnode ? szc + 1 :
3930 3930 seg->s_szc;
3931 3931 ppgsz = page_get_pagesize(tszc);
3932 3932 if (!IS_P2ALIGNED(a, ppgsz) ||
3933 3933 ((alloc_failed >> tszc) & 0x1)) {
3934 3934 break;
3935 3935 }
3936 3936 SEGVN_VMSTAT_FLTVNPAGES(4);
3937 3937 szc = tszc;
3938 3938 pgsz = ppgsz;
3939 3939 pages = btop(pgsz);
3940 3940 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3941 3941 lpgeaddr = (caddr_t)e;
3942 3942 }
3943 3943 }
3944 3944
3945 3945 again:
3946 3946 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3947 3947 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3948 3948 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3949 3949 anon_array_enter(amp, aindx, &an_cookie);
3950 3950 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3951 3951 SEGVN_VMSTAT_FLTVNPAGES(5);
3952 3952 ASSERT(anon_pages(amp->ahp, aindx,
3953 3953 maxpages) == maxpages);
3954 3954 anon_array_exit(&an_cookie);
3955 3955 ANON_LOCK_EXIT(&->a_rwlock);
3956 3956 err = segvn_fault_anonpages(hat, seg,
3957 3957 a, a + maxpgsz, type, rw,
3958 3958 MAX(a, addr),
3959 3959 MIN(a + maxpgsz, eaddr), brkcow);
3960 3960 if (err != 0) {
3961 3961 SEGVN_VMSTAT_FLTVNPAGES(6);
3962 3962 goto out;
3963 3963 }
3964 3964 if (szc < seg->s_szc) {
3965 3965 szc = seg->s_szc;
3966 3966 pgsz = maxpgsz;
3967 3967 pages = maxpages;
3968 3968 lpgeaddr = maxlpgeaddr;
3969 3969 }
3970 3970 goto next;
3971 3971 } else {
3972 3972 ASSERT(anon_pages(amp->ahp, aindx,
3973 3973 maxpages) == 0);
3974 3974 SEGVN_VMSTAT_FLTVNPAGES(7);
3975 3975 anon_array_exit(&an_cookie);
3976 3976 ANON_LOCK_EXIT(&->a_rwlock);
3977 3977 }
3978 3978 }
3979 3979 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3980 3980 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3981 3981
3982 3982 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3983 3983 ASSERT(vpage != NULL);
3984 3984 prot = VPP_PROT(vpage);
3985 3985 ASSERT(sameprot(seg, a, maxpgsz));
3986 3986 if ((prot & protchk) == 0) {
3987 3987 SEGVN_VMSTAT_FLTVNPAGES(8);
3988 3988 err = FC_PROT;
3989 3989 goto out;
3990 3990 }
3991 3991 }
3992 3992 if (type == F_SOFTLOCK) {
3993 3993 atomic_add_long((ulong_t *)&svd->softlockcnt,
3994 3994 pages);
3995 3995 }
3996 3996
3997 3997 pplist = NULL;
3998 3998 physcontig = 0;
3999 3999 ppa[0] = NULL;
4000 4000 if (!brkcow && !tron && szc &&
4001 4001 !page_exists_physcontig(vp, off, szc,
4002 4002 segtype == MAP_PRIVATE ? ppa : NULL)) {
4003 4003 SEGVN_VMSTAT_FLTVNPAGES(9);
4004 4004 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
4005 4005 szc, 0, 0) && type != F_SOFTLOCK) {
4006 4006 SEGVN_VMSTAT_FLTVNPAGES(10);
4007 4007 pszc = 0;
4008 4008 ierr = -1;
4009 4009 alloc_failed |= (1 << szc);
4010 4010 break;
4011 4011 }
4012 4012 if (pplist != NULL &&
4013 4013 vp->v_mpssdata == SEGVN_PAGEIO) {
4014 4014 int downsize;
4015 4015 SEGVN_VMSTAT_FLTVNPAGES(11);
4016 4016 physcontig = segvn_fill_vp_pages(svd,
4017 4017 vp, off, szc, ppa, &pplist,
4018 4018 &pszc, &downsize);
4019 4019 ASSERT(!physcontig || pplist == NULL);
4020 4020 if (!physcontig && downsize &&
4021 4021 type != F_SOFTLOCK) {
4022 4022 ASSERT(pplist == NULL);
4023 4023 SEGVN_VMSTAT_FLTVNPAGES(12);
4024 4024 ierr = -1;
4025 4025 break;
4026 4026 }
4027 4027 ASSERT(!physcontig ||
4028 4028 segtype == MAP_PRIVATE ||
4029 4029 ppa[0] == NULL);
4030 4030 if (physcontig && ppa[0] == NULL) {
4031 4031 physcontig = 0;
4032 4032 }
4033 4033 }
4034 4034 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
4035 4035 SEGVN_VMSTAT_FLTVNPAGES(13);
4036 4036 ASSERT(segtype == MAP_PRIVATE);
4037 4037 physcontig = 1;
4038 4038 }
4039 4039
4040 4040 if (!physcontig) {
4041 4041 SEGVN_VMSTAT_FLTVNPAGES(14);
4042 4042 ppa[0] = NULL;
4043 4043 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
4044 4044 &vpprot, ppa, pgsz, seg, a, arw,
4045 4045 svd->cred, NULL);
4046 4046 #ifdef DEBUG
4047 4047 if (ierr == 0) {
4048 4048 for (i = 0; i < pages; i++) {
4049 4049 ASSERT(PAGE_LOCKED(ppa[i]));
4050 4050 ASSERT(!PP_ISFREE(ppa[i]));
4051 4051 ASSERT(ppa[i]->p_vnode == vp);
4052 4052 ASSERT(ppa[i]->p_offset ==
4053 4053 off + (i << PAGESHIFT));
4054 4054 }
4055 4055 }
4056 4056 #endif /* DEBUG */
4057 4057 if (segtype == MAP_PRIVATE) {
4058 4058 SEGVN_VMSTAT_FLTVNPAGES(15);
4059 4059 vpprot &= ~PROT_WRITE;
4060 4060 }
4061 4061 } else {
4062 4062 ASSERT(segtype == MAP_PRIVATE);
4063 4063 SEGVN_VMSTAT_FLTVNPAGES(16);
4064 4064 vpprot = PROT_ALL & ~PROT_WRITE;
4065 4065 ierr = 0;
4066 4066 }
4067 4067
4068 4068 if (ierr != 0) {
4069 4069 SEGVN_VMSTAT_FLTVNPAGES(17);
4070 4070 if (pplist != NULL) {
4071 4071 SEGVN_VMSTAT_FLTVNPAGES(18);
4072 4072 page_free_replacement_page(pplist);
4073 4073 page_create_putback(pages);
4074 4074 }
4075 4075 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4076 4076 if (a + pgsz <= eaddr) {
4077 4077 SEGVN_VMSTAT_FLTVNPAGES(19);
4078 4078 err = FC_MAKE_ERR(ierr);
4079 4079 goto out;
4080 4080 }
4081 4081 va.va_mask = AT_SIZE;
4082 4082 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4083 4083 SEGVN_VMSTAT_FLTVNPAGES(20);
4084 4084 err = FC_MAKE_ERR(EIO);
4085 4085 goto out;
4086 4086 }
4087 4087 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4088 4088 SEGVN_VMSTAT_FLTVNPAGES(21);
4089 4089 err = FC_MAKE_ERR(ierr);
4090 4090 goto out;
4091 4091 }
4092 4092 if (btopr(va.va_size) <
4093 4093 btopr(off + (eaddr - a))) {
4094 4094 SEGVN_VMSTAT_FLTVNPAGES(22);
4095 4095 err = FC_MAKE_ERR(ierr);
4096 4096 goto out;
4097 4097 }
4098 4098 if (brkcow || tron || type == F_SOFTLOCK) {
4099 4099 /* can't reduce map area */
4100 4100 SEGVN_VMSTAT_FLTVNPAGES(23);
4101 4101 vop_size_err = 1;
4102 4102 goto out;
4103 4103 }
4104 4104 SEGVN_VMSTAT_FLTVNPAGES(24);
4105 4105 ASSERT(szc != 0);
4106 4106 pszc = 0;
4107 4107 ierr = -1;
4108 4108 break;
4109 4109 }
4110 4110
4111 4111 if (amp != NULL) {
4112 4112 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4113 4113 anon_array_enter(amp, aindx, &an_cookie);
4114 4114 }
4115 4115 if (amp != NULL &&
4116 4116 anon_get_ptr(amp->ahp, aindx) != NULL) {
4117 4117 ulong_t taindx = P2ALIGN(aindx, maxpages);
4118 4118
4119 4119 SEGVN_VMSTAT_FLTVNPAGES(25);
4120 4120 ASSERT(anon_pages(amp->ahp, taindx,
4121 4121 maxpages) == maxpages);
4122 4122 for (i = 0; i < pages; i++) {
4123 4123 page_unlock(ppa[i]);
4124 4124 }
4125 4125 anon_array_exit(&an_cookie);
4126 4126 ANON_LOCK_EXIT(&->a_rwlock);
4127 4127 if (pplist != NULL) {
4128 4128 page_free_replacement_page(pplist);
4129 4129 page_create_putback(pages);
4130 4130 }
4131 4131 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4132 4132 if (szc < seg->s_szc) {
4133 4133 SEGVN_VMSTAT_FLTVNPAGES(26);
4134 4134 /*
4135 4135 * For private segments SOFTLOCK
4136 4136 * either always breaks cow (any rw
4137 4137 * type except S_READ_NOCOW) or
4138 4138 * address space is locked as writer
4139 4139 * (S_READ_NOCOW case) and anon slots
4140 4140 * can't show up on second check.
4141 4141 * Therefore if we are here for
4142 4142 * SOFTLOCK case it must be a cow
4143 4143 * break but cow break never reduces
4144 4144 				 * szc. Text replication (tron) in
4145 4145 * this case works as cow break.
4146 4146 * Thus the assert below.
4147 4147 */
4148 4148 ASSERT(!brkcow && !tron &&
4149 4149 type != F_SOFTLOCK);
4150 4150 pszc = seg->s_szc;
4151 4151 ierr = -2;
4152 4152 break;
4153 4153 }
4154 4154 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4155 4155 goto again;
4156 4156 }
4157 4157 #ifdef DEBUG
4158 4158 if (amp != NULL) {
4159 4159 ulong_t taindx = P2ALIGN(aindx, maxpages);
4160 4160 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4161 4161 }
4162 4162 #endif /* DEBUG */
4163 4163
4164 4164 if (brkcow || tron) {
4165 4165 ASSERT(amp != NULL);
4166 4166 ASSERT(pplist == NULL);
4167 4167 ASSERT(szc == seg->s_szc);
4168 4168 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4169 4169 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4170 4170 SEGVN_VMSTAT_FLTVNPAGES(27);
4171 4171 ierr = anon_map_privatepages(amp, aindx, szc,
4172 4172 seg, a, prot, ppa, vpage, segvn_anypgsz,
4173 4173 tron ? PG_LOCAL : 0, svd->cred);
4174 4174 if (ierr != 0) {
4175 4175 SEGVN_VMSTAT_FLTVNPAGES(28);
4176 4176 anon_array_exit(&an_cookie);
4177 4177 ANON_LOCK_EXIT(&->a_rwlock);
4178 4178 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4179 4179 err = FC_MAKE_ERR(ierr);
4180 4180 goto out;
4181 4181 }
4182 4182
4183 4183 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4184 4184 /*
4185 4185 * p_szc can't be changed for locked
4186 4186 * swapfs pages.
4187 4187 */
4188 4188 ASSERT(svd->rcookie ==
4189 4189 HAT_INVALID_REGION_COOKIE);
4190 4190 hat_memload_array(hat, a, pgsz, ppa, prot,
4191 4191 hat_flag);
4192 4192
4193 4193 if (!(hat_flag & HAT_LOAD_LOCK)) {
4194 4194 SEGVN_VMSTAT_FLTVNPAGES(29);
4195 4195 for (i = 0; i < pages; i++) {
4196 4196 page_unlock(ppa[i]);
4197 4197 }
4198 4198 }
4199 4199 anon_array_exit(&an_cookie);
4200 4200 ANON_LOCK_EXIT(&->a_rwlock);
4201 4201 goto next;
4202 4202 }
4203 4203
4204 4204 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4205 4205 (!svd->pageprot && svd->prot == (prot & vpprot)));
4206 4206
4207 4207 pfn = page_pptonum(ppa[0]);
4208 4208 /*
4209 4209 * hat_page_demote() needs an SE_EXCL lock on one of
4210 4210 * constituent page_t's and it decreases root's p_szc
4211 4211 			 * last. This means that if the root's p_szc is equal to szc and
4212 4212 			 * all its constituent pages are locked, any
4213 4213 			 * hat_page_demote() that could have changed p_szc to
4214 4214 			 * szc is already done and no new hat_page_demote()
4215 4215 			 * can start for this large page.
4216 4216 */
4217 4217
4218 4218 /*
4219 4219 			 * We need to make sure the same mapping size is used for
4220 4220 			 * the same address range if there's a possibility the
4221 4221 			 * address is already mapped, because the hat layer panics
4222 4222 			 * when a translation is loaded for a range already
4223 4223 			 * mapped with a different page size. We achieve this
4224 4224 			 * by always using the largest page size possible subject
4225 4225 			 * to the constraints of page size, segment page size
4226 4226 			 * and page alignment. Since mappings are invalidated
4227 4227 			 * when those constraints change, making it
4228 4228 			 * impossible to use the previously used mapping size, no
4229 4229 			 * mapping size conflicts should happen.
4230 4230 */
4231 4231
4232 4232 chkszc:
4233 4233 if ((pszc = ppa[0]->p_szc) == szc &&
4234 4234 IS_P2ALIGNED(pfn, pages)) {
4235 4235
4236 4236 SEGVN_VMSTAT_FLTVNPAGES(30);
4237 4237 #ifdef DEBUG
4238 4238 for (i = 0; i < pages; i++) {
4239 4239 ASSERT(PAGE_LOCKED(ppa[i]));
4240 4240 ASSERT(!PP_ISFREE(ppa[i]));
4241 4241 ASSERT(page_pptonum(ppa[i]) ==
4242 4242 pfn + i);
4243 4243 ASSERT(ppa[i]->p_szc == szc);
4244 4244 ASSERT(ppa[i]->p_vnode == vp);
4245 4245 ASSERT(ppa[i]->p_offset ==
4246 4246 off + (i << PAGESHIFT));
4247 4247 }
4248 4248 #endif /* DEBUG */
4249 4249 /*
4250 4250 				 * All pages are of the szc we need and they are
4251 4251 				 * all locked so they can't change szc. Load
4252 4252 				 * translations.
4253 4253 				 *
4254 4254 				 * If the page got promoted since the last check
4255 4255 				 * we don't need pplist.
4256 4256 */
4257 4257 if (pplist != NULL) {
4258 4258 page_free_replacement_page(pplist);
4259 4259 page_create_putback(pages);
4260 4260 }
4261 4261 if (PP_ISMIGRATE(ppa[0])) {
4262 4262 page_migrate(seg, a, ppa, pages);
4263 4263 }
4264 4264 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4265 4265 prot, vpprot);
4266 4266 if (!xhat) {
4267 4267 hat_memload_array_region(hat, a, pgsz,
4268 4268 ppa, prot & vpprot, hat_flag,
4269 4269 svd->rcookie);
4270 4270 } else {
4271 4271 /*
4272 4272 * avoid large xhat mappings to FS
4273 4273 * pages so that hat_page_demote()
4274 4274 * doesn't need to check for xhat
4275 4275 * large mappings.
4276 4276 * Don't use regions with xhats.
4277 4277 */
4278 4278 for (i = 0; i < pages; i++) {
4279 4279 hat_memload(hat,
4280 4280 a + (i << PAGESHIFT),
4281 4281 ppa[i], prot & vpprot,
4282 4282 hat_flag);
4283 4283 }
4284 4284 }
4285 4285
4286 4286 if (!(hat_flag & HAT_LOAD_LOCK)) {
4287 4287 for (i = 0; i < pages; i++) {
4288 4288 page_unlock(ppa[i]);
4289 4289 }
4290 4290 }
4291 4291 if (amp != NULL) {
4292 4292 anon_array_exit(&an_cookie);
4293 4293 ANON_LOCK_EXIT(&->a_rwlock);
4294 4294 }
4295 4295 goto next;
4296 4296 }
4297 4297
4298 4298 /*
4299 4299 * See if upsize is possible.
4300 4300 */
4301 4301 if (pszc > szc && szc < seg->s_szc &&
4302 4302 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4303 4303 pgcnt_t aphase;
4304 4304 uint_t pszc1 = MIN(pszc, seg->s_szc);
4305 4305 ppgsz = page_get_pagesize(pszc1);
4306 4306 ppages = btop(ppgsz);
4307 4307 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4308 4308
4309 4309 ASSERT(type != F_SOFTLOCK);
4310 4310
4311 4311 SEGVN_VMSTAT_FLTVNPAGES(31);
4312 4312 if (aphase != P2PHASE(pfn, ppages)) {
4313 4313 segvn_faultvnmpss_align_err4++;
4314 4314 } else {
4315 4315 SEGVN_VMSTAT_FLTVNPAGES(32);
4316 4316 if (pplist != NULL) {
4317 4317 page_t *pl = pplist;
4318 4318 page_free_replacement_page(pl);
4319 4319 page_create_putback(pages);
4320 4320 }
4321 4321 for (i = 0; i < pages; i++) {
4322 4322 page_unlock(ppa[i]);
4323 4323 }
4324 4324 if (amp != NULL) {
4325 4325 anon_array_exit(&an_cookie);
4326 4326 ANON_LOCK_EXIT(&->a_rwlock);
4327 4327 }
4328 4328 pszc = pszc1;
4329 4329 ierr = -2;
4330 4330 break;
4331 4331 }
4332 4332 }
4333 4333
4334 4334 /*
4335 4335 			 * Check if we should use the smallest mapping size.
4336 4336 */
4337 4337 upgrdfail = 0;
4338 4338 if (szc == 0 || xhat ||
4339 4339 (pszc >= szc &&
4340 4340 !IS_P2ALIGNED(pfn, pages)) ||
4341 4341 (pszc < szc &&
4342 4342 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4343 4343 &pszc))) {
4344 4344
4345 4345 if (upgrdfail && type != F_SOFTLOCK) {
4346 4346 /*
4347 4347 * segvn_full_szcpages failed to lock
4348 4348 * all pages EXCL. Size down.
4349 4349 */
4350 4350 ASSERT(pszc < szc);
4351 4351
4352 4352 SEGVN_VMSTAT_FLTVNPAGES(33);
4353 4353
4354 4354 if (pplist != NULL) {
4355 4355 page_t *pl = pplist;
4356 4356 page_free_replacement_page(pl);
4357 4357 page_create_putback(pages);
4358 4358 }
4359 4359
4360 4360 for (i = 0; i < pages; i++) {
4361 4361 page_unlock(ppa[i]);
4362 4362 }
4363 4363 if (amp != NULL) {
4364 4364 anon_array_exit(&an_cookie);
4365 4365 ANON_LOCK_EXIT(&->a_rwlock);
4366 4366 }
4367 4367 ierr = -1;
4368 4368 break;
4369 4369 }
4370 4370 if (szc != 0 && !xhat && !upgrdfail) {
4371 4371 segvn_faultvnmpss_align_err5++;
4372 4372 }
4373 4373 SEGVN_VMSTAT_FLTVNPAGES(34);
4374 4374 if (pplist != NULL) {
4375 4375 page_free_replacement_page(pplist);
4376 4376 page_create_putback(pages);
4377 4377 }
4378 4378 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4379 4379 prot, vpprot);
4380 4380 if (upgrdfail && segvn_anypgsz_vnode) {
4381 4381 /* SOFTLOCK case */
4382 4382 hat_memload_array_region(hat, a, pgsz,
4383 4383 ppa, prot & vpprot, hat_flag,
4384 4384 svd->rcookie);
4385 4385 } else {
4386 4386 for (i = 0; i < pages; i++) {
4387 4387 hat_memload_region(hat,
4388 4388 a + (i << PAGESHIFT),
4389 4389 ppa[i], prot & vpprot,
4390 4390 hat_flag, svd->rcookie);
4391 4391 }
4392 4392 }
4393 4393 if (!(hat_flag & HAT_LOAD_LOCK)) {
4394 4394 for (i = 0; i < pages; i++) {
4395 4395 page_unlock(ppa[i]);
4396 4396 }
4397 4397 }
4398 4398 if (amp != NULL) {
4399 4399 anon_array_exit(&an_cookie);
4400 4400 ANON_LOCK_EXIT(&->a_rwlock);
4401 4401 }
4402 4402 goto next;
4403 4403 }
4404 4404
4405 4405 if (pszc == szc) {
4406 4406 /*
4407 4407 * segvn_full_szcpages() upgraded pages szc.
4408 4408 */
4409 4409 ASSERT(pszc == ppa[0]->p_szc);
4410 4410 ASSERT(IS_P2ALIGNED(pfn, pages));
4411 4411 goto chkszc;
4412 4412 }
4413 4413
4414 4414 if (pszc > szc) {
4415 4415 kmutex_t *szcmtx;
4416 4416 SEGVN_VMSTAT_FLTVNPAGES(35);
4417 4417 /*
4418 4418 * p_szc of ppa[0] can change since we haven't
4419 4419 * locked all constituent pages. Call
4420 4420 * page_lock_szc() to prevent szc changes.
4421 4421 * This should be a rare case that happens when
4422 4422 * multiple segments use a different page size
4423 4423 * to map the same file offsets.
4424 4424 */
4425 4425 szcmtx = page_szc_lock(ppa[0]);
4426 4426 pszc = ppa[0]->p_szc;
4427 4427 ASSERT(szcmtx != NULL || pszc == 0);
4428 4428 ASSERT(ppa[0]->p_szc <= pszc);
4429 4429 if (pszc <= szc) {
4430 4430 SEGVN_VMSTAT_FLTVNPAGES(36);
4431 4431 if (szcmtx != NULL) {
4432 4432 mutex_exit(szcmtx);
4433 4433 }
4434 4434 goto chkszc;
4435 4435 }
4436 4436 if (pplist != NULL) {
4437 4437 /*
4438 4438 					 * The page got promoted since the last check,
4439 4439 					 * so we don't need the preallocated large
4440 4440 					 * page.
4441 4441 */
4442 4442 SEGVN_VMSTAT_FLTVNPAGES(37);
4443 4443 page_free_replacement_page(pplist);
4444 4444 page_create_putback(pages);
4445 4445 }
4446 4446 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4447 4447 prot, vpprot);
4448 4448 hat_memload_array_region(hat, a, pgsz, ppa,
4449 4449 prot & vpprot, hat_flag, svd->rcookie);
4450 4450 mutex_exit(szcmtx);
4451 4451 if (!(hat_flag & HAT_LOAD_LOCK)) {
4452 4452 for (i = 0; i < pages; i++) {
4453 4453 page_unlock(ppa[i]);
4454 4454 }
4455 4455 }
4456 4456 if (amp != NULL) {
4457 4457 anon_array_exit(&an_cookie);
4458 4458 ANON_LOCK_EXIT(&->a_rwlock);
4459 4459 }
4460 4460 goto next;
4461 4461 }
4462 4462
4463 4463 /*
4464 4464 			 * If the page got demoted since the last check
4465 4465 			 * we may not have allocated a larger page.
4466 4466 			 * Allocate one now.
4467 4467 */
4468 4468 if (pplist == NULL &&
4469 4469 page_alloc_pages(vp, seg, a, &pplist, NULL,
4470 4470 szc, 0, 0) && type != F_SOFTLOCK) {
4471 4471 SEGVN_VMSTAT_FLTVNPAGES(38);
4472 4472 for (i = 0; i < pages; i++) {
4473 4473 page_unlock(ppa[i]);
4474 4474 }
4475 4475 if (amp != NULL) {
4476 4476 anon_array_exit(&an_cookie);
4477 4477 ANON_LOCK_EXIT(&->a_rwlock);
4478 4478 }
4479 4479 ierr = -1;
4480 4480 alloc_failed |= (1 << szc);
4481 4481 break;
4482 4482 }
4483 4483
4484 4484 SEGVN_VMSTAT_FLTVNPAGES(39);
4485 4485
4486 4486 if (pplist != NULL) {
4487 4487 segvn_relocate_pages(ppa, pplist);
4488 4488 #ifdef DEBUG
4489 4489 } else {
4490 4490 ASSERT(type == F_SOFTLOCK);
4491 4491 SEGVN_VMSTAT_FLTVNPAGES(40);
4492 4492 #endif /* DEBUG */
4493 4493 }
4494 4494
4495 4495 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4496 4496
4497 4497 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4498 4498 ASSERT(type == F_SOFTLOCK);
4499 4499 for (i = 0; i < pages; i++) {
4500 4500 ASSERT(ppa[i]->p_szc < szc);
4501 4501 hat_memload_region(hat,
4502 4502 a + (i << PAGESHIFT),
4503 4503 ppa[i], prot & vpprot, hat_flag,
4504 4504 svd->rcookie);
4505 4505 }
4506 4506 } else {
4507 4507 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4508 4508 hat_memload_array_region(hat, a, pgsz, ppa,
4509 4509 prot & vpprot, hat_flag, svd->rcookie);
4510 4510 }
4511 4511 if (!(hat_flag & HAT_LOAD_LOCK)) {
4512 4512 for (i = 0; i < pages; i++) {
4513 4513 ASSERT(PAGE_SHARED(ppa[i]));
4514 4514 page_unlock(ppa[i]);
4515 4515 }
4516 4516 }
4517 4517 if (amp != NULL) {
4518 4518 anon_array_exit(&an_cookie);
4519 4519 ANON_LOCK_EXIT(&->a_rwlock);
4520 4520 }
4521 4521
4522 4522 next:
4523 4523 if (vpage != NULL) {
4524 4524 vpage += pages;
4525 4525 }
4526 4526 adjszc_chk = 1;
4527 4527 }
4528 4528 if (a == lpgeaddr)
4529 4529 break;
4530 4530 ASSERT(a < lpgeaddr);
4531 4531
4532 4532 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4533 4533
4534 4534 /*
4535 4535 		 * ierr == -1 means we failed to map with a large page
4536 4536 		 * (either due to allocation/relocation failures or
4537 4537 		 * misalignment with other mappings to this file).
4538 4538 		 *
4539 4539 		 * ierr == -2 means some other thread allocated a large page
4540 4540 		 * after we gave up trying to map with a large page. Retry with a
4541 4541 		 * larger mapping.
4542 4542 */
4543 4543 ASSERT(ierr == -1 || ierr == -2);
4544 4544 ASSERT(ierr == -2 || szc != 0);
4545 4545 ASSERT(ierr == -1 || szc < seg->s_szc);
4546 4546 if (ierr == -2) {
4547 4547 SEGVN_VMSTAT_FLTVNPAGES(41);
4548 4548 ASSERT(pszc > szc && pszc <= seg->s_szc);
4549 4549 szc = pszc;
4550 4550 } else if (segvn_anypgsz_vnode) {
4551 4551 SEGVN_VMSTAT_FLTVNPAGES(42);
4552 4552 szc--;
4553 4553 } else {
4554 4554 SEGVN_VMSTAT_FLTVNPAGES(43);
4555 4555 ASSERT(pszc < szc);
4556 4556 /*
4557 4557 			 * Another process created a pszc large page,
4558 4558 			 * but we still have to drop to szc 0.
4559 4559 */
4560 4560 szc = 0;
4561 4561 }
4562 4562
4563 4563 pgsz = page_get_pagesize(szc);
4564 4564 pages = btop(pgsz);
4565 4565 if (ierr == -2) {
4566 4566 /*
4567 4567 * Size up case. Note lpgaddr may only be needed for
4568 4568 * softlock case so we don't adjust it here.
4569 4569 */
4570 4570 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4571 4571 ASSERT(a >= lpgaddr);
4572 4572 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4573 4573 off = svd->offset + (uintptr_t)(a - seg->s_base);
4574 4574 aindx = svd->anon_index + seg_page(seg, a);
4575 4575 vpage = (svd->vpage != NULL) ?
4576 4576 &svd->vpage[seg_page(seg, a)] : NULL;
4577 4577 } else {
4578 4578 /*
4579 4579 * Size down case. Note lpgaddr may only be needed for
4580 4580 * softlock case so we don't adjust it here.
4581 4581 */
4582 4582 ASSERT(IS_P2ALIGNED(a, pgsz));
4583 4583 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4584 4584 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4585 4585 ASSERT(a < lpgeaddr);
4586 4586 if (a < addr) {
4587 4587 SEGVN_VMSTAT_FLTVNPAGES(44);
4588 4588 /*
4589 4589 * The beginning of the large page region can
4590 4590 * be pulled to the right to make a smaller
4591 4591 * region. We haven't yet faulted a single
4592 4592 * page.
4593 4593 */
4594 4594 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4595 4595 ASSERT(a >= lpgaddr);
4596 4596 off = svd->offset +
4597 4597 (uintptr_t)(a - seg->s_base);
4598 4598 aindx = svd->anon_index + seg_page(seg, a);
4599 4599 vpage = (svd->vpage != NULL) ?
4600 4600 &svd->vpage[seg_page(seg, a)] : NULL;
4601 4601 }
4602 4602 }
4603 4603 }
4604 4604 out:
4605 4605 kmem_free(ppa, ppasize);
4606 4606 if (!err && !vop_size_err) {
4607 4607 SEGVN_VMSTAT_FLTVNPAGES(45);
4608 4608 return (0);
4609 4609 }
4610 4610 if (type == F_SOFTLOCK && a > lpgaddr) {
4611 4611 SEGVN_VMSTAT_FLTVNPAGES(46);
4612 4612 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4613 4613 }
4614 4614 if (!vop_size_err) {
4615 4615 SEGVN_VMSTAT_FLTVNPAGES(47);
4616 4616 return (err);
4617 4617 }
4618 4618 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4619 4619 /*
4620 4620  * The large page end is mapped beyond the end of the file and it's a cow
4621 4621  * fault (possibly a text replication induced cow) or a softlock, so we can't
4622 4622 * reduce the map area. For now just demote the segment. This should
4623 4623 * really only happen if the end of the file changed after the mapping
4624 4624 * was established since when large page segments are created we make
4625 4625 * sure they don't extend beyond the end of the file.
4626 4626 */
4627 4627 SEGVN_VMSTAT_FLTVNPAGES(48);
4628 4628
4629 4629 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4630 4630 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4631 4631 err = 0;
4632 4632 if (seg->s_szc != 0) {
4633 4633 segvn_fltvnpages_clrszc_cnt++;
4634 4634 ASSERT(svd->softlockcnt == 0);
4635 4635 err = segvn_clrszc(seg);
4636 4636 if (err != 0) {
4637 4637 segvn_fltvnpages_clrszc_err++;
4638 4638 }
4639 4639 }
4640 4640 ASSERT(err || seg->s_szc == 0);
4641 4641 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4642 4642 /* segvn_fault will do its job as if szc had been zero to begin with */
4643 4643 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4644 4644 }
4645 4645
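Editorial note on the retry loop above: when the inner loop in segvn_fault_vnodepages() breaks with ierr == -1 (size down) or ierr == -2 (size up), the outer loop picks a new page size and re-derives the working boundaries with P2ALIGN()/P2ROUNDUP(). The sketch below (not kernel code, not part of this webrev) shows only that boundary arithmetic, with the P2 macros written out per their usual sysmacros.h definitions; the addresses and page size are made-up examples.

	/*
	 * Editorial sketch, not kernel code: the boundary arithmetic the
	 * retry loop performs after a page-size change.
	 */
	#include <stdio.h>
	#include <stdint.h>

	#define P2ALIGN(x, a)	((x) & -(a))		/* round down to a (power of 2) */
	#define P2ROUNDUP(x, a)	(-(-(x) & -(a)))	/* round up to a (power of 2) */

	int
	main(void)
	{
		uint64_t addr  = 0x10003000;	/* first address still to fault */
		uint64_t eaddr = 0x10042000;	/* end of the requested range */
		uint64_t pgsz  = 0x200000;	/* new (smaller or larger) page size */

		/* Size-up (ierr == -2): pull 'a' back to the new large-page boundary. */
		uint64_t a = P2ALIGN(addr, pgsz);
		/* Both cases: extend the loop end to cover eaddr in whole large pages. */
		uint64_t lpgeaddr = P2ROUNDUP(eaddr, pgsz);

		printf("a        = 0x%llx\n", (unsigned long long)a);		/* 0x10000000 */
		printf("lpgeaddr = 0x%llx\n", (unsigned long long)lpgeaddr);	/* 0x10200000 */
		return (0);
	}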
4646 4646 /*
4647 4647 * This routine will attempt to fault in one large page.
4648 4648  * It will use smaller pages if that fails.
4649 4649 * It should only be called for pure anonymous segments.
4650 4650 */
4651 4651 static faultcode_t
4652 4652 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4653 4653 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4654 4654 caddr_t eaddr, int brkcow)
4655 4655 {
4656 4656 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4657 4657 struct anon_map *amp = svd->amp;
4658 4658 uchar_t segtype = svd->type;
4659 4659 uint_t szc = seg->s_szc;
4660 4660 size_t pgsz = page_get_pagesize(szc);
4661 4661 size_t maxpgsz = pgsz;
4662 4662 pgcnt_t pages = btop(pgsz);
4663 4663 uint_t ppaszc = szc;
4664 4664 caddr_t a = lpgaddr;
4665 4665 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4666 4666 struct vpage *vpage = (svd->vpage != NULL) ?
4667 4667 &svd->vpage[seg_page(seg, a)] : NULL;
4668 4668 page_t **ppa;
4669 4669 uint_t ppa_szc;
4670 4670 faultcode_t err;
4671 4671 int ierr;
4672 4672 uint_t protchk, prot, vpprot;
4673 4673 ulong_t i;
4674 4674 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4675 4675 anon_sync_obj_t cookie;
4676 4676 int adjszc_chk;
4677 4677 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4678 4678
4679 4679 ASSERT(szc != 0);
4680 4680 ASSERT(amp != NULL);
4681 4681 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4682 4682 ASSERT(!(svd->flags & MAP_NORESERVE));
4683 4683 ASSERT(type != F_SOFTUNLOCK);
4684 4684 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4685 4685 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4686 4686 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4687 4687
4688 4688 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4689 4689
4690 4690 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4691 4691 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4692 4692
4693 4693 if (svd->flags & MAP_TEXT) {
4694 4694 hat_flag |= HAT_LOAD_TEXT;
4695 4695 }
4696 4696
4697 4697 if (svd->pageprot) {
4698 4698 switch (rw) {
4699 4699 case S_READ:
4700 4700 protchk = PROT_READ;
4701 4701 break;
4702 4702 case S_WRITE:
4703 4703 protchk = PROT_WRITE;
4704 4704 break;
4705 4705 case S_EXEC:
4706 4706 protchk = PROT_EXEC;
4707 4707 break;
4708 4708 case S_OTHER:
4709 4709 default:
4710 4710 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4711 4711 break;
4712 4712 }
4713 4713 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4714 4714 } else {
4715 4715 prot = svd->prot;
4716 4716 /* caller has already done segment level protection check. */
4717 4717 }
4718 4718
4719 4719 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4720 4720 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4721 4721 for (;;) {
4722 4722 adjszc_chk = 0;
4723 4723 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4724 4724 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4725 4725 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4726 4726 ASSERT(vpage != NULL);
4727 4727 prot = VPP_PROT(vpage);
4728 4728 ASSERT(sameprot(seg, a, maxpgsz));
4729 4729 if ((prot & protchk) == 0) {
4730 4730 err = FC_PROT;
4731 4731 goto error;
4732 4732 }
4733 4733 }
4734 4734 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4735 4735 pgsz < maxpgsz) {
4736 4736 ASSERT(a > lpgaddr);
4737 4737 szc = seg->s_szc;
4738 4738 pgsz = maxpgsz;
4739 4739 pages = btop(pgsz);
4740 4740 ASSERT(IS_P2ALIGNED(aindx, pages));
4741 4741 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4742 4742 pgsz);
4743 4743 }
4744 4744 if (type == F_SOFTLOCK) {
4745 4745 atomic_add_long((ulong_t *)&svd->softlockcnt,
4746 4746 pages);
4747 4747 }
4748 4748 anon_array_enter(amp, aindx, &cookie);
4749 4749 ppa_szc = (uint_t)-1;
4750 4750 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4751 4751 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4752 4752 segvn_anypgsz, pgflags, svd->cred);
4753 4753 if (ierr != 0) {
4754 4754 anon_array_exit(&cookie);
4755 4755 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4756 4756 if (type == F_SOFTLOCK) {
4757 4757 atomic_add_long(
4758 4758 (ulong_t *)&svd->softlockcnt,
4759 4759 -pages);
4760 4760 }
4761 4761 if (ierr > 0) {
4762 4762 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4763 4763 err = FC_MAKE_ERR(ierr);
4764 4764 goto error;
4765 4765 }
4766 4766 break;
4767 4767 }
4768 4768
4769 4769 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4770 4770
4771 4771 ASSERT(segtype == MAP_SHARED ||
4772 4772 ppa[0]->p_szc <= szc);
4773 4773 ASSERT(segtype == MAP_PRIVATE ||
4774 4774 ppa[0]->p_szc >= szc);
4775 4775
4776 4776 /*
4777 4777 * Handle pages that have been marked for migration
4778 4778 */
4779 4779 if (lgrp_optimizations())
4780 4780 page_migrate(seg, a, ppa, pages);
4781 4781
4782 4782 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4783 4783
4784 4784 if (segtype == MAP_SHARED) {
4785 4785 vpprot |= PROT_WRITE;
4786 4786 }
4787 4787
4788 4788 hat_memload_array(hat, a, pgsz, ppa,
4789 4789 prot & vpprot, hat_flag);
4790 4790
4791 4791 if (hat_flag & HAT_LOAD_LOCK) {
4792 4792 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4793 4793 } else {
4794 4794 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4795 4795 for (i = 0; i < pages; i++)
4796 4796 page_unlock(ppa[i]);
4797 4797 }
4798 4798 if (vpage != NULL)
4799 4799 vpage += pages;
4800 4800
4801 4801 anon_array_exit(&cookie);
4802 4802 adjszc_chk = 1;
4803 4803 }
4804 4804 if (a == lpgeaddr)
4805 4805 break;
4806 4806 ASSERT(a < lpgeaddr);
4807 4807 /*
4808 4808 		 * ierr == -1 means we failed to allocate a large page,
4809 4809 		 * so do a size-down operation.
4810 4810 *
4811 4811 * ierr == -2 means some other process that privately shares
4812 4812 * pages with this process has allocated a larger page and we
4813 4813 * need to retry with larger pages. So do a size up
4814 4814 * operation. This relies on the fact that large pages are
4815 4815 * never partially shared i.e. if we share any constituent
4816 4816 * page of a large page with another process we must share the
4817 4817 * entire large page. Note this cannot happen for SOFTLOCK
4818 4818 * case, unless current address (a) is at the beginning of the
4819 4819 * next page size boundary because the other process couldn't
4820 4820 * have relocated locked pages.
4821 4821 */
4822 4822 ASSERT(ierr == -1 || ierr == -2);
4823 4823
4824 4824 if (segvn_anypgsz) {
4825 4825 ASSERT(ierr == -2 || szc != 0);
4826 4826 ASSERT(ierr == -1 || szc < seg->s_szc);
4827 4827 szc = (ierr == -1) ? szc - 1 : szc + 1;
4828 4828 } else {
4829 4829 /*
4830 4830 * For non COW faults and segvn_anypgsz == 0
4831 4831 * we need to be careful not to loop forever
4832 4832 * if existing page is found with szc other
4833 4833 * than 0 or seg->s_szc. This could be due
4834 4834 * to page relocations on behalf of DR or
4835 4835 * more likely large page creation. For this
4836 4836 * case simply re-size to existing page's szc
4837 4837 * if returned by anon_map_getpages().
4838 4838 */
4839 4839 if (ppa_szc == (uint_t)-1) {
4840 4840 szc = (ierr == -1) ? 0 : seg->s_szc;
4841 4841 } else {
4842 4842 ASSERT(ppa_szc <= seg->s_szc);
4843 4843 ASSERT(ierr == -2 || ppa_szc < szc);
4844 4844 ASSERT(ierr == -1 || ppa_szc > szc);
4845 4845 szc = ppa_szc;
4846 4846 }
4847 4847 }
4848 4848
4849 4849 pgsz = page_get_pagesize(szc);
4850 4850 pages = btop(pgsz);
4851 4851 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4852 4852 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4853 4853 if (type == F_SOFTLOCK) {
4854 4854 /*
4855 4855 * For softlocks we cannot reduce the fault area
4856 4856 * (calculated based on the largest page size for this
4857 4857 * segment) for size down and a is already next
4858 4858 			 * page size aligned as asserted above for size
4859 4859 * ups. Therefore just continue in case of softlock.
4860 4860 */
4861 4861 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4862 4862 continue; /* keep lint happy */
4863 4863 } else if (ierr == -2) {
4864 4864
4865 4865 /*
4866 4866 * Size up case. Note lpgaddr may only be needed for
4867 4867 * softlock case so we don't adjust it here.
4868 4868 */
4869 4869 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4870 4870 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4871 4871 ASSERT(a >= lpgaddr);
4872 4872 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4873 4873 aindx = svd->anon_index + seg_page(seg, a);
4874 4874 vpage = (svd->vpage != NULL) ?
4875 4875 &svd->vpage[seg_page(seg, a)] : NULL;
4876 4876 } else {
4877 4877 /*
4878 4878 * Size down case. Note lpgaddr may only be needed for
4879 4879 * softlock case so we don't adjust it here.
4880 4880 */
4881 4881 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4882 4882 ASSERT(IS_P2ALIGNED(a, pgsz));
4883 4883 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4884 4884 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4885 4885 ASSERT(a < lpgeaddr);
4886 4886 if (a < addr) {
4887 4887 /*
4888 4888 * The beginning of the large page region can
4889 4889 * be pulled to the right to make a smaller
4890 4890 * region. We haven't yet faulted a single
4891 4891 * page.
4892 4892 */
4893 4893 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4894 4894 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4895 4895 ASSERT(a >= lpgaddr);
4896 4896 aindx = svd->anon_index + seg_page(seg, a);
4897 4897 vpage = (svd->vpage != NULL) ?
4898 4898 &svd->vpage[seg_page(seg, a)] : NULL;
4899 4899 }
4900 4900 }
4901 4901 }
4902 4902 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4903 4903 ANON_LOCK_EXIT(&->a_rwlock);
4904 4904 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4905 4905 return (0);
4906 4906 error:
4907 4907 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4908 4908 ANON_LOCK_EXIT(&->a_rwlock);
4909 4909 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4910 4910 if (type == F_SOFTLOCK && a > lpgaddr) {
4911 4911 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4912 4912 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4913 4913 }
4914 4914 return (err);
4915 4915 }
4916 4916
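Editorial note on the size adjustment above: segvn_fault_anonpages() retries with a new page-size code chosen from ierr, segvn_anypgsz and the szc reported back by anon_map_getpages(). The sketch below (not kernel code, not part of this webrev) isolates that decision; (unsigned)-1 mirrors the "no szc reported" sentinel used for ppa_szc in the function.

	/*
	 * Editorial sketch, not kernel code: the page-size-code retry rule at
	 * the bottom of segvn_fault_anonpages().  ierr is -1 for "size down"
	 * and -2 for "size up".
	 */
	#include <stdio.h>

	static unsigned
	next_szc(int ierr, unsigned szc, unsigned seg_szc, unsigned ppa_szc, int anypgsz)
	{
		if (anypgsz)				/* step one size at a time */
			return (ierr == -1 ? szc - 1 : szc + 1);
		if (ppa_szc == (unsigned)-1)		/* nothing reported: 0 or segment size */
			return (ierr == -1 ? 0 : seg_szc);
		return (ppa_szc);			/* re-size to the existing page's szc */
	}

	int
	main(void)
	{
		printf("%u\n", next_szc(-1, 3, 3, (unsigned)-1, 1));	/* 2: size down */
		printf("%u\n", next_szc(-2, 1, 3, (unsigned)-1, 0));	/* 3: jump to seg szc */
		printf("%u\n", next_szc(-1, 3, 3, 1, 0));		/* 1: existing page's szc */
		return (0);
	}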
4917 4917 int fltadvice = 1; /* set to free behind pages for sequential access */
4918 4918
4919 4919 /*
4920 4920 * This routine is called via a machine specific fault handling routine.
4921 4921 * It is also called by software routines wishing to lock or unlock
4922 4922 * a range of addresses.
4923 4923 *
4924 4924 * Here is the basic algorithm:
4925 4925 * If unlocking
4926 4926 * Call segvn_softunlock
4927 4927 * Return
4928 4928 * endif
4929 4929 * Checking and set up work
4930 4930 * If we will need some non-anonymous pages
4931 4931 * Call VOP_GETPAGE over the range of non-anonymous pages
4932 4932 * endif
4933 4933 * Loop over all addresses requested
4934 4934 * Call segvn_faultpage passing in page list
4935 4935 * to load up translations and handle anonymous pages
4936 4936 * endloop
4937 4937 * Load up translation to any additional pages in page list not
4938 4938 * already handled that fit into this segment
4939 4939 */
4940 4940 static faultcode_t
4941 4941 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4942 4942 enum fault_type type, enum seg_rw rw)
4943 4943 {
4944 4944 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4945 4945 page_t **plp, **ppp, *pp;
4946 4946 u_offset_t off;
4947 4947 caddr_t a;
4948 4948 struct vpage *vpage;
4949 4949 uint_t vpprot, prot;
4950 4950 int err;
4951 4951 page_t *pl[PVN_GETPAGE_NUM + 1];
4952 4952 size_t plsz, pl_alloc_sz;
4953 4953 size_t page;
4954 4954 ulong_t anon_index;
4955 4955 struct anon_map *amp;
4956 4956 int dogetpage = 0;
4957 4957 caddr_t lpgaddr, lpgeaddr;
4958 4958 size_t pgsz;
4959 4959 anon_sync_obj_t cookie;
4960 4960 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4961 4961
4962 4962 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4963 4963 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4964 4964
4965 4965 /*
4966 4966 * First handle the easy stuff
4967 4967 */
4968 4968 if (type == F_SOFTUNLOCK) {
4969 4969 if (rw == S_READ_NOCOW) {
4970 4970 rw = S_READ;
4971 4971 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4972 4972 }
4973 4973 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4974 4974 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4975 4975 page_get_pagesize(seg->s_szc);
4976 4976 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4977 4977 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4978 4978 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4979 4979 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4980 4980 return (0);
4981 4981 }
4982 4982
4983 4983 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4984 4984 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4985 4985 if (brkcow == 0) {
4986 4986 if (svd->tr_state == SEGVN_TR_INIT) {
4987 4987 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4988 4988 if (svd->tr_state == SEGVN_TR_INIT) {
4989 4989 ASSERT(svd->vp != NULL && svd->amp == NULL);
4990 4990 ASSERT(svd->flags & MAP_TEXT);
4991 4991 ASSERT(svd->type == MAP_PRIVATE);
4992 4992 segvn_textrepl(seg);
4993 4993 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4994 4994 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4995 4995 svd->amp != NULL);
4996 4996 }
4997 4997 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4998 4998 }
4999 4999 } else if (svd->tr_state != SEGVN_TR_OFF) {
5000 5000 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5001 5001
5002 5002 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
5003 5003 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
5004 5004 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5005 5005 return (FC_PROT);
5006 5006 }
5007 5007
5008 5008 if (svd->tr_state == SEGVN_TR_ON) {
5009 5009 ASSERT(svd->vp != NULL && svd->amp != NULL);
5010 5010 segvn_textunrepl(seg, 0);
5011 5011 ASSERT(svd->amp == NULL &&
5012 5012 svd->tr_state == SEGVN_TR_OFF);
5013 5013 } else if (svd->tr_state != SEGVN_TR_OFF) {
5014 5014 svd->tr_state = SEGVN_TR_OFF;
5015 5015 }
5016 5016 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5017 5017 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5018 5018 }
5019 5019
5020 5020 top:
5021 5021 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5022 5022
5023 5023 /*
5024 5024 * If we have the same protections for the entire segment,
5025 5025	 * ensure that the access being attempted is legitimate.
5026 5026 */
5027 5027
5028 5028 if (svd->pageprot == 0) {
5029 5029 uint_t protchk;
5030 5030
5031 5031 switch (rw) {
5032 5032 case S_READ:
5033 5033 case S_READ_NOCOW:
5034 5034 protchk = PROT_READ;
5035 5035 break;
5036 5036 case S_WRITE:
5037 5037 protchk = PROT_WRITE;
5038 5038 break;
5039 5039 case S_EXEC:
5040 5040 protchk = PROT_EXEC;
5041 5041 break;
5042 5042 case S_OTHER:
5043 5043 default:
5044 5044 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
5045 5045 break;
5046 5046 }
5047 5047
5048 5048 if ((svd->prot & protchk) == 0) {
5049 5049 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5050 5050 return (FC_PROT); /* illegal access type */
5051 5051 }
5052 5052 }
5053 5053
5054 5054 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5055 5055 /* this must be SOFTLOCK S_READ fault */
5056 5056 ASSERT(svd->amp == NULL);
5057 5057 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5058 5058 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5059 5059 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5060 5060 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5061 5061 /*
5062 5062 * this must be the first ever non S_READ_NOCOW
5063 5063 * softlock for this segment.
5064 5064 */
5065 5065 ASSERT(svd->softlockcnt == 0);
5066 5066 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5067 5067 HAT_REGION_TEXT);
5068 5068 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5069 5069 }
5070 5070 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5071 5071 goto top;
5072 5072 }
5073 5073
5074 5074 /*
5075 5075 * We can't allow the long term use of softlocks for vmpss segments,
5076 5076 * because in some file truncation cases we should be able to demote
5077 5077 * the segment, which requires that there are no softlocks. The
5078 5078 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5079 5079 * segment is S_READ_NOCOW, where the caller holds the address space
5080 5080 * locked as writer and calls softunlock before dropping the as lock.
5081 5081 * S_READ_NOCOW is used by /proc to read memory from another user.
5082 5082 *
5083 5083 * Another deadlock between SOFTLOCK and file truncation can happen
5084 5084 * because segvn_fault_vnodepages() calls the FS one pagesize at
5085 5085 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5086 5086 * can cause a deadlock because the first set of page_t's remain
5087 5087 * locked SE_SHARED. To avoid this, we demote segments on a first
5088 5088 * SOFTLOCK if they have a length greater than the segment's
5089 5089 * page size.
5090 5090 *
5091 5091 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5092 5092 * the access type is S_READ_NOCOW and the fault length is less than
5093 5093 * or equal to the segment's page size. While this is quite restrictive,
5094 5094 * it should be the most common case of SOFTLOCK against a vmpss
5095 5095 * segment.
5096 5096 *
5097 5097 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5098 5098 * caller makes sure no COW will be caused by another thread for a
5099 5099 * softlocked page.
5100 5100 */
5101 5101 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5102 5102 int demote = 0;
5103 5103
5104 5104 if (rw != S_READ_NOCOW) {
5105 5105 demote = 1;
5106 5106 }
5107 5107 if (!demote && len > PAGESIZE) {
5108 5108 pgsz = page_get_pagesize(seg->s_szc);
5109 5109 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5110 5110 lpgeaddr);
5111 5111 if (lpgeaddr - lpgaddr > pgsz) {
5112 5112 demote = 1;
5113 5113 }
5114 5114 }
5115 5115
5116 5116 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5117 5117
5118 5118 if (demote) {
5119 5119 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5120 5120 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5121 5121 if (seg->s_szc != 0) {
5122 5122 segvn_vmpss_clrszc_cnt++;
5123 5123 ASSERT(svd->softlockcnt == 0);
5124 5124 err = segvn_clrszc(seg);
5125 5125 if (err) {
5126 5126 segvn_vmpss_clrszc_err++;
5127 5127 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5128 5128 return (FC_MAKE_ERR(err));
5129 5129 }
5130 5130 }
5131 5131 ASSERT(seg->s_szc == 0);
5132 5132 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5133 5133 goto top;
5134 5134 }
5135 5135 }
5136 5136
5137 5137 /*
5138 5138 * Check to see if we need to allocate an anon_map structure.
5139 5139 */
5140 5140 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5141 5141 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5142 5142 /*
5143 5143 * Drop the "read" lock on the segment and acquire
5144 5144 * the "write" version since we have to allocate the
5145 5145 * anon_map.
5146 5146 */
5147 5147 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5148 5148 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5149 5149
5150 5150 if (svd->amp == NULL) {
5151 5151 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5152 5152 svd->amp->a_szc = seg->s_szc;
5153 5153 }
5154 5154 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5155 5155
5156 5156 /*
5157 5157 * Start all over again since segment protections
5158 5158 * may have changed after we dropped the "read" lock.
5159 5159 */
5160 5160 goto top;
5161 5161 }
5162 5162
5163 5163 /*
5164 5164 * S_READ_NOCOW vs S_READ distinction was
5165 5165 * only needed for the code above. After
5166 5166 * that we treat it as S_READ.
5167 5167 */
5168 5168 if (rw == S_READ_NOCOW) {
5169 5169 ASSERT(type == F_SOFTLOCK);
5170 5170 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5171 5171 rw = S_READ;
5172 5172 }
5173 5173
5174 5174 amp = svd->amp;
5175 5175
5176 5176 /*
5177 5177 * MADV_SEQUENTIAL work is ignored for large page segments.
5178 5178 */
5179 5179 if (seg->s_szc != 0) {
5180 5180 pgsz = page_get_pagesize(seg->s_szc);
5181 5181 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5182 5182 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5183 5183 if (svd->vp == NULL) {
5184 5184 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5185 5185 lpgeaddr, type, rw, addr, addr + len, brkcow);
5186 5186 } else {
5187 5187 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5188 5188 lpgeaddr, type, rw, addr, addr + len, brkcow);
5189 5189 if (err == IE_RETRY) {
5190 5190 ASSERT(seg->s_szc == 0);
5191 5191 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5192 5192 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5193 5193 goto top;
5194 5194 }
5195 5195 }
5196 5196 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5197 5197 return (err);
5198 5198 }
5199 5199
5200 5200 page = seg_page(seg, addr);
5201 5201 if (amp != NULL) {
5202 5202 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5203 5203 anon_index = svd->anon_index + page;
5204 5204
5205 5205 if (type == F_PROT && rw == S_READ &&
5206 5206 svd->tr_state == SEGVN_TR_OFF &&
5207 5207 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5208 5208 size_t index = anon_index;
5209 5209 struct anon *ap;
5210 5210
5211 5211			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5212 5212 /*
5213 5213 * The fast path could apply to S_WRITE also, except
5214 5214 * that the protection fault could be caused by lazy
5215 5215 * tlb flush when ro->rw. In this case, the pte is
5216 5216 * RW already. But RO in the other cpu's tlb causes
5217 5217 * the fault. Since hat_chgprot won't do anything if
5218 5218 * pte doesn't change, we may end up faulting
5219 5219 * indefinitely until the RO tlb entry gets replaced.
5220 5220 */
5221 5221 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5222 5222 anon_array_enter(amp, index, &cookie);
5223 5223 ap = anon_get_ptr(amp->ahp, index);
5224 5224 anon_array_exit(&cookie);
5225 5225 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5226 5226					ANON_LOCK_EXIT(&amp->a_rwlock);
5227 5227 goto slow;
5228 5228 }
5229 5229 }
5230 5230 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5231 5231			ANON_LOCK_EXIT(&amp->a_rwlock);
5232 5232 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5233 5233 return (0);
5234 5234 }
5235 5235 }
5236 5236 slow:
5237 5237
5238 5238 if (svd->vpage == NULL)
5239 5239 vpage = NULL;
5240 5240 else
5241 5241 vpage = &svd->vpage[page];
5242 5242
5243 5243 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5244 5244
5245 5245 /*
5246 5246 * If MADV_SEQUENTIAL has been set for the particular page we
5247 5247 * are faulting on, free behind all pages in the segment and put
5248 5248 * them on the free list.
5249 5249 */
5250 5250
5251 5251 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5252 5252 struct vpage *vpp;
5253 5253 ulong_t fanon_index;
5254 5254 size_t fpage;
5255 5255 u_offset_t pgoff, fpgoff;
5256 5256 struct vnode *fvp;
5257 5257 struct anon *fap = NULL;
5258 5258
5259 5259 if (svd->advice == MADV_SEQUENTIAL ||
5260 5260 (svd->pageadvice &&
5261 5261 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5262 5262 pgoff = off - PAGESIZE;
5263 5263 fpage = page - 1;
5264 5264 if (vpage != NULL)
5265 5265 vpp = &svd->vpage[fpage];
5266 5266 if (amp != NULL)
5267 5267 fanon_index = svd->anon_index + fpage;
5268 5268
5269 5269 while (pgoff > svd->offset) {
5270 5270 if (svd->advice != MADV_SEQUENTIAL &&
5271 5271 (!svd->pageadvice || (vpage &&
5272 5272 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5273 5273 break;
5274 5274
5275 5275 /*
5276 5276 * If this is an anon page, we must find the
5277 5277 * correct <vp, offset> for it
5278 5278 */
5279 5279 fap = NULL;
5280 5280 if (amp != NULL) {
5281 5281					ANON_LOCK_ENTER(&amp->a_rwlock,
5282 5282 RW_READER);
5283 5283 anon_array_enter(amp, fanon_index,
5284 5284 &cookie);
5285 5285 fap = anon_get_ptr(amp->ahp,
5286 5286 fanon_index);
5287 5287 if (fap != NULL) {
5288 5288 swap_xlate(fap, &fvp, &fpgoff);
5289 5289 } else {
5290 5290 fpgoff = pgoff;
5291 5291 fvp = svd->vp;
5292 5292 }
5293 5293 anon_array_exit(&cookie);
5294 5294					ANON_LOCK_EXIT(&amp->a_rwlock);
5295 5295 } else {
5296 5296 fpgoff = pgoff;
5297 5297 fvp = svd->vp;
5298 5298 }
5299 5299 if (fvp == NULL)
5300 5300 break; /* XXX */
5301 5301 /*
5302 5302 * Skip pages that are free or have an
5303 5303 * "exclusive" lock.
5304 5304 */
5305 5305 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5306 5306 if (pp == NULL)
5307 5307 break;
5308 5308 /*
5309 5309 * We don't need the page_struct_lock to test
5310 5310 * as this is only advisory; even if we
5311 5311 * acquire it someone might race in and lock
5312 5312 * the page after we unlock and before the
5313 5313 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5314 5314 */
5315 5315 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5316 5316 /*
5317 5317 * Hold the vnode before releasing
5318 5318 * the page lock to prevent it from
5319 5319 * being freed and re-used by some
5320 5320 * other thread.
5321 5321 */
5322 5322 VN_HOLD(fvp);
5323 5323 page_unlock(pp);
5324 5324 /*
5325 5325 * We should build a page list
5326 5326 * to kluster putpages XXX
5327 5327 */
5328 5328 (void) VOP_PUTPAGE(fvp,
5329 5329 (offset_t)fpgoff, PAGESIZE,
5330 5330 (B_DONTNEED|B_FREE|B_ASYNC),
5331 5331 svd->cred, NULL);
5332 5332 VN_RELE(fvp);
5333 5333 } else {
5334 5334 /*
5335 5335 * XXX - Should the loop terminate if
5336 5336 * the page is `locked'?
5337 5337 */
5338 5338 page_unlock(pp);
5339 5339 }
5340 5340 --vpp;
5341 5341 --fanon_index;
5342 5342 pgoff -= PAGESIZE;
5343 5343 }
5344 5344 }
5345 5345 }
5346 5346
5347 5347 plp = pl;
5348 5348 *plp = NULL;
5349 5349 pl_alloc_sz = 0;
5350 5350
5351 5351 /*
5352 5352 * See if we need to call VOP_GETPAGE for
5353 5353 * *any* of the range being faulted on.
5354 5354 * We can skip all of this work if there
5355 5355 * was no original vnode.
5356 5356 */
5357 5357 if (svd->vp != NULL) {
5358 5358 u_offset_t vp_off;
5359 5359 size_t vp_len;
5360 5360 struct anon *ap;
5361 5361 vnode_t *vp;
5362 5362
5363 5363 vp_off = off;
5364 5364 vp_len = len;
5365 5365
5366 5366 if (amp == NULL)
5367 5367 dogetpage = 1;
5368 5368 else {
5369 5369 /*
5370 5370 * Only acquire reader lock to prevent amp->ahp
5371 5371 * from being changed. It's ok to miss pages,
5372 5372 * hence we don't do anon_array_enter
5373 5373 */
5374 5374			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5375 5375 ap = anon_get_ptr(amp->ahp, anon_index);
5376 5376
5377 5377 if (len <= PAGESIZE)
5378 5378 /* inline non_anon() */
5379 5379 dogetpage = (ap == NULL);
5380 5380 else
5381 5381 dogetpage = non_anon(amp->ahp, anon_index,
5382 5382 &vp_off, &vp_len);
5383 5383			ANON_LOCK_EXIT(&amp->a_rwlock);
5384 5384 }
5385 5385
5386 5386 if (dogetpage) {
5387 5387 enum seg_rw arw;
5388 5388 struct as *as = seg->s_as;
5389 5389
5390 5390 if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5391 5391 /*
5392 5392 * Page list won't fit in local array,
5393 5393 * allocate one of the needed size.
5394 5394 */
5395 5395 pl_alloc_sz =
5396 5396 (btop(len) + 1) * sizeof (page_t *);
5397 5397 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5398 5398 plp[0] = NULL;
5399 5399 plsz = len;
5400 5400 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5401 5401 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5402 5402 (((size_t)(addr + PAGESIZE) <
5403 5403 (size_t)(seg->s_base + seg->s_size)) &&
5404 5404 hat_probe(as->a_hat, addr + PAGESIZE))) {
5405 5405 /*
5406 5406 * Ask VOP_GETPAGE to return the exact number
5407 5407 * of pages if
5408 5408 * (a) this is a COW fault, or
5409 5409 * (b) this is a software fault, or
5410 5410 * (c) next page is already mapped.
5411 5411 */
5412 5412 plsz = len;
5413 5413 } else {
5414 5414 /*
5415 5415 * Ask VOP_GETPAGE to return adjacent pages
5416 5416 * within the segment.
5417 5417 */
5418 5418 plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5419 5419 ((seg->s_base + seg->s_size) - addr));
5420 5420 ASSERT((addr + plsz) <=
5421 5421 (seg->s_base + seg->s_size));
5422 5422 }
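			/*
			 * Illustrative only, assuming an 8K PAGESIZE (so
			 * PVN_GETPAGE_SZ works out to 64K): a plain read
			 * fault 16K short of the end of the segment, with the
			 * next page not yet mapped, falls to the last branch
			 * and gets plsz = MIN(0x10000, 0x4000) == 0x4000,
			 * while a fault in the middle of the segment gets the
			 * full 64K of read-ahead.  COW writes, software
			 * faults and faults whose next page is already mapped
			 * instead ask VOP_GETPAGE for exactly len bytes.
			 */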
5423 5423
5424 5424 /*
5425 5425 * Need to get some non-anonymous pages.
5426 5426 * We need to make only one call to GETPAGE to do
5427 5427 * this to prevent certain deadlocking conditions
5428 5428 * when we are doing locking. In this case
5429 5429 * non_anon() should have picked up the smallest
5430 5430 * range which includes all the non-anonymous
5431 5431 * pages in the requested range. We have to
5432 5432 * be careful regarding which rw flag to pass in
5433 5433 * because on a private mapping, the underlying
5434 5434 * object is never allowed to be written.
5435 5435 */
5436 5436 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5437 5437 arw = S_READ;
5438 5438 } else {
5439 5439 arw = rw;
5440 5440 }
5441 5441 vp = svd->vp;
5442 5442 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5443 5443 "segvn_getpage:seg %p addr %p vp %p",
5444 5444 seg, addr, vp);
5445 5445 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5446 5446 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5447 5447 svd->cred, NULL);
5448 5448 if (err) {
5449 5449 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5450 5450 segvn_pagelist_rele(plp);
5451 5451 if (pl_alloc_sz)
5452 5452 kmem_free(plp, pl_alloc_sz);
5453 5453 return (FC_MAKE_ERR(err));
5454 5454 }
5455 5455 if (svd->type == MAP_PRIVATE)
5456 5456 vpprot &= ~PROT_WRITE;
5457 5457 }
5458 5458 }
5459 5459
5460 5460 /*
5461 5461 * N.B. at this time the plp array has all the needed non-anon
5462 5462 * pages in addition to (possibly) having some adjacent pages.
5463 5463 */
5464 5464
5465 5465 /*
5466 5466 * Always acquire the anon_array_lock to prevent
5467 5467 * 2 threads from allocating separate anon slots for
5468 5468 * the same "addr".
5469 5469 *
5470 5470 * If this is a copy-on-write fault and we don't already
5471 5471 * have the anon_array_lock, acquire it to prevent the
5472 5472 * fault routine from handling multiple copy-on-write faults
5473 5473 * on the same "addr" in the same address space.
5474 5474 *
5475 5475 * Only one thread should deal with the fault since after
5476 5476 * it is handled, the other threads can acquire a translation
5477 5477 * to the newly created private page. This prevents two or
5478 5478 * more threads from creating different private pages for the
5479 5479 * same fault.
5480 5480 *
5481 5481 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5482 5482 * to prevent deadlock between this thread and another thread
5483 5483 * which has soft-locked this page and wants to acquire serial_lock.
5484 5484 * ( bug 4026339 )
5485 5485 *
5486 5486 * The fix for bug 4026339 becomes unnecessary when using the
5487 5487 * locking scheme with per amp rwlock and a global set of hash
5488 5488	 * locks, anon_array_lock. If we steal a vnode page when low
5489 5489	 * on memory and upgrade the page lock through page_rename,
5490 5490 * then the page is PAGE_HANDLED, nothing needs to be done
5491 5491 * for this page after returning from segvn_faultpage.
5492 5492 *
5493 5493 * But really, the page lock should be downgraded after
5494 5494 * the stolen page is page_rename'd.
5495 5495 */
5496 5496
5497 5497 if (amp != NULL)
5498 5498		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5499 5499
5500 5500 /*
5501 5501 * Ok, now loop over the address range and handle faults
5502 5502 */
5503 5503 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5504 5504 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5505 5505 type, rw, brkcow);
5506 5506 if (err) {
5507 5507 if (amp != NULL)
5508 5508				ANON_LOCK_EXIT(&amp->a_rwlock);
5509 5509 if (type == F_SOFTLOCK && a > addr) {
5510 5510 segvn_softunlock(seg, addr, (a - addr),
5511 5511 S_OTHER);
5512 5512 }
5513 5513 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5514 5514 segvn_pagelist_rele(plp);
5515 5515 if (pl_alloc_sz)
5516 5516 kmem_free(plp, pl_alloc_sz);
5517 5517 return (err);
5518 5518 }
5519 5519 if (vpage) {
5520 5520 vpage++;
5521 5521 } else if (svd->vpage) {
5522 5522 page = seg_page(seg, addr);
5523 5523 vpage = &svd->vpage[++page];
5524 5524 }
5525 5525 }
5526 5526
5527 5527 /* Didn't get pages from the underlying fs so we're done */
5528 5528 if (!dogetpage)
5529 5529 goto done;
5530 5530
5531 5531 /*
5532 5532 * Now handle any other pages in the list returned.
5533 5533 * If the page can be used, load up the translations now.
5534 5534 * Note that the for loop will only be entered if "plp"
5535 5535 * is pointing to a non-NULL page pointer which means that
5536 5536 * VOP_GETPAGE() was called and vpprot has been initialized.
5537 5537 */
5538 5538 if (svd->pageprot == 0)
5539 5539 prot = svd->prot & vpprot;
5540 5540
5541 5541
5542 5542 /*
5543 5543	 * Large Files: diff should be an unsigned value because we started
5544 5544	 * supporting > 2GB segment sizes from 2.5.1, and when a
5545 5545	 * large file of size > 2GB gets mapped into the address space
5546 5546	 * the diff value can be > 2GB.
5547 5547 */
5548 5548
5549 5549 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5550 5550 size_t diff;
5551 5551 struct anon *ap;
5552 5552 int anon_index;
5553 5553 anon_sync_obj_t cookie;
5554 5554 int hat_flag = HAT_LOAD_ADV;
5555 5555
5556 5556 if (svd->flags & MAP_TEXT) {
5557 5557 hat_flag |= HAT_LOAD_TEXT;
5558 5558 }
5559 5559
5560 5560 if (pp == PAGE_HANDLED)
5561 5561 continue;
5562 5562
5563 5563 if (svd->tr_state != SEGVN_TR_ON &&
5564 5564 pp->p_offset >= svd->offset &&
5565 5565 pp->p_offset < svd->offset + seg->s_size) {
5566 5566
5567 5567 diff = pp->p_offset - svd->offset;
5568 5568
5569 5569 /*
5570 5570 * Large Files: Following is the assertion
5571 5571 * validating the above cast.
5572 5572 */
5573 5573 ASSERT(svd->vp == pp->p_vnode);
5574 5574
5575 5575 page = btop(diff);
5576 5576 if (svd->pageprot)
5577 5577 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5578 5578
5579 5579 /*
5580 5580 * Prevent other threads in the address space from
5581 5581 * creating private pages (i.e., allocating anon slots)
5582 5582 * while we are in the process of loading translations
5583 5583 * to additional pages returned by the underlying
5584 5584 * object.
5585 5585 */
5586 5586 if (amp != NULL) {
5587 5587 anon_index = svd->anon_index + page;
5588 5588 anon_array_enter(amp, anon_index, &cookie);
5589 5589 ap = anon_get_ptr(amp->ahp, anon_index);
5590 5590 }
5591 5591 if ((amp == NULL) || (ap == NULL)) {
5592 5592 if (IS_VMODSORT(pp->p_vnode) ||
5593 5593 enable_mbit_wa) {
5594 5594 if (rw == S_WRITE)
5595 5595 hat_setmod(pp);
5596 5596 else if (rw != S_OTHER &&
5597 5597 !hat_ismod(pp))
5598 5598 prot &= ~PROT_WRITE;
5599 5599 }
5600 5600 /*
5601 5601 * Skip mapping read ahead pages marked
5602 5602 * for migration, so they will get migrated
5603 5603 * properly on fault
5604 5604 */
5605 5605 ASSERT(amp == NULL ||
5606 5606 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5607 5607 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5608 5608 hat_memload_region(hat,
5609 5609 seg->s_base + diff,
5610 5610 pp, prot, hat_flag,
5611 5611 svd->rcookie);
5612 5612 }
5613 5613 }
5614 5614 if (amp != NULL)
5615 5615 anon_array_exit(&cookie);
5616 5616 }
5617 5617 page_unlock(pp);
5618 5618 }
5619 5619 done:
5620 5620 if (amp != NULL)
5621 5621		ANON_LOCK_EXIT(&amp->a_rwlock);
5622 5622 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5623 5623 if (pl_alloc_sz)
5624 5624 kmem_free(plp, pl_alloc_sz);
5625 5625 return (0);
5626 5626 }
5627 5627
5628 5628 /*
5629 5629 * This routine is used to start I/O on pages asynchronously. XXX it will
5630 5630 * only create PAGESIZE pages. At fault time they will be relocated into
5631 5631 * larger pages.
5632 5632 */
5633 5633 static faultcode_t
5634 5634 segvn_faulta(struct seg *seg, caddr_t addr)
5635 5635 {
5636 5636 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5637 5637 int err;
5638 5638 struct anon_map *amp;
5639 5639 vnode_t *vp;
5640 5640
5641 5641 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5642 5642
5643 5643 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5644 5644 if ((amp = svd->amp) != NULL) {
5645 5645 struct anon *ap;
5646 5646
5647 5647 /*
5648 5648 * Reader lock to prevent amp->ahp from being changed.
5649 5649 * This is advisory, it's ok to miss a page, so
5650 5650 * we don't do anon_array_enter lock.
5651 5651 */
5652 5652		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5653 5653 if ((ap = anon_get_ptr(amp->ahp,
5654 5654 svd->anon_index + seg_page(seg, addr))) != NULL) {
5655 5655
5656 5656 err = anon_getpage(&ap, NULL, NULL,
5657 5657 0, seg, addr, S_READ, svd->cred);
5658 5658
5659 5659			ANON_LOCK_EXIT(&amp->a_rwlock);
5660 5660 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5661 5661 if (err)
5662 5662 return (FC_MAKE_ERR(err));
5663 5663 return (0);
5664 5664 }
5665 5665		ANON_LOCK_EXIT(&amp->a_rwlock);
5666 5666 }
5667 5667
5668 5668 if (svd->vp == NULL) {
5669 5669 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5670 5670 return (0); /* zfod page - do nothing now */
5671 5671 }
5672 5672
5673 5673 vp = svd->vp;
5674 5674 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5675 5675 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5676 5676 err = VOP_GETPAGE(vp,
5677 5677 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5678 5678 PAGESIZE, NULL, NULL, 0, seg, addr,
5679 5679 S_OTHER, svd->cred, NULL);
5680 5680
5681 5681 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5682 5682 if (err)
5683 5683 return (FC_MAKE_ERR(err));
5684 5684 return (0);
5685 5685 }
5686 5686
5687 5687 static int
5688 5688 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5689 5689 {
5690 5690 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5691 5691 struct vpage *cvp, *svp, *evp;
5692 5692 struct vnode *vp;
5693 5693 size_t pgsz;
5694 5694 pgcnt_t pgcnt;
5695 5695 anon_sync_obj_t cookie;
5696 5696 int unload_done = 0;
5697 5697
5698 5698 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5699 5699
5700 5700 if ((svd->maxprot & prot) != prot)
5701 5701 return (EACCES); /* violated maxprot */
5702 5702
5703 5703 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5704 5704
5705 5705 /* return if prot is the same */
5706 5706 if (!svd->pageprot && svd->prot == prot) {
5707 5707 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5708 5708 return (0);
5709 5709 }
5710 5710
5711 5711 /*
5712 5712 * Since we change protections we first have to flush the cache.
5713 5713 * This makes sure all the pagelock calls have to recheck
5714 5714 * protections.
5715 5715 */
5716 5716 if (svd->softlockcnt > 0) {
5717 5717 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5718 5718
5719 5719 /*
5720 5720		 * If this is a shared segment, a non-zero softlockcnt
5721 5721		 * means locked pages are still in use.
5722 5722 */
5723 5723 if (svd->type == MAP_SHARED) {
5724 5724 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5725 5725 return (EAGAIN);
5726 5726 }
5727 5727
5728 5728 /*
5729 5729 * Since we do have the segvn writers lock nobody can fill
5730 5730 * the cache with entries belonging to this seg during
5731 5731 * the purge. The flush either succeeds or we still have
5732 5732 * pending I/Os.
5733 5733 */
5734 5734 segvn_purge(seg);
5735 5735 if (svd->softlockcnt > 0) {
5736 5736 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5737 5737 return (EAGAIN);
5738 5738 }
5739 5739 }
5740 5740
5741 5741 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5742 5742 ASSERT(svd->amp == NULL);
5743 5743 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5744 5744 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5745 5745 HAT_REGION_TEXT);
5746 5746 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5747 5747 unload_done = 1;
5748 5748 } else if (svd->tr_state == SEGVN_TR_INIT) {
5749 5749 svd->tr_state = SEGVN_TR_OFF;
5750 5750 } else if (svd->tr_state == SEGVN_TR_ON) {
5751 5751 ASSERT(svd->amp != NULL);
5752 5752 segvn_textunrepl(seg, 0);
5753 5753 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5754 5754 unload_done = 1;
5755 5755 }
5756 5756
5757 5757 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5758 5758 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5759 5759 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5760 5760 segvn_inval_trcache(svd->vp);
5761 5761 }
5762 5762 if (seg->s_szc != 0) {
5763 5763 int err;
5764 5764 pgsz = page_get_pagesize(seg->s_szc);
5765 5765 pgcnt = pgsz >> PAGESHIFT;
5766 5766 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5767 5767 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5768 5768 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5769 5769 ASSERT(seg->s_base != addr || seg->s_size != len);
5770 5770 /*
5771 5771 * If we are holding the as lock as a reader then
5772 5772 * we need to return IE_RETRY and let the as
5773 5773 * layer drop and re-acquire the lock as a writer.
5774 5774 */
5775 5775 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5776 5776 return (IE_RETRY);
5777 5777 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5778 5778 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5779 5779 err = segvn_demote_range(seg, addr, len,
5780 5780 SDR_END, 0);
5781 5781 } else {
5782 5782 uint_t szcvec = map_pgszcvec(seg->s_base,
5783 5783 pgsz, (uintptr_t)seg->s_base,
5784 5784 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5785 5785 err = segvn_demote_range(seg, addr, len,
5786 5786 SDR_END, szcvec);
5787 5787 }
5788 5788 if (err == 0)
5789 5789 return (IE_RETRY);
5790 5790 if (err == ENOMEM)
5791 5791 return (IE_NOMEM);
5792 5792 return (err);
5793 5793 }
5794 5794 }
5795 5795
5796 5796
5797 5797 /*
5798 5798 * If it's a private mapping and we're making it writable then we
5799 5799 * may have to reserve the additional swap space now. If we are
5800 5800 * making writable only a part of the segment then we use its vpage
5801 5801 * array to keep a record of the pages for which we have reserved
5802 5802 * swap. In this case we set the pageswap field in the segment's
5803 5803 * segvn structure to record this.
5804 5804 *
5805 5805 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5806 5806 * removing write permission on the entire segment and we haven't
5807 5807 * modified any pages, we can release the swap space.
5808 5808 */
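	/*
	 * A worked example of the sizing below, assuming 4K pages: making a
	 * 128K sub-range of this mapping writable when 64K of it already has
	 * VPP_ISSWAPRES set counts 16 unreserved vpage entries, so
	 * sz = 16 << PAGESHIFT == 64K is reserved and those entries are then
	 * marked with VPP_SETSWAPRES().  The first partial request on a
	 * segment (pageswap still 0) skips the count and reserves sz = len.
	 */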
5809 5809 if (svd->type == MAP_PRIVATE) {
5810 5810 if (prot & PROT_WRITE) {
5811 5811 if (!(svd->flags & MAP_NORESERVE) &&
5812 5812 !(svd->swresv && svd->pageswap == 0)) {
5813 5813 size_t sz = 0;
5814 5814
5815 5815 /*
5816 5816 * Start by determining how much swap
5817 5817 * space is required.
5818 5818 */
5819 5819 if (addr == seg->s_base &&
5820 5820 len == seg->s_size &&
5821 5821 svd->pageswap == 0) {
5822 5822 /* The whole segment */
5823 5823 sz = seg->s_size;
5824 5824 } else {
5825 5825 /*
5826 5826 * Make sure that the vpage array
5827 5827 * exists, and make a note of the
5828 5828 * range of elements corresponding
5829 5829 * to len.
5830 5830 */
5831 5831 segvn_vpage(seg);
5832 5832 if (svd->vpage == NULL) {
5833 5833 SEGVN_LOCK_EXIT(seg->s_as,
5834 5834 &svd->lock);
5835 5835 return (ENOMEM);
5836 5836 }
5837 5837 svp = &svd->vpage[seg_page(seg, addr)];
5838 5838 evp = &svd->vpage[seg_page(seg,
5839 5839 addr + len)];
5840 5840
5841 5841 if (svd->pageswap == 0) {
5842 5842 /*
5843 5843 * This is the first time we've
5844 5844 * asked for a part of this
5845 5845 * segment, so we need to
5846 5846 * reserve everything we've
5847 5847 * been asked for.
5848 5848 */
5849 5849 sz = len;
5850 5850 } else {
5851 5851 /*
5852 5852 * We have to count the number
5853 5853 * of pages required.
5854 5854 */
5855 5855 for (cvp = svp; cvp < evp;
5856 5856 cvp++) {
5857 5857 if (!VPP_ISSWAPRES(cvp))
5858 5858 sz++;
5859 5859 }
5860 5860 sz <<= PAGESHIFT;
5861 5861 }
5862 5862 }
5863 5863
5864 5864 /* Try to reserve the necessary swap. */
5865 5865 if (anon_resv_zone(sz,
5866 5866 seg->s_as->a_proc->p_zone) == 0) {
5867 5867 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5868 5868 return (IE_NOMEM);
5869 5869 }
5870 5870
5871 5871 /*
5872 5872 * Make a note of how much swap space
5873 5873 * we've reserved.
5874 5874 */
5875 5875 if (svd->pageswap == 0 && sz == seg->s_size) {
5876 5876 svd->swresv = sz;
5877 5877 } else {
5878 5878 ASSERT(svd->vpage != NULL);
5879 5879 svd->swresv += sz;
5880 5880 svd->pageswap = 1;
5881 5881 for (cvp = svp; cvp < evp; cvp++) {
5882 5882 if (!VPP_ISSWAPRES(cvp))
5883 5883 VPP_SETSWAPRES(cvp);
5884 5884 }
5885 5885 }
5886 5886 }
5887 5887 } else {
5888 5888 /*
5889 5889 * Swap space is released only if this segment
5890 5890 * does not map anonymous memory, since read faults
5891 5891 * on such segments still need an anon slot to read
5892 5892 * in the data.
5893 5893 */
5894 5894 if (svd->swresv != 0 && svd->vp != NULL &&
5895 5895 svd->amp == NULL && addr == seg->s_base &&
5896 5896 len == seg->s_size && svd->pageprot == 0) {
5897 5897 ASSERT(svd->pageswap == 0);
5898 5898 anon_unresv_zone(svd->swresv,
5899 5899 seg->s_as->a_proc->p_zone);
5900 5900 svd->swresv = 0;
5901 5901 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5902 5902 "anon proc:%p %lu %u", seg, 0, 0);
5903 5903 }
5904 5904 }
5905 5905 }
5906 5906
5907 5907 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5908 5908 if (svd->prot == prot) {
5909 5909 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5910 5910 return (0); /* all done */
5911 5911 }
5912 5912 svd->prot = (uchar_t)prot;
5913 5913 } else if (svd->type == MAP_PRIVATE) {
5914 5914 struct anon *ap = NULL;
5915 5915 page_t *pp;
5916 5916 u_offset_t offset, off;
5917 5917 struct anon_map *amp;
5918 5918 ulong_t anon_idx = 0;
5919 5919
5920 5920 /*
5921 5921 * A vpage structure exists or else the change does not
5922 5922 * involve the entire segment. Establish a vpage structure
5923 5923 * if none is there. Then, for each page in the range,
5924 5924 * adjust its individual permissions. Note that write-
5925 5925 * enabling a MAP_PRIVATE page can affect the claims for
5926 5926 * locked down memory. Overcommitting memory terminates
5927 5927 * the operation.
5928 5928 */
5929 5929 segvn_vpage(seg);
5930 5930 if (svd->vpage == NULL) {
5931 5931 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5932 5932 return (ENOMEM);
5933 5933 }
5934 5934 svd->pageprot = 1;
5935 5935 if ((amp = svd->amp) != NULL) {
5936 5936 anon_idx = svd->anon_index + seg_page(seg, addr);
5937 5937 ASSERT(seg->s_szc == 0 ||
5938 5938 IS_P2ALIGNED(anon_idx, pgcnt));
5939 5939			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5940 5940 }
5941 5941
5942 5942 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5943 5943 evp = &svd->vpage[seg_page(seg, addr + len)];
5944 5944
5945 5945 /*
5946 5946 * See Statement at the beginning of segvn_lockop regarding
5947 5947 * the way cowcnts and lckcnts are handled.
5948 5948 */
5949 5949 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5950 5950
5951 5951 if (seg->s_szc != 0) {
5952 5952 if (amp != NULL) {
5953 5953 anon_array_enter(amp, anon_idx,
5954 5954 &cookie);
5955 5955 }
5956 5956 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5957 5957 !segvn_claim_pages(seg, svp, offset,
5958 5958 anon_idx, prot)) {
5959 5959 if (amp != NULL) {
5960 5960 anon_array_exit(&cookie);
5961 5961 }
5962 5962 break;
5963 5963 }
5964 5964 if (amp != NULL) {
5965 5965 anon_array_exit(&cookie);
5966 5966 }
5967 5967 anon_idx++;
5968 5968 } else {
5969 5969 if (amp != NULL) {
5970 5970 anon_array_enter(amp, anon_idx,
5971 5971 &cookie);
5972 5972 ap = anon_get_ptr(amp->ahp, anon_idx++);
5973 5973 }
5974 5974
5975 5975 if (VPP_ISPPLOCK(svp) &&
5976 5976 VPP_PROT(svp) != prot) {
5977 5977
5978 5978 if (amp == NULL || ap == NULL) {
5979 5979 vp = svd->vp;
5980 5980 off = offset;
5981 5981 } else
5982 5982 swap_xlate(ap, &vp, &off);
5983 5983 if (amp != NULL)
5984 5984 anon_array_exit(&cookie);
5985 5985
5986 5986 if ((pp = page_lookup(vp, off,
5987 5987 SE_SHARED)) == NULL) {
5988 5988 panic("segvn_setprot: no page");
5989 5989 /*NOTREACHED*/
5990 5990 }
5991 5991 ASSERT(seg->s_szc == 0);
5992 5992 if ((VPP_PROT(svp) ^ prot) &
5993 5993 PROT_WRITE) {
5994 5994 if (prot & PROT_WRITE) {
5995 5995 if (!page_addclaim(
5996 5996 pp)) {
5997 5997 page_unlock(pp);
5998 5998 break;
5999 5999 }
6000 6000 } else {
6001 6001 if (!page_subclaim(
6002 6002 pp)) {
6003 6003 page_unlock(pp);
6004 6004 break;
6005 6005 }
6006 6006 }
6007 6007 }
6008 6008 page_unlock(pp);
6009 6009 } else if (amp != NULL)
6010 6010 anon_array_exit(&cookie);
6011 6011 }
6012 6012 VPP_SETPROT(svp, prot);
6013 6013 offset += PAGESIZE;
6014 6014 }
6015 6015 if (amp != NULL)
6016 6016			ANON_LOCK_EXIT(&amp->a_rwlock);
6017 6017
6018 6018 /*
6019 6019 * Did we terminate prematurely? If so, simply unload
6020 6020 * the translations to the things we've updated so far.
6021 6021 */
6022 6022 if (svp != evp) {
6023 6023 if (unload_done) {
6024 6024 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6025 6025 return (IE_NOMEM);
6026 6026 }
6027 6027 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6028 6028 PAGESIZE;
6029 6029 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
6030 6030 if (len != 0)
6031 6031 hat_unload(seg->s_as->a_hat, addr,
6032 6032 len, HAT_UNLOAD);
6033 6033 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6034 6034 return (IE_NOMEM);
6035 6035 }
6036 6036 } else {
6037 6037 segvn_vpage(seg);
6038 6038 if (svd->vpage == NULL) {
6039 6039 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6040 6040 return (ENOMEM);
6041 6041 }
6042 6042 svd->pageprot = 1;
6043 6043 evp = &svd->vpage[seg_page(seg, addr + len)];
6044 6044 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6045 6045 VPP_SETPROT(svp, prot);
6046 6046 }
6047 6047 }
6048 6048
6049 6049 if (unload_done) {
6050 6050 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6051 6051 return (0);
6052 6052 }
6053 6053
6054 6054 if (((prot & PROT_WRITE) != 0 &&
6055 6055 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
6056 6056 (prot & ~PROT_USER) == PROT_NONE) {
6057 6057 /*
6058 6058 * Either private or shared data with write access (in
6059 6059 * which case we need to throw out all former translations
6060 6060 * so that we get the right translations set up on fault
6061 6061 * and we don't allow write access to any copy-on-write pages
6062 6062 * that might be around or to prevent write access to pages
6063 6063 * representing holes in a file), or we don't have permission
6064 6064 * to access the memory at all (in which case we have to
6065 6065 * unload any current translations that might exist).
6066 6066 */
6067 6067 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
6068 6068 } else {
6069 6069 /*
6070 6070 * A shared mapping or a private mapping in which write
6071 6071 * protection is going to be denied - just change all the
6072 6072 * protections over the range of addresses in question.
6073 6073 * segvn does not support any other attributes other
6074 6074 * than prot so we can use hat_chgattr.
6075 6075 */
6076 6076 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6077 6077 }
6078 6078
6079 6079 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6080 6080
6081 6081 return (0);
6082 6082 }
6083 6083
6084 6084 /*
6085 6085 * segvn_setpagesize is called via segop_setpagesize from as_setpagesize,
6086 6086 * to determine if the seg is capable of mapping the requested szc.
6087 6087 */
6088 6088 static int
6089 6089 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6090 6090 {
6091 6091 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6092 6092 struct segvn_data *nsvd;
6093 6093 struct anon_map *amp = svd->amp;
6094 6094 struct seg *nseg;
6095 6095 caddr_t eaddr = addr + len, a;
6096 6096 size_t pgsz = page_get_pagesize(szc);
6097 6097 pgcnt_t pgcnt = page_get_pagecnt(szc);
6098 6098 int err;
6099 6099 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6100 6100
6101 6101 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6102 6102 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6103 6103
6104 6104 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6105 6105 return (0);
6106 6106 }
6107 6107
6108 6108 /*
6109 6109 * addr should always be pgsz aligned but eaddr may be misaligned if
6110 6110 * it's at the end of the segment.
6111 6111 *
6112 6112 * XXX we should assert this condition since as_setpagesize() logic
6113 6113 * guarantees it.
6114 6114 */
6115 6115 if (!IS_P2ALIGNED(addr, pgsz) ||
6116 6116 (!IS_P2ALIGNED(eaddr, pgsz) &&
6117 6117 eaddr != seg->s_base + seg->s_size)) {
6118 6118
6119 6119 segvn_setpgsz_align_err++;
6120 6120 return (EINVAL);
6121 6121 }
6122 6122
6123 6123 if (amp != NULL && svd->type == MAP_SHARED) {
6124 6124 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6125 6125 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6126 6126
6127 6127 segvn_setpgsz_anon_align_err++;
6128 6128 return (EINVAL);
6129 6129 }
6130 6130 }
6131 6131
6132 6132 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6133 6133 szc > segvn_maxpgszc) {
6134 6134 return (EINVAL);
6135 6135 }
6136 6136
6137 6137 /* paranoid check */
6138 6138 if (svd->vp != NULL &&
6139 6139 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6140 6140 return (EINVAL);
6141 6141 }
6142 6142
6143 6143 if (seg->s_szc == 0 && svd->vp != NULL &&
6144 6144 map_addr_vacalign_check(addr, off)) {
6145 6145 return (EINVAL);
6146 6146 }
6147 6147
6148 6148 /*
6149 6149 * Check that protections are the same within new page
6150 6150 * size boundaries.
6151 6151 */
6152 6152 if (svd->pageprot) {
6153 6153 for (a = addr; a < eaddr; a += pgsz) {
6154 6154 if ((a + pgsz) > eaddr) {
6155 6155 if (!sameprot(seg, a, eaddr - a)) {
6156 6156 return (EINVAL);
6157 6157 }
6158 6158 } else {
6159 6159 if (!sameprot(seg, a, pgsz)) {
6160 6160 return (EINVAL);
6161 6161 }
6162 6162 }
6163 6163 }
6164 6164 }
6165 6165
6166 6166 /*
6167 6167 * Since we are changing page size we first have to flush
6168 6168 * the cache. This makes sure all the pagelock calls have
6169 6169 * to recheck protections.
6170 6170 */
6171 6171 if (svd->softlockcnt > 0) {
6172 6172 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6173 6173
6174 6174 /*
6175 6175		 * If this is a shared segment, a non-zero softlockcnt
6176 6176		 * means locked pages are still in use.
6177 6177 */
6178 6178 if (svd->type == MAP_SHARED) {
6179 6179 return (EAGAIN);
6180 6180 }
6181 6181
6182 6182 /*
6183 6183 * Since we do have the segvn writers lock nobody can fill
6184 6184 * the cache with entries belonging to this seg during
6185 6185 * the purge. The flush either succeeds or we still have
6186 6186 * pending I/Os.
6187 6187 */
6188 6188 segvn_purge(seg);
6189 6189 if (svd->softlockcnt > 0) {
6190 6190 return (EAGAIN);
6191 6191 }
6192 6192 }
6193 6193
6194 6194 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6195 6195 ASSERT(svd->amp == NULL);
6196 6196 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6197 6197 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6198 6198 HAT_REGION_TEXT);
6199 6199 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6200 6200 } else if (svd->tr_state == SEGVN_TR_INIT) {
6201 6201 svd->tr_state = SEGVN_TR_OFF;
6202 6202 } else if (svd->tr_state == SEGVN_TR_ON) {
6203 6203 ASSERT(svd->amp != NULL);
6204 6204 segvn_textunrepl(seg, 1);
6205 6205 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6206 6206 amp = NULL;
6207 6207 }
6208 6208
6209 6209 /*
6210 6210 * Operation for sub range of existing segment.
6211 6211 */
6212 6212 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6213 6213 if (szc < seg->s_szc) {
6214 6214 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6215 6215 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6216 6216 if (err == 0) {
6217 6217 return (IE_RETRY);
6218 6218 }
6219 6219 if (err == ENOMEM) {
6220 6220 return (IE_NOMEM);
6221 6221 }
6222 6222 return (err);
6223 6223 }
6224 6224 if (addr != seg->s_base) {
6225 6225 nseg = segvn_split_seg(seg, addr);
6226 6226 if (eaddr != (nseg->s_base + nseg->s_size)) {
6227 6227 /* eaddr is szc aligned */
6228 6228 (void) segvn_split_seg(nseg, eaddr);
6229 6229 }
6230 6230 return (IE_RETRY);
6231 6231 }
6232 6232 if (eaddr != (seg->s_base + seg->s_size)) {
6233 6233 /* eaddr is szc aligned */
6234 6234 (void) segvn_split_seg(seg, eaddr);
6235 6235 }
6236 6236 return (IE_RETRY);
6237 6237 }
6238 6238
6239 6239 /*
6240 6240 * Break any low level sharing and reset seg->s_szc to 0.
6241 6241 */
6242 6242 if ((err = segvn_clrszc(seg)) != 0) {
6243 6243 if (err == ENOMEM) {
6244 6244 err = IE_NOMEM;
6245 6245 }
6246 6246 return (err);
6247 6247 }
6248 6248 ASSERT(seg->s_szc == 0);
6249 6249
6250 6250 /*
6251 6251 * If the end of the current segment is not pgsz aligned
6252 6252 * then attempt to concatenate with the next segment.
6253 6253 */
6254 6254 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6255 6255 nseg = AS_SEGNEXT(seg->s_as, seg);
6256 6256 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6257 6257 return (ENOMEM);
6258 6258 }
6259 6259 if (nseg->s_ops != &segvn_ops) {
6260 6260 return (EINVAL);
6261 6261 }
6262 6262 nsvd = (struct segvn_data *)nseg->s_data;
6263 6263 if (nsvd->softlockcnt > 0) {
6264 6264 /*
6265 6265			 * If this is a shared segment, a non-zero softlockcnt
6266 6266			 * means locked pages are still in use.
6267 6267 */
6268 6268 if (nsvd->type == MAP_SHARED) {
6269 6269 return (EAGAIN);
6270 6270 }
6271 6271 segvn_purge(nseg);
6272 6272 if (nsvd->softlockcnt > 0) {
6273 6273 return (EAGAIN);
6274 6274 }
6275 6275 }
6276 6276 err = segvn_clrszc(nseg);
6277 6277 if (err == ENOMEM) {
6278 6278 err = IE_NOMEM;
6279 6279 }
6280 6280 if (err != 0) {
6281 6281 return (err);
6282 6282 }
6283 6283 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6284 6284 err = segvn_concat(seg, nseg, 1);
6285 6285 if (err == -1) {
6286 6286 return (EINVAL);
6287 6287 }
6288 6288 if (err == -2) {
6289 6289 return (IE_NOMEM);
6290 6290 }
6291 6291 return (IE_RETRY);
6292 6292 }
6293 6293
6294 6294 /*
6295 6295 * May need to re-align anon array to
6296 6296 * new szc.
6297 6297 */
6298 6298 if (amp != NULL) {
6299 6299 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6300 6300 struct anon_hdr *nahp;
6301 6301
6302 6302 ASSERT(svd->type == MAP_PRIVATE);
6303 6303
6304 6304			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6305 6305 ASSERT(amp->refcnt == 1);
6306 6306 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6307 6307 if (nahp == NULL) {
6308 6308				ANON_LOCK_EXIT(&amp->a_rwlock);
6309 6309 return (IE_NOMEM);
6310 6310 }
6311 6311 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6312 6312 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6313 6313 anon_release(nahp, btop(amp->size));
6314 6314				ANON_LOCK_EXIT(&amp->a_rwlock);
6315 6315 return (IE_NOMEM);
6316 6316 }
6317 6317 anon_release(amp->ahp, btop(amp->size));
6318 6318 amp->ahp = nahp;
6319 6319 svd->anon_index = 0;
6320 6320			ANON_LOCK_EXIT(&amp->a_rwlock);
6321 6321 }
6322 6322 }
6323 6323 if (svd->vp != NULL && szc != 0) {
6324 6324 struct vattr va;
6325 6325 u_offset_t eoffpage = svd->offset;
6326 6326 va.va_mask = AT_SIZE;
6327 6327 eoffpage += seg->s_size;
6328 6328 eoffpage = btopr(eoffpage);
6329 6329 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6330 6330 segvn_setpgsz_getattr_err++;
6331 6331 return (EINVAL);
6332 6332 }
6333 6333 if (btopr(va.va_size) < eoffpage) {
6334 6334 segvn_setpgsz_eof_err++;
6335 6335 return (EINVAL);
6336 6336 }
6337 6337 if (amp != NULL) {
6338 6338 /*
6339 6339 * anon_fill_cow_holes() may call VOP_GETPAGE().
6340 6340 * don't take anon map lock here to avoid holding it
6341 6341 * across VOP_GETPAGE() calls that may call back into
6342 6342			 * segvn for klustering checks. We don't really need
6343 6343 * anon map lock here since it's a private segment and
6344 6344 * we hold as level lock as writers.
6345 6345 */
6346 6346 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6347 6347 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6348 6348 seg->s_size, szc, svd->prot, svd->vpage,
6349 6349 svd->cred)) != 0) {
6350 6350 return (EINVAL);
6351 6351 }
6352 6352 }
6353 6353 segvn_setvnode_mpss(svd->vp);
6354 6354 }
6355 6355
6356 6356 if (amp != NULL) {
6357 6357		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6358 6358 if (svd->type == MAP_PRIVATE) {
6359 6359 amp->a_szc = szc;
6360 6360 } else if (szc > amp->a_szc) {
6361 6361 amp->a_szc = szc;
6362 6362 }
6363 6363		ANON_LOCK_EXIT(&amp->a_rwlock);
6364 6364 }
6365 6365
6366 6366 seg->s_szc = szc;
6367 6367
6368 6368 return (0);
6369 6369 }
6370 6370
6371 6371 static int
6372 6372 segvn_clrszc(struct seg *seg)
6373 6373 {
6374 6374 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6375 6375 struct anon_map *amp = svd->amp;
6376 6376 size_t pgsz;
6377 6377 pgcnt_t pages;
6378 6378 int err = 0;
6379 6379 caddr_t a = seg->s_base;
6380 6380 caddr_t ea = a + seg->s_size;
6381 6381 ulong_t an_idx = svd->anon_index;
6382 6382 vnode_t *vp = svd->vp;
6383 6383 struct vpage *vpage = svd->vpage;
6384 6384 page_t *anon_pl[1 + 1], *pp;
6385 6385 struct anon *ap, *oldap;
6386 6386 uint_t prot = svd->prot, vpprot;
6387 6387 int pageflag = 0;
6388 6388
6389 6389 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6390 6390 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6391 6391 ASSERT(svd->softlockcnt == 0);
6392 6392
6393 6393 if (vp == NULL && amp == NULL) {
6394 6394 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6395 6395 seg->s_szc = 0;
6396 6396 return (0);
6397 6397 }
6398 6398
6399 6399 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6400 6400 ASSERT(svd->amp == NULL);
6401 6401 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6402 6402 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6403 6403 HAT_REGION_TEXT);
6404 6404 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6405 6405 } else if (svd->tr_state == SEGVN_TR_ON) {
6406 6406 ASSERT(svd->amp != NULL);
6407 6407 segvn_textunrepl(seg, 1);
6408 6408 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6409 6409 amp = NULL;
6410 6410 } else {
6411 6411 if (svd->tr_state != SEGVN_TR_OFF) {
6412 6412 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6413 6413 svd->tr_state = SEGVN_TR_OFF;
6414 6414 }
6415 6415
6416 6416 /*
6417 6417 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6418 6418 * unload argument is 0 when we are freeing the segment
6419 6419 * and unload was already done.
6420 6420 */
6421 6421 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6422 6422 HAT_UNLOAD_UNMAP);
6423 6423 }
6424 6424
6425 6425 if (amp == NULL || svd->type == MAP_SHARED) {
6426 6426 seg->s_szc = 0;
6427 6427 return (0);
6428 6428 }
6429 6429
6430 6430 pgsz = page_get_pagesize(seg->s_szc);
6431 6431 pages = btop(pgsz);
6432 6432
6433 6433 /*
6434 6434 * XXX anon rwlock is not really needed because this is a
6435 6435 * private segment and we are writers.
6436 6436 */
6437 6437	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6438 6438
6439 6439 for (; a < ea; a += pgsz, an_idx += pages) {
6440 6440 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6441 6441 ASSERT(vpage != NULL || svd->pageprot == 0);
6442 6442 if (vpage != NULL) {
6443 6443 ASSERT(sameprot(seg, a, pgsz));
6444 6444 prot = VPP_PROT(vpage);
6445 6445 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6446 6446 }
6447 6447 if (seg->s_szc != 0) {
6448 6448 ASSERT(vp == NULL || anon_pages(amp->ahp,
6449 6449 an_idx, pages) == pages);
6450 6450 if ((err = anon_map_demotepages(amp, an_idx,
6451 6451 seg, a, prot, vpage, svd->cred)) != 0) {
6452 6452 goto out;
6453 6453 }
6454 6454 } else {
6455 6455 if (oldap->an_refcnt == 1) {
6456 6456 continue;
6457 6457 }
6458 6458 if ((err = anon_getpage(&oldap, &vpprot,
6459 6459 anon_pl, PAGESIZE, seg, a, S_READ,
6460 6460 svd->cred))) {
6461 6461 goto out;
6462 6462 }
6463 6463 if ((pp = anon_private(&ap, seg, a, prot,
6464 6464 anon_pl[0], pageflag, svd->cred)) == NULL) {
6465 6465 err = ENOMEM;
6466 6466 goto out;
6467 6467 }
6468 6468 anon_decref(oldap);
6469 6469 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6470 6470 ANON_SLEEP);
6471 6471 page_unlock(pp);
6472 6472 }
6473 6473 }
6474 6474 vpage = (vpage == NULL) ? NULL : vpage + pages;
6475 6475 }
6476 6476
6477 6477 amp->a_szc = 0;
6478 6478 seg->s_szc = 0;
6479 6479 out:
6480 6480	ANON_LOCK_EXIT(&amp->a_rwlock);
6481 6481 return (err);
6482 6482 }
6483 6483
6484 6484 static int
6485 6485 segvn_claim_pages(
6486 6486 struct seg *seg,
6487 6487 struct vpage *svp,
6488 6488 u_offset_t off,
6489 6489 ulong_t anon_idx,
6490 6490 uint_t prot)
6491 6491 {
6492 6492 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6493 6493 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6494 6494 page_t **ppa;
6495 6495 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6496 6496 struct anon_map *amp = svd->amp;
6497 6497 struct vpage *evp = svp + pgcnt;
6498 6498 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6499 6499 + seg->s_base;
6500 6500 struct anon *ap;
6501 6501 struct vnode *vp = svd->vp;
6502 6502 page_t *pp;
6503 6503 pgcnt_t pg_idx, i;
6504 6504 int err = 0;
6505 6505 anoff_t aoff;
6506 6506 int anon = (amp != NULL) ? 1 : 0;
6507 6507
6508 6508 ASSERT(svd->type == MAP_PRIVATE);
6509 6509 ASSERT(svd->vpage != NULL);
6510 6510 ASSERT(seg->s_szc != 0);
6511 6511 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6512 6512 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6513 6513 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6514 6514
6515 6515 if (VPP_PROT(svp) == prot)
6516 6516 return (1);
6517 6517 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6518 6518 return (1);
6519 6519
6520 6520 ppa = kmem_alloc(ppasize, KM_SLEEP);
6521 6521 if (anon && vp != NULL) {
6522 6522 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6523 6523 anon = 0;
6524 6524 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6525 6525 }
6526 6526 ASSERT(!anon ||
6527 6527 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6528 6528 }
6529 6529
6530 6530 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6531 6531 if (!VPP_ISPPLOCK(svp))
6532 6532 continue;
6533 6533 if (anon) {
6534 6534 ap = anon_get_ptr(amp->ahp, anon_idx);
6535 6535 if (ap == NULL) {
6536 6536 panic("segvn_claim_pages: no anon slot");
6537 6537 }
6538 6538 swap_xlate(ap, &vp, &aoff);
6539 6539 off = (u_offset_t)aoff;
6540 6540 }
6541 6541 ASSERT(vp != NULL);
6542 6542 if ((pp = page_lookup(vp,
6543 6543 (u_offset_t)off, SE_SHARED)) == NULL) {
6544 6544 panic("segvn_claim_pages: no page");
6545 6545 }
6546 6546 ppa[pg_idx++] = pp;
6547 6547 off += PAGESIZE;
6548 6548 }
6549 6549
6550 6550 if (ppa[0] == NULL) {
6551 6551 kmem_free(ppa, ppasize);
6552 6552 return (1);
6553 6553 }
6554 6554
6555 6555 ASSERT(pg_idx <= pgcnt);
6556 6556 ppa[pg_idx] = NULL;
6557 6557
6558 6558
6559 6559 /* Find each large page within ppa, and adjust its claim */
6560 6560
6561 6561 /* Does ppa cover a single large page? */
6562 6562 if (ppa[0]->p_szc == seg->s_szc) {
6563 6563 if (prot & PROT_WRITE)
6564 6564 err = page_addclaim_pages(ppa);
6565 6565 else
6566 6566 err = page_subclaim_pages(ppa);
6567 6567 } else {
6568 6568 for (i = 0; ppa[i]; i += pgcnt) {
6569 6569 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6570 6570 if (prot & PROT_WRITE)
6571 6571 err = page_addclaim_pages(&ppa[i]);
6572 6572 else
6573 6573 err = page_subclaim_pages(&ppa[i]);
6574 6574 if (err == 0)
6575 6575 break;
6576 6576 }
6577 6577 }
6578 6578
6579 6579 for (i = 0; i < pg_idx; i++) {
6580 6580 ASSERT(ppa[i] != NULL);
6581 6581 page_unlock(ppa[i]);
6582 6582 }
6583 6583
6584 6584 kmem_free(ppa, ppasize);
6585 6585 return (err);
6586 6586 }
6587 6587
6588 6588 /*
6589 6589 * Returns right (upper address) segment if split occurred.
6590 6590 * If the address is equal to the beginning or end of its segment it returns
6591 6591 * the current segment.
6592 6592 */
6593 6593 static struct seg *
6594 6594 segvn_split_seg(struct seg *seg, caddr_t addr)
6595 6595 {
6596 6596 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6597 6597 struct seg *nseg;
6598 6598 size_t nsize;
6599 6599 struct segvn_data *nsvd;
6600 6600
6601 6601 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6602 6602 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6603 6603
6604 6604 ASSERT(addr >= seg->s_base);
6605 6605 ASSERT(addr <= seg->s_base + seg->s_size);
6606 6606 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6607 6607
6608 6608 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6609 6609 return (seg);
6610 6610
6611 6611 nsize = seg->s_base + seg->s_size - addr;
6612 6612 seg->s_size = addr - seg->s_base;
6613 6613 nseg = seg_alloc(seg->s_as, addr, nsize);
6614 6614 ASSERT(nseg != NULL);
6615 6615 nseg->s_ops = seg->s_ops;
6616 6616 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6617 6617 nseg->s_data = (void *)nsvd;
6618 6618 nseg->s_szc = seg->s_szc;
6619 6619 *nsvd = *svd;
6620 6620 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6621 6621 nsvd->seg = nseg;
6622 6622 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6623 6623
6624 6624 if (nsvd->vp != NULL) {
6625 6625 VN_HOLD(nsvd->vp);
6626 6626 nsvd->offset = svd->offset +
6627 6627 (uintptr_t)(nseg->s_base - seg->s_base);
6628 6628 if (nsvd->type == MAP_SHARED)
6629 6629 lgrp_shm_policy_init(NULL, nsvd->vp);
6630 6630 } else {
6631 6631 /*
6632 6632 		 * The offset for an anonymous segment has no significance in
6633 6633 * terms of an offset into a file. If we were to use the above
6634 6634 * calculation instead, the structures read out of
6635 6635 * /proc/<pid>/xmap would be more difficult to decipher since
6636 6636 * it would be unclear whether two seemingly contiguous
6637 6637 * prxmap_t structures represented different segments or a
6638 6638 * single segment that had been split up into multiple prxmap_t
6639 6639 * structures (e.g. if some part of the segment had not yet
6640 6640 * been faulted in).
6641 6641 */
6642 6642 nsvd->offset = 0;
6643 6643 }
6644 6644
6645 6645 ASSERT(svd->softlockcnt == 0);
6646 6646 ASSERT(svd->softlockcnt_sbase == 0);
6647 6647 ASSERT(svd->softlockcnt_send == 0);
6648 6648 crhold(svd->cred);
6649 6649
6650 6650 if (svd->vpage != NULL) {
6651 6651 size_t bytes = vpgtob(seg_pages(seg));
6652 6652 size_t nbytes = vpgtob(seg_pages(nseg));
6653 6653 struct vpage *ovpage = svd->vpage;
6654 6654
6655 6655 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6656 6656 bcopy(ovpage, svd->vpage, bytes);
6657 6657 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6658 6658 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6659 6659 kmem_free(ovpage, bytes + nbytes);
6660 6660 }
6661 6661 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6662 6662 struct anon_map *oamp = svd->amp, *namp;
6663 6663 struct anon_hdr *nahp;
6664 6664
6665 6665 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6666 6666 ASSERT(oamp->refcnt == 1);
6667 6667 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6668 6668 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6669 6669 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6670 6670
6671 6671 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6672 6672 namp->a_szc = nseg->s_szc;
6673 6673 (void) anon_copy_ptr(oamp->ahp,
6674 6674 svd->anon_index + btop(seg->s_size),
6675 6675 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6676 6676 anon_release(oamp->ahp, btop(oamp->size));
6677 6677 oamp->ahp = nahp;
6678 6678 oamp->size = seg->s_size;
6679 6679 svd->anon_index = 0;
6680 6680 nsvd->amp = namp;
6681 6681 nsvd->anon_index = 0;
6682 6682 ANON_LOCK_EXIT(&oamp->a_rwlock);
6683 6683 } else if (svd->amp != NULL) {
6684 6684 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6685 6685 ASSERT(svd->amp == nsvd->amp);
6686 6686 ASSERT(seg->s_szc <= svd->amp->a_szc);
6687 6687 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6688 6688 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6689 6689 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6690 6690 svd->amp->refcnt++;
6691 6691 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6692 6692 }
6693 6693
6694 6694 /*
6695 6695 * Split the amount of swap reserved.
6696 6696 */
6697 6697 if (svd->swresv) {
6698 6698 /*
6699 6699 * For MAP_NORESERVE, only allocate swap reserve for pages
6700 6700 * being used. Other segments get enough to cover whole
6701 6701 * segment.
6702 6702 */
6703 6703 if (svd->flags & MAP_NORESERVE) {
6704 6704 size_t oswresv;
6705 6705
6706 6706 ASSERT(svd->amp);
6707 6707 oswresv = svd->swresv;
6708 6708 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6709 6709 svd->anon_index, btop(seg->s_size)));
6710 6710 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6711 6711 nsvd->anon_index, btop(nseg->s_size)));
6712 6712 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6713 6713 } else {
6714 6714 if (svd->pageswap) {
6715 6715 svd->swresv = segvn_count_swap_by_vpages(seg);
6716 6716 ASSERT(nsvd->swresv >= svd->swresv);
6717 6717 nsvd->swresv -= svd->swresv;
6718 6718 } else {
6719 6719 ASSERT(svd->swresv == seg->s_size +
6720 6720 nseg->s_size);
6721 6721 svd->swresv = seg->s_size;
6722 6722 nsvd->swresv = nseg->s_size;
6723 6723 }
6724 6724 }
6725 6725 }
6726 6726
6727 6727 return (nseg);
6728 6728 }
6729 6729
6730 6730 /*
6731 6731 * called on memory operations (unmap, setprot, setpagesize) for a subset
6732 6732 * of a large page segment to either demote the memory range (SDR_RANGE)
6733 6733 * or the ends (SDR_END) by addr/len.
6734 6734 *
6735 6735 * returns 0 on success. returns errno, including ENOMEM, on failure.
6736 6736 */
6737 6737 static int
6738 6738 segvn_demote_range(
6739 6739 struct seg *seg,
6740 6740 caddr_t addr,
6741 6741 size_t len,
6742 6742 int flag,
6743 6743 uint_t szcvec)
6744 6744 {
6745 6745 caddr_t eaddr = addr + len;
6746 6746 caddr_t lpgaddr, lpgeaddr;
6747 6747 struct seg *nseg;
6748 6748 struct seg *badseg1 = NULL;
6749 6749 struct seg *badseg2 = NULL;
6750 6750 size_t pgsz;
6751 6751 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6752 6752 int err;
6753 6753 uint_t szc = seg->s_szc;
6754 6754 uint_t tszcvec;
6755 6755
6756 6756 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6757 6757 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6758 6758 ASSERT(szc != 0);
6759 6759 pgsz = page_get_pagesize(szc);
6760 6760 ASSERT(seg->s_base != addr || seg->s_size != len);
6761 6761 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6762 6762 ASSERT(svd->softlockcnt == 0);
6763 6763 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6764 6764 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6765 6765
6766 6766 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6767 6767 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6768 6768 if (flag == SDR_RANGE) {
6769 6769 /* demote entire range */
6770 6770 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6771 6771 (void) segvn_split_seg(nseg, lpgeaddr);
6772 6772 ASSERT(badseg1->s_base == lpgaddr);
6773 6773 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6774 6774 } else if (addr != lpgaddr) {
6775 6775 ASSERT(flag == SDR_END);
6776 6776 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6777 6777 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6778 6778 eaddr < lpgaddr + 2 * pgsz) {
6779 6779 (void) segvn_split_seg(nseg, lpgeaddr);
6780 6780 ASSERT(badseg1->s_base == lpgaddr);
6781 6781 ASSERT(badseg1->s_size == 2 * pgsz);
6782 6782 } else {
6783 6783 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6784 6784 ASSERT(badseg1->s_base == lpgaddr);
6785 6785 ASSERT(badseg1->s_size == pgsz);
6786 6786 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6787 6787 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6788 6788 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6789 6789 badseg2 = nseg;
6790 6790 (void) segvn_split_seg(nseg, lpgeaddr);
6791 6791 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6792 6792 ASSERT(badseg2->s_size == pgsz);
6793 6793 }
6794 6794 }
6795 6795 } else {
6796 6796 ASSERT(flag == SDR_END);
6797 6797 ASSERT(eaddr < lpgeaddr);
6798 6798 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6799 6799 (void) segvn_split_seg(nseg, lpgeaddr);
6800 6800 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6801 6801 ASSERT(badseg1->s_size == pgsz);
6802 6802 }
6803 6803
6804 6804 ASSERT(badseg1 != NULL);
6805 6805 ASSERT(badseg1->s_szc == szc);
6806 6806 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6807 6807 badseg1->s_size == 2 * pgsz);
6808 6808 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6809 6809 ASSERT(badseg1->s_size == pgsz ||
6810 6810 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6811 6811 if (err = segvn_clrszc(badseg1)) {
6812 6812 return (err);
6813 6813 }
6814 6814 ASSERT(badseg1->s_szc == 0);
6815 6815
6816 6816 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6817 6817 uint_t tszc = highbit(tszcvec) - 1;
6818 6818 caddr_t ta = MAX(addr, badseg1->s_base);
6819 6819 caddr_t te;
6820 6820 size_t tpgsz = page_get_pagesize(tszc);
6821 6821
6822 6822 ASSERT(svd->type == MAP_SHARED);
6823 6823 ASSERT(flag == SDR_END);
6824 6824 ASSERT(tszc < szc && tszc > 0);
6825 6825
6826 6826 if (eaddr > badseg1->s_base + badseg1->s_size) {
6827 6827 te = badseg1->s_base + badseg1->s_size;
6828 6828 } else {
6829 6829 te = eaddr;
6830 6830 }
6831 6831
6832 6832 ASSERT(ta <= te);
6833 6833 badseg1->s_szc = tszc;
6834 6834 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6835 6835 if (badseg2 != NULL) {
6836 6836 err = segvn_demote_range(badseg1, ta, te - ta,
6837 6837 SDR_END, tszcvec);
6838 6838 if (err != 0) {
6839 6839 return (err);
6840 6840 }
6841 6841 } else {
6842 6842 return (segvn_demote_range(badseg1, ta,
6843 6843 te - ta, SDR_END, tszcvec));
6844 6844 }
6845 6845 }
6846 6846 }
6847 6847
6848 6848 if (badseg2 == NULL)
6849 6849 return (0);
6850 6850 ASSERT(badseg2->s_szc == szc);
6851 6851 ASSERT(badseg2->s_size == pgsz);
6852 6852 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6853 6853 if (err = segvn_clrszc(badseg2)) {
6854 6854 return (err);
6855 6855 }
6856 6856 ASSERT(badseg2->s_szc == 0);
6857 6857
6858 6858 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6859 6859 uint_t tszc = highbit(tszcvec) - 1;
6860 6860 size_t tpgsz = page_get_pagesize(tszc);
6861 6861
6862 6862 ASSERT(svd->type == MAP_SHARED);
6863 6863 ASSERT(flag == SDR_END);
6864 6864 ASSERT(tszc < szc && tszc > 0);
6865 6865 ASSERT(badseg2->s_base > addr);
6866 6866 ASSERT(eaddr > badseg2->s_base);
6867 6867 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6868 6868
6869 6869 badseg2->s_szc = tszc;
6870 6870 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6871 6871 return (segvn_demote_range(badseg2, badseg2->s_base,
6872 6872 eaddr - badseg2->s_base, SDR_END, tszcvec));
6873 6873 }
6874 6874 }
6875 6875
6876 6876 return (0);
6877 6877 }
6878 6878
6879 6879 static int
6880 6880 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6881 6881 {
6882 6882 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6883 6883 struct vpage *vp, *evp;
6884 6884
6885 6885 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6886 6886
6887 6887 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6888 6888 /*
6889 6889 	 * If segment protections can be used, simply check against them.
6890 6890 */
6891 6891 if (svd->pageprot == 0) {
6892 6892 int err;
6893 6893
6894 6894 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6895 6895 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6896 6896 return (err);
6897 6897 }
6898 6898
6899 6899 /*
6900 6900 * Have to check down to the vpage level.
6901 6901 */
6902 6902 evp = &svd->vpage[seg_page(seg, addr + len)];
6903 6903 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6904 6904 if ((VPP_PROT(vp) & prot) != prot) {
6905 6905 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6906 6906 return (EACCES);
6907 6907 }
6908 6908 }
6909 6909 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6910 6910 return (0);
6911 6911 }
6912 6912
6913 6913 static int
6914 6914 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6915 6915 {
6916 6916 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6917 6917 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6918 6918
6919 6919 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6920 6920
6921 6921 if (pgno != 0) {
6922 6922 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6923 6923 if (svd->pageprot == 0) {
6924 6924 do {
6925 6925 protv[--pgno] = svd->prot;
6926 6926 } while (pgno != 0);
6927 6927 } else {
6928 6928 size_t pgoff = seg_page(seg, addr);
6929 6929
6930 6930 do {
6931 6931 pgno--;
6932 6932 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6933 6933 } while (pgno != 0);
6934 6934 }
6935 6935 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6936 6936 }
6937 6937 return (0);
6938 6938 }
6939 6939
6940 6940 static u_offset_t
6941 6941 segvn_getoffset(struct seg *seg, caddr_t addr)
6942 6942 {
6943 6943 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6944 6944
6945 6945 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6946 6946
6947 6947 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6948 6948 }
6949 6949
6950 6950 /*ARGSUSED*/
6951 6951 static int
6952 6952 segvn_gettype(struct seg *seg, caddr_t addr)
6953 6953 {
6954 6954 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6955 6955
6956 6956 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6957 6957
6958 6958 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6959 6959 MAP_INITDATA)));
6960 6960 }
6961 6961
6962 6962 /*ARGSUSED*/
6963 6963 static int
6964 6964 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6965 6965 {
6966 6966 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6967 6967
6968 6968 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6969 6969
6970 6970 *vpp = svd->vp;
6971 6971 return (0);
6972 6972 }
6973 6973
6974 6974 /*
6975 6975 * Check to see if it makes sense to do kluster/read ahead to
6976 6976 * addr + delta relative to the mapping at addr. We assume here
6977 6977 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6978 6978 *
6979 6979 * For segvn, we currently "approve" of the action if we are
6980 6980 * still in the segment and it maps from the same vp/off,
6981 6981 * or if the advice stored in segvn_data or vpages allows it.
6982 6982 * Currently, klustering is not allowed only if MADV_RANDOM is set.
6983 6983 */
6984 6984 static int
6985 6985 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6986 6986 {
6987 6987 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6988 6988 struct anon *oap, *ap;
6989 6989 ssize_t pd;
6990 6990 size_t page;
6991 6991 struct vnode *vp1, *vp2;
6992 6992 u_offset_t off1, off2;
6993 6993 struct anon_map *amp;
6994 6994
6995 6995 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6996 6996 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6997 6997 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6998 6998
6999 6999 if (addr + delta < seg->s_base ||
7000 7000 addr + delta >= (seg->s_base + seg->s_size))
7001 7001 return (-1); /* exceeded segment bounds */
7002 7002
7003 7003 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
7004 7004 page = seg_page(seg, addr);
7005 7005
7006 7006 /*
7007 7007 * Check to see if either of the pages addr or addr + delta
7008 7008 * have advice set that prevents klustering (if MADV_RANDOM advice
7009 7009 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
7010 7010 * is negative).
7011 7011 */
7012 7012 if (svd->advice == MADV_RANDOM ||
7013 7013 svd->advice == MADV_SEQUENTIAL && delta < 0)
7014 7014 return (-1);
7015 7015 else if (svd->pageadvice && svd->vpage) {
7016 7016 struct vpage *bvpp, *evpp;
7017 7017
7018 7018 bvpp = &svd->vpage[page];
7019 7019 evpp = &svd->vpage[page + pd];
7020 7020 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
7021 7021 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
7022 7022 return (-1);
7023 7023 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
7024 7024 VPP_ADVICE(evpp) == MADV_RANDOM)
7025 7025 return (-1);
7026 7026 }
7027 7027
7028 7028 if (svd->type == MAP_SHARED)
7029 7029 return (0); /* shared mapping - all ok */
7030 7030
7031 7031 if ((amp = svd->amp) == NULL)
7032 7032 return (0); /* off original vnode */
7033 7033
7034 7034 page += svd->anon_index;
7035 7035
7036 7036 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7037 7037
7038 7038 oap = anon_get_ptr(amp->ahp, page);
7039 7039 ap = anon_get_ptr(amp->ahp, page + pd);
7040 7040
7041 7041 	ANON_LOCK_EXIT(&amp->a_rwlock);
7042 7042
7043 7043 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
7044 7044 return (-1); /* one with and one without an anon */
7045 7045 }
7046 7046
7047 7047 if (oap == NULL) { /* implies that ap == NULL */
7048 7048 return (0); /* off original vnode */
7049 7049 }
7050 7050
7051 7051 /*
7052 7052 * Now we know we have two anon pointers - check to
7053 7053 * see if they happen to be properly allocated.
7054 7054 */
7055 7055
7056 7056 /*
7057 7057 * XXX We cheat here and don't lock the anon slots. We can't because
7058 7058 * we may have been called from the anon layer which might already
7059 7059 * have locked them. We are holding a refcnt on the slots so they
7060 7060 * can't disappear. The worst that will happen is we'll get the wrong
7061 7061 * names (vp, off) for the slots and make a poor klustering decision.
7062 7062 */
7063 7063 swap_xlate(ap, &vp1, &off1);
7064 7064 swap_xlate(oap, &vp2, &off2);
7065 7065
7066 7066
7067 7067 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
7068 7068 return (-1);
7069 7069 return (0);
7070 7070 }
7071 7071
7072 7072 /*
7073 7073 * Swap the pages of seg out to secondary storage, returning the
7074 7074 * number of bytes of storage freed.
7075 7075 *
7076 7076 * The basic idea is first to unload all translations and then to call
7077 7077 * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
7078 7078 * swap device. Pages to which other segments have mappings will remain
7079 7079 * mapped and won't be swapped. Our caller (as_swapout) has already
7080 7080 * performed the unloading step.
7081 7081 *
7082 7082 * The value returned is intended to correlate well with the process's
7083 7083 * memory requirements. However, there are some caveats:
7084 7084 * 1) When given a shared segment as argument, this routine will
7085 7085 * only succeed in swapping out pages for the last sharer of the
7086 7086 * segment. (Previous callers will only have decremented mapping
7087 7087 * reference counts.)
7088 7088 * 2) We assume that the hat layer maintains a large enough translation
7089 7089 * cache to capture process reference patterns.
7090 7090 */
7091 7091 static size_t
7092 7092 segvn_swapout(struct seg *seg)
7093 7093 {
7094 7094 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7095 7095 struct anon_map *amp;
7096 7096 pgcnt_t pgcnt = 0;
7097 7097 pgcnt_t npages;
7098 7098 pgcnt_t page;
7099 7099 ulong_t anon_index;
7100 7100
7101 7101 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7102 7102
7103 7103 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7104 7104 /*
7105 7105 * Find pages unmapped by our caller and force them
7106 7106 * out to the virtual swap device.
7107 7107 */
7108 7108 if ((amp = svd->amp) != NULL)
7109 7109 anon_index = svd->anon_index;
7110 7110 npages = seg->s_size >> PAGESHIFT;
7111 7111 for (page = 0; page < npages; page++) {
7112 7112 page_t *pp;
7113 7113 struct anon *ap;
7114 7114 struct vnode *vp;
7115 7115 u_offset_t off;
7116 7116 anon_sync_obj_t cookie;
7117 7117
7118 7118 /*
7119 7119 * Obtain <vp, off> pair for the page, then look it up.
7120 7120 *
7121 7121 * Note that this code is willing to consider regular
7122 7122 * pages as well as anon pages. Is this appropriate here?
7123 7123 */
7124 7124 ap = NULL;
7125 7125 if (amp != NULL) {
7126 7126 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7127 7127 if (anon_array_try_enter(amp, anon_index + page,
7128 7128 &cookie)) {
7129 7129 				ANON_LOCK_EXIT(&amp->a_rwlock);
7130 7130 continue;
7131 7131 }
7132 7132 ap = anon_get_ptr(amp->ahp, anon_index + page);
7133 7133 if (ap != NULL) {
7134 7134 swap_xlate(ap, &vp, &off);
7135 7135 } else {
7136 7136 vp = svd->vp;
7137 7137 off = svd->offset + ptob(page);
7138 7138 }
7139 7139 anon_array_exit(&cookie);
7140 7140 			ANON_LOCK_EXIT(&amp->a_rwlock);
7141 7141 } else {
7142 7142 vp = svd->vp;
7143 7143 off = svd->offset + ptob(page);
7144 7144 }
7145 7145 if (vp == NULL) { /* untouched zfod page */
7146 7146 ASSERT(ap == NULL);
7147 7147 continue;
7148 7148 }
7149 7149
7150 7150 pp = page_lookup_nowait(vp, off, SE_SHARED);
7151 7151 if (pp == NULL)
7152 7152 continue;
7153 7153
7154 7154
7155 7155 /*
7156 7156 * Examine the page to see whether it can be tossed out,
7157 7157 * keeping track of how many we've found.
7158 7158 */
7159 7159 if (!page_tryupgrade(pp)) {
7160 7160 /*
7161 7161 * If the page has an i/o lock and no mappings,
7162 7162 * it's very likely that the page is being
7163 7163 * written out as a result of klustering.
7164 7164 * Assume this is so and take credit for it here.
7165 7165 */
7166 7166 if (!page_io_trylock(pp)) {
7167 7167 if (!hat_page_is_mapped(pp))
7168 7168 pgcnt++;
7169 7169 } else {
7170 7170 page_io_unlock(pp);
7171 7171 }
7172 7172 page_unlock(pp);
7173 7173 continue;
7174 7174 }
7175 7175 ASSERT(!page_iolock_assert(pp));
7176 7176
7177 7177
7178 7178 /*
7179 7179 * Skip if page is locked or has mappings.
7180 7180 * We don't need the page_struct_lock to look at lckcnt
7181 7181 * and cowcnt because the page is exclusive locked.
7182 7182 */
7183 7183 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
7184 7184 hat_page_is_mapped(pp)) {
7185 7185 page_unlock(pp);
7186 7186 continue;
7187 7187 }
7188 7188
7189 7189 /*
7190 7190 * dispose skips large pages so try to demote first.
7191 7191 */
7192 7192 if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
7193 7193 page_unlock(pp);
7194 7194 /*
7195 7195 * XXX should skip the remaining page_t's of this
7196 7196 * large page.
7197 7197 */
7198 7198 continue;
7199 7199 }
7200 7200
7201 7201 ASSERT(pp->p_szc == 0);
7202 7202
7203 7203 /*
7204 7204 * No longer mapped -- we can toss it out. How
7205 7205 * we do so depends on whether or not it's dirty.
7206 7206 */
7207 7207 if (hat_ismod(pp) && pp->p_vnode) {
7208 7208 /*
7209 7209 * We must clean the page before it can be
7210 7210 * freed. Setting B_FREE will cause pvn_done
7211 7211 * to free the page when the i/o completes.
7212 7212 * XXX: This also causes it to be accounted
7213 7213 * as a pageout instead of a swap: need
7214 7214 * B_SWAPOUT bit to use instead of B_FREE.
7215 7215 *
7216 7216 * Hold the vnode before releasing the page lock
7217 7217 * to prevent it from being freed and re-used by
7218 7218 * some other thread.
7219 7219 */
7220 7220 VN_HOLD(vp);
7221 7221 page_unlock(pp);
7222 7222
7223 7223 /*
7224 7224 * Queue all i/o requests for the pageout thread
7225 7225 * to avoid saturating the pageout devices.
7226 7226 */
7227 7227 if (!queue_io_request(vp, off))
7228 7228 VN_RELE(vp);
7229 7229 } else {
7230 7230 /*
7231 7231 * The page was clean, free it.
7232 7232 *
7233 7233 * XXX: Can we ever encounter modified pages
7234 7234 * with no associated vnode here?
7235 7235 */
7236 7236 ASSERT(pp->p_vnode != NULL);
7237 7237 /*LINTED: constant in conditional context*/
7238 7238 VN_DISPOSE(pp, B_FREE, 0, kcred);
7239 7239 }
7240 7240
7241 7241 /*
7242 7242 * Credit now even if i/o is in progress.
7243 7243 */
7244 7244 pgcnt++;
7245 7245 }
7246 7246 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7247 7247
7248 7248 /*
7249 7249 * Wakeup pageout to initiate i/o on all queued requests.
7250 7250 */
7251 7251 cv_signal_pageout();
7252 7252 return (ptob(pgcnt));
7253 7253 }
7254 7254
7255 7255 /*
7256 7256 * Synchronize primary storage cache with real object in virtual memory.
7257 7257 *
7258 7258 * XXX - Anonymous pages should not be sync'ed out at all.
7259 7259 */
7260 7260 static int
7261 7261 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7262 7262 {
7263 7263 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7264 7264 struct vpage *vpp;
7265 7265 page_t *pp;
7266 7266 u_offset_t offset;
7267 7267 struct vnode *vp;
7268 7268 u_offset_t off;
7269 7269 caddr_t eaddr;
7270 7270 int bflags;
7271 7271 int err = 0;
7272 7272 int segtype;
7273 7273 int pageprot;
7274 7274 int prot;
7275 7275 ulong_t anon_index;
7276 7276 struct anon_map *amp;
7277 7277 struct anon *ap;
7278 7278 anon_sync_obj_t cookie;
7279 7279
7280 7280 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7281 7281
7282 7282 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7283 7283
7284 7284 if (svd->softlockcnt > 0) {
7285 7285 /*
7286 7286 		 * If this is a shared segment, a non-zero softlockcnt
7287 7287 		 * means locked pages are still in use.
7288 7288 */
7289 7289 if (svd->type == MAP_SHARED) {
7290 7290 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7291 7291 return (EAGAIN);
7292 7292 }
7293 7293
7294 7294 /*
7295 7295 * flush all pages from seg cache
7296 7296 * otherwise we may deadlock in swap_putpage
7297 7297 * for B_INVAL page (4175402).
7298 7298 *
7299 7299 * Even if we grab segvn WRITER's lock
7300 7300 * here, there might be another thread which could've
7301 7301 * successfully performed lookup/insert just before
7302 7302 * we acquired the lock here. So, grabbing either
7303 7303 		 * lock here is not of much use. Until we devise
7304 7304 * a strategy at upper layers to solve the
7305 7305 * synchronization issues completely, we expect
7306 7306 * applications to handle this appropriately.
7307 7307 */
7308 7308 segvn_purge(seg);
7309 7309 if (svd->softlockcnt > 0) {
7310 7310 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7311 7311 return (EAGAIN);
7312 7312 }
7313 7313 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7314 7314 svd->amp->a_softlockcnt > 0) {
7315 7315 /*
7316 7316 * Try to purge this amp's entries from pcache. It will
7317 7317 * succeed only if other segments that share the amp have no
7318 7318 * outstanding softlock's.
7319 7319 */
7320 7320 segvn_purge(seg);
7321 7321 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7322 7322 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7323 7323 return (EAGAIN);
7324 7324 }
7325 7325 }
7326 7326
7327 7327 vpp = svd->vpage;
7328 7328 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7329 7329 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7330 7330 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7331 7331
7332 7332 if (attr) {
7333 7333 pageprot = attr & ~(SHARED|PRIVATE);
7334 7334 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7335 7335
7336 7336 /*
7337 7337 * We are done if the segment types don't match
7338 7338 * or if we have segment level protections and
7339 7339 * they don't match.
7340 7340 */
7341 7341 if (svd->type != segtype) {
7342 7342 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7343 7343 return (0);
7344 7344 }
7345 7345 if (vpp == NULL) {
7346 7346 if (svd->prot != pageprot) {
7347 7347 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7348 7348 return (0);
7349 7349 }
7350 7350 prot = svd->prot;
7351 7351 } else
7352 7352 vpp = &svd->vpage[seg_page(seg, addr)];
7353 7353
7354 7354 } else if (svd->vp && svd->amp == NULL &&
7355 7355 (flags & MS_INVALIDATE) == 0) {
7356 7356
7357 7357 /*
7358 7358 * No attributes, no anonymous pages and MS_INVALIDATE flag
7359 7359 * is not on, just use one big request.
7360 7360 */
7361 7361 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7362 7362 bflags, svd->cred, NULL);
7363 7363 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7364 7364 return (err);
7365 7365 }
7366 7366
7367 7367 if ((amp = svd->amp) != NULL)
7368 7368 anon_index = svd->anon_index + seg_page(seg, addr);
7369 7369
7370 7370 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7371 7371 ap = NULL;
7372 7372 if (amp != NULL) {
7373 7373 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7374 7374 anon_array_enter(amp, anon_index, &cookie);
7375 7375 ap = anon_get_ptr(amp->ahp, anon_index++);
7376 7376 if (ap != NULL) {
7377 7377 swap_xlate(ap, &vp, &off);
7378 7378 } else {
7379 7379 vp = svd->vp;
7380 7380 off = offset;
7381 7381 }
7382 7382 anon_array_exit(&cookie);
7383 7383 			ANON_LOCK_EXIT(&amp->a_rwlock);
7384 7384 } else {
7385 7385 vp = svd->vp;
7386 7386 off = offset;
7387 7387 }
7388 7388 offset += PAGESIZE;
7389 7389
7390 7390 if (vp == NULL) /* untouched zfod page */
7391 7391 continue;
7392 7392
7393 7393 if (attr) {
7394 7394 if (vpp) {
7395 7395 prot = VPP_PROT(vpp);
7396 7396 vpp++;
7397 7397 }
7398 7398 if (prot != pageprot) {
7399 7399 continue;
7400 7400 }
7401 7401 }
7402 7402
7403 7403 /*
7404 7404 * See if any of these pages are locked -- if so, then we
7405 7405 * will have to truncate an invalidate request at the first
7406 7406 * locked one. We don't need the page_struct_lock to test
7407 7407 * as this is only advisory; even if we acquire it someone
7408 7408 * might race in and lock the page after we unlock and before
7409 7409 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7410 7410 */
7411 7411 if (flags & MS_INVALIDATE) {
7412 7412 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7413 7413 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7414 7414 page_unlock(pp);
7415 7415 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7416 7416 return (EBUSY);
7417 7417 }
7418 7418 if (ap != NULL && pp->p_szc != 0 &&
7419 7419 page_tryupgrade(pp)) {
7420 7420 if (pp->p_lckcnt == 0 &&
7421 7421 pp->p_cowcnt == 0) {
7422 7422 /*
7423 7423 * swapfs VN_DISPOSE() won't
7424 7424 * invalidate large pages.
7425 7425 * Attempt to demote.
7426 7426 * XXX can't help it if it
7427 7427 * fails. But for swapfs
7428 7428 * pages it is no big deal.
7429 7429 */
7430 7430 (void) page_try_demote_pages(
7431 7431 pp);
7432 7432 }
7433 7433 }
7434 7434 page_unlock(pp);
7435 7435 }
7436 7436 } else if (svd->type == MAP_SHARED && amp != NULL) {
7437 7437 /*
7438 7438 * Avoid writing out to disk ISM's large pages
7439 7439 * because segspt_free_pages() relies on NULL an_pvp
7440 7440 * of anon slots of such pages.
7441 7441 */
7442 7442
7443 7443 ASSERT(svd->vp == NULL);
7444 7444 /*
7445 7445 * swapfs uses page_lookup_nowait if not freeing or
7446 7446 * invalidating and skips a page if
7447 7447 * page_lookup_nowait returns NULL.
7448 7448 */
7449 7449 pp = page_lookup_nowait(vp, off, SE_SHARED);
7450 7450 if (pp == NULL) {
7451 7451 continue;
7452 7452 }
7453 7453 if (pp->p_szc != 0) {
7454 7454 page_unlock(pp);
7455 7455 continue;
7456 7456 }
7457 7457
7458 7458 /*
7459 7459 * Note ISM pages are created large so (vp, off)'s
7460 7460 * page cannot suddenly become large after we unlock
7461 7461 * pp.
7462 7462 */
7463 7463 page_unlock(pp);
7464 7464 }
7465 7465 /*
7466 7466 * XXX - Should ultimately try to kluster
7467 7467 * calls to VOP_PUTPAGE() for performance.
7468 7468 */
7469 7469 VN_HOLD(vp);
7470 7470 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7471 7471 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7472 7472 svd->cred, NULL);
7473 7473
7474 7474 VN_RELE(vp);
7475 7475 if (err)
7476 7476 break;
7477 7477 }
7478 7478 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7479 7479 return (err);
7480 7480 }
7481 7481
7482 7482 /*
7483 7483 * Determine if we have data corresponding to pages in the
7484 7484 * primary storage virtual memory cache (i.e., "in core").
7485 7485 */
7486 7486 static size_t
7487 7487 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7488 7488 {
7489 7489 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7490 7490 struct vnode *vp, *avp;
7491 7491 u_offset_t offset, aoffset;
7492 7492 size_t p, ep;
7493 7493 int ret;
7494 7494 struct vpage *vpp;
7495 7495 page_t *pp;
7496 7496 uint_t start;
7497 7497 struct anon_map *amp; /* XXX - for locknest */
7498 7498 struct anon *ap;
7499 7499 uint_t attr;
7500 7500 anon_sync_obj_t cookie;
7501 7501
7502 7502 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7503 7503
7504 7504 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7505 7505 if (svd->amp == NULL && svd->vp == NULL) {
7506 7506 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7507 7507 bzero(vec, btopr(len));
7508 7508 return (len); /* no anonymous pages created yet */
7509 7509 }
7510 7510
7511 7511 p = seg_page(seg, addr);
7512 7512 ep = seg_page(seg, addr + len);
7513 7513 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7514 7514
7515 7515 amp = svd->amp;
7516 7516 for (; p < ep; p++, addr += PAGESIZE) {
7517 7517 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7518 7518 ret = start;
7519 7519 ap = NULL;
7520 7520 avp = NULL;
7521 7521 /* Grab the vnode/offset for the anon slot */
7522 7522 if (amp != NULL) {
7523 7523 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7524 7524 anon_array_enter(amp, svd->anon_index + p, &cookie);
7525 7525 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7526 7526 if (ap != NULL) {
7527 7527 swap_xlate(ap, &avp, &aoffset);
7528 7528 }
7529 7529 anon_array_exit(&cookie);
7530 7530 			ANON_LOCK_EXIT(&amp->a_rwlock);
7531 7531 }
7532 7532 if ((avp != NULL) && page_exists(avp, aoffset)) {
7533 7533 /* A page exists for the anon slot */
7534 7534 ret |= SEG_PAGE_INCORE;
7535 7535
7536 7536 /*
7537 7537 * If page is mapped and writable
7538 7538 */
7539 7539 attr = (uint_t)0;
7540 7540 if ((hat_getattr(seg->s_as->a_hat, addr,
7541 7541 &attr) != -1) && (attr & PROT_WRITE)) {
7542 7542 ret |= SEG_PAGE_ANON;
7543 7543 }
7544 7544 /*
7545 7545 * Don't get page_struct lock for lckcnt and cowcnt,
7546 7546 * since this is purely advisory.
7547 7547 */
7548 7548 if ((pp = page_lookup_nowait(avp, aoffset,
7549 7549 SE_SHARED)) != NULL) {
7550 7550 if (pp->p_lckcnt)
7551 7551 ret |= SEG_PAGE_SOFTLOCK;
7552 7552 if (pp->p_cowcnt)
7553 7553 ret |= SEG_PAGE_HASCOW;
7554 7554 page_unlock(pp);
7555 7555 }
7556 7556 }
7557 7557
7558 7558 /* Gather vnode statistics */
7559 7559 vp = svd->vp;
7560 7560 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7561 7561
7562 7562 if (vp != NULL) {
7563 7563 /*
7564 7564 * Try to obtain a "shared" lock on the page
7565 7565 * without blocking. If this fails, determine
7566 7566 * if the page is in memory.
7567 7567 */
7568 7568 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7569 7569 if ((pp == NULL) && (page_exists(vp, offset))) {
7570 7570 /* Page is incore, and is named */
7571 7571 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7572 7572 }
7573 7573 /*
7574 7574 * Don't get page_struct lock for lckcnt and cowcnt,
7575 7575 * since this is purely advisory.
7576 7576 */
7577 7577 if (pp != NULL) {
7578 7578 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7579 7579 if (pp->p_lckcnt)
7580 7580 ret |= SEG_PAGE_SOFTLOCK;
7581 7581 if (pp->p_cowcnt)
7582 7582 ret |= SEG_PAGE_HASCOW;
7583 7583 page_unlock(pp);
7584 7584 }
7585 7585 }
7586 7586
7587 7587 /* Gather virtual page information */
7588 7588 if (vpp) {
7589 7589 if (VPP_ISPPLOCK(vpp))
7590 7590 ret |= SEG_PAGE_LOCKED;
7591 7591 vpp++;
7592 7592 }
7593 7593
7594 7594 *vec++ = (char)ret;
7595 7595 }
7596 7596 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7597 7597 return (len);
7598 7598 }
7599 7599
7600 7600 /*
7601 7601 * Statement for p_cowcnts/p_lckcnts.
7602 7602 *
7603 7603 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7604 7604 * irrespective of the following factors or anything else:
7605 7605 *
7606 7606 * (1) anon slots are populated or not
7607 7607 * (2) cow is broken or not
7608 7608 * (3) refcnt on ap is 1 or greater than 1
7609 7609 *
7610 7610 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7611 7611 * and munlock.
7612 7612 *
7613 7613 *
7614 7614 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7615 7615 *
7616 7616 * if vpage has PROT_WRITE
7617 7617 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7618 7618 * else
7619 7619 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7620 7620 *
7621 7621 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7622 7622 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7623 7623 *
7624 7624 * We may also break COW if softlocking on read access in the physio case.
7625 7625 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7626 7626 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7627 7627 * vpage doesn't have PROT_WRITE.
7628 7628 *
7629 7629 *
7630 7630 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7631 7631 *
7632 7632 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7633 7633 * increment p_lckcnt by calling page_subclaim() which takes care of
7634 7634 * availrmem accounting and p_lckcnt overflow.
7635 7635 *
7636 7636 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7637 7637 * increment p_cowcnt by calling page_addclaim() which takes care of
7638 7638 * availrmem availability and p_cowcnt overflow.
7639 7639 */
7640 7640
7641 7641 /*
7642 7642 * Lock down (or unlock) pages mapped by this segment.
7643 7643 *
7644 7644 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7645 7645 * At fault time they will be relocated into larger pages.
7646 7646 */
7647 7647 static int
7648 7648 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7649 7649 int attr, int op, ulong_t *lockmap, size_t pos)
7650 7650 {
7651 7651 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7652 7652 struct vpage *vpp;
7653 7653 struct vpage *evp;
7654 7654 page_t *pp;
7655 7655 u_offset_t offset;
7656 7656 u_offset_t off;
7657 7657 int segtype;
7658 7658 int pageprot;
7659 7659 int claim;
7660 7660 struct vnode *vp;
7661 7661 ulong_t anon_index;
7662 7662 struct anon_map *amp;
7663 7663 struct anon *ap;
7664 7664 struct vattr va;
7665 7665 anon_sync_obj_t cookie;
7666 7666 struct kshmid *sp = NULL;
7667 7667 struct proc *p = curproc;
7668 7668 kproject_t *proj = NULL;
7669 7669 int chargeproc = 1;
7670 7670 size_t locked_bytes = 0;
7671 7671 size_t unlocked_bytes = 0;
7672 7672 int err = 0;
7673 7673
7674 7674 /*
7675 7675 	 * Hold write lock on address space because we may split or concatenate
7676 7676 * segments
7677 7677 */
7678 7678 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7679 7679
7680 7680 /*
7681 7681 * If this is a shm, use shm's project and zone, else use
7682 7682 * project and zone of calling process
7683 7683 */
7684 7684
7685 7685 /* Determine if this segment backs a sysV shm */
7686 7686 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7687 7687 ASSERT(svd->type == MAP_SHARED);
7688 7688 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7689 7689 sp = svd->amp->a_sp;
7690 7690 proj = sp->shm_perm.ipc_proj;
7691 7691 chargeproc = 0;
7692 7692 }
7693 7693
7694 7694 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7695 7695 if (attr) {
7696 7696 pageprot = attr & ~(SHARED|PRIVATE);
7697 7697 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7698 7698
7699 7699 /*
7700 7700 * We are done if the segment types don't match
7701 7701 * or if we have segment level protections and
7702 7702 * they don't match.
7703 7703 */
7704 7704 if (svd->type != segtype) {
7705 7705 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7706 7706 return (0);
7707 7707 }
7708 7708 if (svd->pageprot == 0 && svd->prot != pageprot) {
7709 7709 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7710 7710 return (0);
7711 7711 }
7712 7712 }
7713 7713
7714 7714 if (op == MC_LOCK) {
7715 7715 if (svd->tr_state == SEGVN_TR_INIT) {
7716 7716 svd->tr_state = SEGVN_TR_OFF;
7717 7717 } else if (svd->tr_state == SEGVN_TR_ON) {
7718 7718 ASSERT(svd->amp != NULL);
7719 7719 segvn_textunrepl(seg, 0);
7720 7720 ASSERT(svd->amp == NULL &&
7721 7721 svd->tr_state == SEGVN_TR_OFF);
7722 7722 }
7723 7723 }
7724 7724
7725 7725 /*
7726 7726 * If we're locking, then we must create a vpage structure if
7727 7727 * none exists. If we're unlocking, then check to see if there
7728 7728 * is a vpage -- if not, then we could not have locked anything.
7729 7729 */
7730 7730
7731 7731 if ((vpp = svd->vpage) == NULL) {
7732 7732 if (op == MC_LOCK) {
7733 7733 segvn_vpage(seg);
7734 7734 if (svd->vpage == NULL) {
7735 7735 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7736 7736 return (ENOMEM);
7737 7737 }
7738 7738 } else {
7739 7739 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7740 7740 return (0);
7741 7741 }
7742 7742 }
7743 7743
7744 7744 /*
7745 7745 * The anonymous data vector (i.e., previously
7746 7746 * unreferenced mapping to swap space) can be allocated
7747 7747 * by lazily testing for its existence.
7748 7748 */
7749 7749 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7750 7750 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7751 7751 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7752 7752 svd->amp->a_szc = seg->s_szc;
7753 7753 }
7754 7754
7755 7755 if ((amp = svd->amp) != NULL) {
7756 7756 anon_index = svd->anon_index + seg_page(seg, addr);
7757 7757 }
7758 7758
7759 7759 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7760 7760 evp = &svd->vpage[seg_page(seg, addr + len)];
7761 7761
7762 7762 if (sp != NULL)
7763 7763 mutex_enter(&sp->shm_mlock);
7764 7764
7765 7765 /* determine number of unlocked bytes in range for lock operation */
7766 7766 if (op == MC_LOCK) {
7767 7767
7768 7768 if (sp == NULL) {
7769 7769 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7770 7770 vpp++) {
7771 7771 if (!VPP_ISPPLOCK(vpp))
7772 7772 unlocked_bytes += PAGESIZE;
7773 7773 }
7774 7774 } else {
7775 7775 ulong_t i_idx, i_edx;
7776 7776 anon_sync_obj_t i_cookie;
7777 7777 struct anon *i_ap;
7778 7778 struct vnode *i_vp;
7779 7779 u_offset_t i_off;
7780 7780
7781 7781 /* Only count sysV pages once for locked memory */
7782 7782 i_edx = svd->anon_index + seg_page(seg, addr + len);
7783 7783 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7784 7784 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7785 7785 anon_array_enter(amp, i_idx, &i_cookie);
7786 7786 i_ap = anon_get_ptr(amp->ahp, i_idx);
7787 7787 if (i_ap == NULL) {
7788 7788 unlocked_bytes += PAGESIZE;
7789 7789 anon_array_exit(&i_cookie);
7790 7790 continue;
7791 7791 }
7792 7792 swap_xlate(i_ap, &i_vp, &i_off);
7793 7793 anon_array_exit(&i_cookie);
7794 7794 pp = page_lookup(i_vp, i_off, SE_SHARED);
7795 7795 if (pp == NULL) {
7796 7796 unlocked_bytes += PAGESIZE;
7797 7797 continue;
7798 7798 } else if (pp->p_lckcnt == 0)
7799 7799 unlocked_bytes += PAGESIZE;
7800 7800 page_unlock(pp);
7801 7801 }
7802 7802 			ANON_LOCK_EXIT(&amp->a_rwlock);
7803 7803 }
7804 7804
7805 7805 mutex_enter(&p->p_lock);
7806 7806 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7807 7807 chargeproc);
7808 7808 mutex_exit(&p->p_lock);
7809 7809
7810 7810 if (err) {
7811 7811 if (sp != NULL)
7812 7812 mutex_exit(&sp->shm_mlock);
7813 7813 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7814 7814 return (err);
7815 7815 }
7816 7816 }
7817 7817 /*
7818 7818 * Loop over all pages in the range. Process if we're locking and
7819 7819 * page has not already been locked in this mapping; or if we're
7820 7820 * unlocking and the page has been locked.
7821 7821 */
7822 7822 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7823 7823 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7824 7824 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7825 7825 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7826 7826 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7827 7827
7828 7828 if (amp != NULL)
7829 7829 				ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7830 7830 /*
7831 7831 * If this isn't a MAP_NORESERVE segment and
7832 7832 * we're locking, allocate anon slots if they
7833 7833 * don't exist. The page is brought in later on.
7834 7834 */
7835 7835 if (op == MC_LOCK && svd->vp == NULL &&
7836 7836 ((svd->flags & MAP_NORESERVE) == 0) &&
7837 7837 amp != NULL &&
7838 7838 ((ap = anon_get_ptr(amp->ahp, anon_index))
7839 7839 == NULL)) {
7840 7840 anon_array_enter(amp, anon_index, &cookie);
7841 7841
7842 7842 if ((ap = anon_get_ptr(amp->ahp,
7843 7843 anon_index)) == NULL) {
7844 7844 pp = anon_zero(seg, addr, &ap,
7845 7845 svd->cred);
7846 7846 if (pp == NULL) {
7847 7847 anon_array_exit(&cookie);
7848 7848 						ANON_LOCK_EXIT(&amp->a_rwlock);
7849 7849 err = ENOMEM;
7850 7850 goto out;
7851 7851 }
7852 7852 ASSERT(anon_get_ptr(amp->ahp,
7853 7853 anon_index) == NULL);
7854 7854 (void) anon_set_ptr(amp->ahp,
7855 7855 anon_index, ap, ANON_SLEEP);
7856 7856 page_unlock(pp);
7857 7857 }
7858 7858 anon_array_exit(&cookie);
7859 7859 }
7860 7860
7861 7861 /*
7862 7862 * Get name for page, accounting for
7863 7863 * existence of private copy.
7864 7864 */
7865 7865 ap = NULL;
7866 7866 if (amp != NULL) {
7867 7867 anon_array_enter(amp, anon_index, &cookie);
7868 7868 ap = anon_get_ptr(amp->ahp, anon_index);
7869 7869 if (ap != NULL) {
7870 7870 swap_xlate(ap, &vp, &off);
7871 7871 } else {
7872 7872 if (svd->vp == NULL &&
7873 7873 (svd->flags & MAP_NORESERVE)) {
7874 7874 anon_array_exit(&cookie);
7875 7875 						ANON_LOCK_EXIT(&amp->a_rwlock);
7876 7876 continue;
7877 7877 }
7878 7878 vp = svd->vp;
7879 7879 off = offset;
7880 7880 }
7881 7881 if (op != MC_LOCK || ap == NULL) {
7882 7882 anon_array_exit(&cookie);
7883 7883 					ANON_LOCK_EXIT(&amp->a_rwlock);
7884 7884 }
7885 7885 } else {
7886 7886 vp = svd->vp;
7887 7887 off = offset;
7888 7888 }
7889 7889
7890 7890 /*
7891 7891 * Get page frame. It's ok if the page is
7892 7892 * not available when we're unlocking, as this
7893 7893 * may simply mean that a page we locked got
7894 7894 * truncated out of existence after we locked it.
7895 7895 *
7896 7896 * Invoke VOP_GETPAGE() to obtain the page struct
7897 7897 * since we may need to read it from disk if its
7898 7898 			 * since we may need to read it from disk if it's
7899 7899 */
7900 7900 if (op != MC_LOCK)
7901 7901 pp = page_lookup(vp, off, SE_SHARED);
7902 7902 else {
7903 7903 page_t *pl[1 + 1];
7904 7904 int error;
7905 7905
7906 7906 ASSERT(vp != NULL);
7907 7907
7908 7908 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7909 7909 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7910 7910 S_OTHER, svd->cred, NULL);
7911 7911
7912 7912 if (error && ap != NULL) {
7913 7913 anon_array_exit(&cookie);
7914 7914 					ANON_LOCK_EXIT(&amp->a_rwlock);
7915 7915 }
7916 7916
7917 7917 /*
7918 7918 * If the error is EDEADLK then we must bounce
7919 7919 * up and drop all vm subsystem locks and then
7920 7920 * retry the operation later
7921 7921 * This behavior is a temporary measure because
7922 7922 * ufs/sds logging is badly designed and will
7923 7923 * deadlock if we don't allow this bounce to
7924 7924 * happen. The real solution is to re-design
7925 7925 * the logging code to work properly. See bug
7926 7926 * 4125102 for details of the problem.
7927 7927 */
7928 7928 if (error == EDEADLK) {
7929 7929 err = error;
7930 7930 goto out;
7931 7931 }
7932 7932 /*
7933 7933 * Quit if we fail to fault in the page. Treat
7934 7934 * the failure as an error, unless the addr
7935 7935 * is mapped beyond the end of a file.
7936 7936 */
7937 7937 if (error && svd->vp) {
7938 7938 va.va_mask = AT_SIZE;
7939 7939 if (VOP_GETATTR(svd->vp, &va, 0,
7940 7940 svd->cred, NULL) != 0) {
7941 7941 err = EIO;
7942 7942 goto out;
7943 7943 }
7944 7944 if (btopr(va.va_size) >=
7945 7945 btopr(off + 1)) {
7946 7946 err = EIO;
7947 7947 goto out;
7948 7948 }
7949 7949 goto out;
7950 7950
7951 7951 } else if (error) {
7952 7952 err = EIO;
7953 7953 goto out;
7954 7954 }
7955 7955 pp = pl[0];
7956 7956 ASSERT(pp != NULL);
7957 7957 }
7958 7958
7959 7959 /*
7960 7960 * See Statement at the beginning of this routine.
7961 7961 *
7962 7962 * claim is always set if MAP_PRIVATE and PROT_WRITE
7963 7963 * irrespective of following factors:
7964 7964 			 * irrespective of the following factors:
7965 7965 * (1) anon slots are populated or not
7966 7966 * (2) cow is broken or not
7967 7967 * (3) refcnt on ap is 1 or greater than 1
7968 7968 *
7969 7969 * See 4140683 for details
7970 7970 */
7971 7971 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7972 7972 (svd->type == MAP_PRIVATE));
7973 7973
7974 7974 /*
7975 7975 * Perform page-level operation appropriate to
7976 7976 * operation. If locking, undo the SOFTLOCK
7977 7977 * performed to bring the page into memory
7978 7978 * after setting the lock. If unlocking,
7979 7979 * and no page was found, account for the claim
7980 7980 * separately.
7981 7981 */
7982 7982 if (op == MC_LOCK) {
7983 7983 int ret = 1; /* Assume success */
7984 7984
7985 7985 ASSERT(!VPP_ISPPLOCK(vpp));
7986 7986
7987 7987 ret = page_pp_lock(pp, claim, 0);
7988 7988 if (ap != NULL) {
7989 7989 if (ap->an_pvp != NULL) {
7990 7990 anon_swap_free(ap, pp);
7991 7991 }
7992 7992 anon_array_exit(&cookie);
7993 7993 					ANON_LOCK_EXIT(&amp->a_rwlock);
7994 7994 }
7995 7995 if (ret == 0) {
7996 7996 /* locking page failed */
7997 7997 page_unlock(pp);
7998 7998 err = EAGAIN;
7999 7999 goto out;
8000 8000 }
8001 8001 VPP_SETPPLOCK(vpp);
8002 8002 if (sp != NULL) {
8003 8003 if (pp->p_lckcnt == 1)
8004 8004 locked_bytes += PAGESIZE;
8005 8005 } else
8006 8006 locked_bytes += PAGESIZE;
8007 8007
8008 8008 if (lockmap != (ulong_t *)NULL)
8009 8009 BT_SET(lockmap, pos);
8010 8010
8011 8011 page_unlock(pp);
8012 8012 } else {
8013 8013 ASSERT(VPP_ISPPLOCK(vpp));
8014 8014 if (pp != NULL) {
8015 8015 /* sysV pages should be locked */
8016 8016 ASSERT(sp == NULL || pp->p_lckcnt > 0);
8017 8017 page_pp_unlock(pp, claim, 0);
8018 8018 if (sp != NULL) {
8019 8019 if (pp->p_lckcnt == 0)
8020 8020 unlocked_bytes
8021 8021 += PAGESIZE;
8022 8022 } else
8023 8023 unlocked_bytes += PAGESIZE;
8024 8024 page_unlock(pp);
8025 8025 } else {
8026 8026 ASSERT(sp == NULL);
8027 8027 unlocked_bytes += PAGESIZE;
8028 8028 }
8029 8029 VPP_CLRPPLOCK(vpp);
8030 8030 }
8031 8031 }
8032 8032 }
8033 8033 out:
8034 8034 if (op == MC_LOCK) {
8035 8035 /* Credit back bytes that did not get locked */
8036 8036 if ((unlocked_bytes - locked_bytes) > 0) {
8037 8037 if (proj == NULL)
8038 8038 mutex_enter(&p->p_lock);
8039 8039 rctl_decr_locked_mem(p, proj,
8040 8040 (unlocked_bytes - locked_bytes), chargeproc);
8041 8041 if (proj == NULL)
8042 8042 mutex_exit(&p->p_lock);
8043 8043 }
8044 8044
8045 8045 } else {
8046 8046 /* Account bytes that were unlocked */
8047 8047 if (unlocked_bytes > 0) {
8048 8048 if (proj == NULL)
8049 8049 mutex_enter(&p->p_lock);
8050 8050 rctl_decr_locked_mem(p, proj, unlocked_bytes,
8051 8051 chargeproc);
8052 8052 if (proj == NULL)
8053 8053 mutex_exit(&p->p_lock);
8054 8054 }
8055 8055 }
8056 8056 if (sp != NULL)
8057 8057 mutex_exit(&sp->shm_mlock);
8058 8058 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8059 8059
8060 8060 return (err);
8061 8061 }
8062 8062
8063 8063 /*
8064 8064 * Set advice from user for specified pages
8065 8065 * There are 9 types of advice:
8066 8066 * MADV_NORMAL - Normal (default) behavior (whatever that is)
8067 8067 * MADV_RANDOM - Random page references
8068 8068 * do not allow readahead or 'klustering'
8069 8069 * MADV_SEQUENTIAL - Sequential page references
8070 8070 * Pages previous to the one currently being
8071 8071 * accessed (determined by fault) are 'not needed'
8072 8072 * and are freed immediately
8073 8073 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
8074 8074 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
8075 8075 * MADV_FREE - Contents can be discarded
8076 8076 * MADV_ACCESS_DEFAULT- Default access
8077 8077 * MADV_ACCESS_LWP - Next LWP will access heavily
8078 8078 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
8079 8079 */
8080 8080 static int
8081 8081 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8082 8082 {
8083 8083 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8084 8084 size_t page;
8085 8085 int err = 0;
8086 8086 int already_set;
8087 8087 struct anon_map *amp;
8088 8088 ulong_t anon_index;
8089 8089 struct seg *next;
8090 8090 lgrp_mem_policy_t policy;
8091 8091 struct seg *prev;
8092 8092 struct vnode *vp;
8093 8093
8094 8094 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8095 8095
8096 8096 /*
8097 8097 * In case of MADV_FREE, we won't be modifying any segment private
8098 8098 * data structures; so, we only need to grab READER's lock
8099 8099 */
8100 8100 if (behav != MADV_FREE) {
8101 8101 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8102 8102 if (svd->tr_state != SEGVN_TR_OFF) {
8103 8103 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8104 8104 return (0);
8105 8105 }
8106 8106 } else {
8107 8107 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8108 8108 }
8109 8109
8110 8110 /*
8111 8111 * Large pages are assumed to be only turned on when accesses to the
8112 8112 * segment's address range have spatial and temporal locality. That
8113 8113 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8114 8114 * Also, ignore advice affecting lgroup memory allocation
8115 8115 	 * if we don't need to do lgroup optimizations on this system
8116 8116 */
8117 8117
8118 8118 if ((behav == MADV_SEQUENTIAL &&
8119 8119 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8120 8120 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
8121 8121 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
8122 8122 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8123 8123 return (0);
8124 8124 }
8125 8125
8126 8126 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
8127 8127 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
8128 8128 /*
8129 8129 * Since we are going to unload hat mappings
8130 8130 * we first have to flush the cache. Otherwise
8131 8131 * this might lead to system panic if another
8132 8132 * thread is doing physio on the range whose
8133 8133 * mappings are unloaded by madvise(3C).
8134 8134 */
8135 8135 if (svd->softlockcnt > 0) {
8136 8136 /*
8137 8137 			 * If this is a shared segment, a non-zero softlockcnt
8138 8138 			 * means locked pages are still in use.
8139 8139 */
8140 8140 if (svd->type == MAP_SHARED) {
8141 8141 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8142 8142 return (EAGAIN);
8143 8143 }
8144 8144 /*
8145 8145 * Since we do have the segvn writers lock
8146 8146 * nobody can fill the cache with entries
8147 8147 * belonging to this seg during the purge.
8148 8148 * The flush either succeeds or we still
8149 8149 			 * have pending I/Os. In the latter case,
8150 8150 * madvise(3C) fails.
8151 8151 */
8152 8152 segvn_purge(seg);
8153 8153 if (svd->softlockcnt > 0) {
8154 8154 /*
8155 8155 * Since madvise(3C) is advisory and
8156 8156 * it's not part of UNIX98, madvise(3C)
8157 8157 * failure here doesn't cause any hardship.
8158 8158 * Note that we don't block in "as" layer.
8159 8159 */
8160 8160 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8161 8161 return (EAGAIN);
8162 8162 }
8163 8163 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8164 8164 svd->amp->a_softlockcnt > 0) {
8165 8165 /*
8166 8166 * Try to purge this amp's entries from pcache. It
8167 8167 * will succeed only if other segments that share the
8168 8168 * amp have no outstanding softlock's.
8169 8169 */
8170 8170 segvn_purge(seg);
8171 8171 }
8172 8172 }
8173 8173
8174 8174 amp = svd->amp;
8175 8175 vp = svd->vp;
8176 8176 if (behav == MADV_FREE) {
8177 8177 /*
8178 8178 * MADV_FREE is not supported for segments with
8179 8179 * underlying object; if anonmap is NULL, anon slots
8180 8180 * are not yet populated and there is nothing for
8181 8181 * us to do. As MADV_FREE is advisory, we don't
8182 8182 * return error in either case.
8183 8183 */
8184 8184 if (vp != NULL || amp == NULL) {
8185 8185 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8186 8186 return (0);
8187 8187 }
8188 8188
8189 8189 segvn_purge(seg);
8190 8190
8191 8191 page = seg_page(seg, addr);
8192 8192 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8193 8193 anon_disclaim(amp, svd->anon_index + page, len);
8194 8194 		ANON_LOCK_EXIT(&amp->a_rwlock);
8195 8195 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8196 8196 return (0);
8197 8197 }
8198 8198
8199 8199 /*
8200 8200 * If advice is to be applied to entire segment,
8201 8201 * use advice field in seg_data structure
8202 8202 * otherwise use appropriate vpage entry.
8203 8203 */
8204 8204 if ((addr == seg->s_base) && (len == seg->s_size)) {
8205 8205 switch (behav) {
8206 8206 case MADV_ACCESS_LWP:
8207 8207 case MADV_ACCESS_MANY:
8208 8208 case MADV_ACCESS_DEFAULT:
8209 8209 /*
8210 8210 * Set memory allocation policy for this segment
8211 8211 */
8212 8212 policy = lgrp_madv_to_policy(behav, len, svd->type);
8213 8213 if (svd->type == MAP_SHARED)
8214 8214 already_set = lgrp_shm_policy_set(policy, amp,
8215 8215 svd->anon_index, vp, svd->offset, len);
8216 8216 else {
8217 8217 /*
8218 8218 * For private memory, need writers lock on
8219 8219 * address space because the segment may be
8220 8220 * split or concatenated when changing policy
8221 8221 */
8222 8222 if (AS_READ_HELD(seg->s_as,
8223 8223 &seg->s_as->a_lock)) {
8224 8224 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8225 8225 return (IE_RETRY);
8226 8226 }
8227 8227
8228 8228 already_set = lgrp_privm_policy_set(policy,
8229 8229 &svd->policy_info, len);
8230 8230 }
8231 8231
8232 8232 /*
8233 8233 * If policy set already and it shouldn't be reapplied,
8234 8234 * don't do anything.
8235 8235 */
8236 8236 if (already_set &&
8237 8237 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8238 8238 break;
8239 8239
8240 8240 /*
8241 8241 * Mark any existing pages in given range for
8242 8242 * migration
8243 8243 */
8244 8244 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8245 8245 vp, svd->offset, 1);
8246 8246
8247 8247 /*
8248 8248 * If same policy set already or this is a shared
8249 8249 * memory segment, don't need to try to concatenate
8250 8250 * segment with adjacent ones.
8251 8251 */
8252 8252 if (already_set || svd->type == MAP_SHARED)
8253 8253 break;
8254 8254
8255 8255 /*
8256 8256 * Try to concatenate this segment with previous
8257 8257 * one and next one, since we changed policy for
8258 8258 * this one and it may be compatible with adjacent
8259 8259 * ones now.
8260 8260 */
8261 8261 prev = AS_SEGPREV(seg->s_as, seg);
8262 8262 next = AS_SEGNEXT(seg->s_as, seg);
8263 8263
8264 8264 if (next && next->s_ops == &segvn_ops &&
8265 8265 addr + len == next->s_base)
8266 8266 (void) segvn_concat(seg, next, 1);
8267 8267
8268 8268 if (prev && prev->s_ops == &segvn_ops &&
8269 8269 addr == prev->s_base + prev->s_size) {
8270 8270 /*
8271 8271 * Drop lock for private data of current
8272 8272 * segment before concatenating (deleting) it
8273 8273 * and return IE_REATTACH to tell as_ctl() that
8274 8274 * current segment has changed
8275 8275 */
8276 8276 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8277 8277 if (!segvn_concat(prev, seg, 1))
8278 8278 err = IE_REATTACH;
8279 8279
8280 8280 return (err);
8281 8281 }
8282 8282 break;
8283 8283
8284 8284 case MADV_SEQUENTIAL:
8285 8285 /*
8286 8286 * unloading mapping guarantees
8287 8287 * detection in segvn_fault
8288 8288 */
8289 8289 ASSERT(seg->s_szc == 0);
8290 8290 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8291 8291 hat_unload(seg->s_as->a_hat, addr, len,
8292 8292 HAT_UNLOAD);
8293 8293 /* FALLTHROUGH */
8294 8294 case MADV_NORMAL:
8295 8295 case MADV_RANDOM:
8296 8296 svd->advice = (uchar_t)behav;
8297 8297 svd->pageadvice = 0;
8298 8298 break;
8299 8299 case MADV_WILLNEED: /* handled in memcntl */
8300 8300 case MADV_DONTNEED: /* handled in memcntl */
8301 8301 case MADV_FREE: /* handled above */
8302 8302 break;
8303 8303 default:
8304 8304 err = EINVAL;
8305 8305 }
8306 8306 } else {
8307 8307 caddr_t eaddr;
8308 8308 struct seg *new_seg;
8309 8309 struct segvn_data *new_svd;
8310 8310 u_offset_t off;
8311 8311 caddr_t oldeaddr;
8312 8312
8313 8313 page = seg_page(seg, addr);
8314 8314
8315 8315 segvn_vpage(seg);
8316 8316 if (svd->vpage == NULL) {
8317 8317 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8318 8318 return (ENOMEM);
8319 8319 }
8320 8320
8321 8321 switch (behav) {
8322 8322 struct vpage *bvpp, *evpp;
8323 8323
8324 8324 case MADV_ACCESS_LWP:
8325 8325 case MADV_ACCESS_MANY:
8326 8326 case MADV_ACCESS_DEFAULT:
8327 8327 /*
8328 8328 * Set memory allocation policy for portion of this
8329 8329 * segment
8330 8330 */
8331 8331
8332 8332 /*
8333 8333 * Align address and length of advice to page
8334 8334 * boundaries for large pages
8335 8335 */
8336 8336 if (seg->s_szc != 0) {
8337 8337 size_t pgsz;
8338 8338
8339 8339 pgsz = page_get_pagesize(seg->s_szc);
8340 8340 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8341 8341 len = P2ROUNDUP(len, pgsz);
8342 8342 }
8343 8343
8344 8344 /*
8345 8345 * Check to see whether policy is set already
8346 8346 */
8347 8347 policy = lgrp_madv_to_policy(behav, len, svd->type);
8348 8348
8349 8349 anon_index = svd->anon_index + page;
8350 8350 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8351 8351
8352 8352 if (svd->type == MAP_SHARED)
8353 8353 already_set = lgrp_shm_policy_set(policy, amp,
8354 8354 anon_index, vp, off, len);
8355 8355 else
8356 8356 already_set =
8357 8357 (policy == svd->policy_info.mem_policy);
8358 8358
8359 8359 /*
8360 8360 * If policy set already and it shouldn't be reapplied,
8361 8361 * don't do anything.
8362 8362 */
8363 8363 if (already_set &&
8364 8364 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8365 8365 break;
8366 8366
8367 8367 /*
8368 8368 * For private memory, need writers lock on
8369 8369 * address space because the segment may be
8370 8370 * split or concatenated when changing policy
8371 8371 */
8372 8372 if (svd->type == MAP_PRIVATE &&
8373 8373 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8374 8374 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8375 8375 return (IE_RETRY);
8376 8376 }
8377 8377
8378 8378 /*
8379 8379 * Mark any existing pages in given range for
8380 8380 * migration
8381 8381 */
8382 8382 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8383 8383 vp, svd->offset, 1);
8384 8384
8385 8385 /*
8386 8386 * Don't need to try to split or concatenate
8387 8387 * segments, since policy is same or this is a shared
8388 8388 * memory segment
8389 8389 */
8390 8390 if (already_set || svd->type == MAP_SHARED)
8391 8391 break;
8392 8392
8393 8393 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8394 8394 ASSERT(svd->amp == NULL);
8395 8395 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8396 8396 ASSERT(svd->softlockcnt == 0);
8397 8397 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8398 8398 HAT_REGION_TEXT);
8399 8399 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8400 8400 }
8401 8401
8402 8402 /*
8403 8403 * Split off new segment if advice only applies to a
8404 8404 * portion of existing segment starting in middle
8405 8405 */
8406 8406 new_seg = NULL;
8407 8407 eaddr = addr + len;
8408 8408 oldeaddr = seg->s_base + seg->s_size;
8409 8409 if (addr > seg->s_base) {
8410 8410 /*
8411 8411 * Must flush I/O page cache
8412 8412 * before splitting segment
8413 8413 */
8414 8414 if (svd->softlockcnt > 0)
8415 8415 segvn_purge(seg);
8416 8416
8417 8417 /*
8418 8418 * Split segment and return IE_REATTACH to tell
8419 8419 * as_ctl() that current segment changed
8420 8420 */
8421 8421 new_seg = segvn_split_seg(seg, addr);
8422 8422 new_svd = (struct segvn_data *)new_seg->s_data;
8423 8423 err = IE_REATTACH;
8424 8424
8425 8425 /*
8426 8426 * If new segment ends where old one
8427 8427 * did, try to concatenate the new
8428 8428 * segment with next one.
8429 8429 */
8430 8430 if (eaddr == oldeaddr) {
8431 8431 /*
8432 8432 * Set policy for new segment
8433 8433 */
8434 8434 (void) lgrp_privm_policy_set(policy,
8435 8435 &new_svd->policy_info,
8436 8436 new_seg->s_size);
8437 8437
8438 8438 next = AS_SEGNEXT(new_seg->s_as,
8439 8439 new_seg);
8440 8440
8441 8441 if (next &&
8442 8442 next->s_ops == &segvn_ops &&
8443 8443 eaddr == next->s_base)
8444 8444 (void) segvn_concat(new_seg,
8445 8445 next, 1);
8446 8446 }
8447 8447 }
8448 8448
8449 8449 /*
8450 8450 * Split off end of existing segment if advice only
8451 8451 * applies to a portion of segment ending before
8452 8452 * end of the existing segment
8453 8453 */
8454 8454 if (eaddr < oldeaddr) {
8455 8455 /*
8456 8456 * Must flush I/O page cache
8457 8457 * before splitting segment
8458 8458 */
8459 8459 if (svd->softlockcnt > 0)
8460 8460 segvn_purge(seg);
8461 8461
8462 8462 /*
8463 8463 * If beginning of old segment was already
8464 8464 * split off, use new segment to split end off
8465 8465 * from.
8466 8466 */
8467 8467 if (new_seg != NULL && new_seg != seg) {
8468 8468 /*
8469 8469 * Split segment
8470 8470 */
8471 8471 (void) segvn_split_seg(new_seg, eaddr);
8472 8472
8473 8473 /*
8474 8474 * Set policy for new segment
8475 8475 */
8476 8476 (void) lgrp_privm_policy_set(policy,
8477 8477 &new_svd->policy_info,
8478 8478 new_seg->s_size);
8479 8479 } else {
8480 8480 /*
8481 8481 * Split segment and return IE_REATTACH
8482 8482 * to tell as_ctl() that current
8483 8483 * segment changed
8484 8484 */
8485 8485 (void) segvn_split_seg(seg, eaddr);
8486 8486 err = IE_REATTACH;
8487 8487
8488 8488 (void) lgrp_privm_policy_set(policy,
8489 8489 &svd->policy_info, seg->s_size);
8490 8490
8491 8491 /*
8492 8492 * If new segment starts where old one
8493 8493 * did, try to concatenate it with
8494 8494 * previous segment.
8495 8495 */
8496 8496 if (addr == seg->s_base) {
8497 8497 prev = AS_SEGPREV(seg->s_as,
8498 8498 seg);
8499 8499
8500 8500 /*
8501 8501 * Drop lock for private data
8502 8502 * of current segment before
8503 8503 * concatenating (deleting) it
8504 8504 */
8505 8505 if (prev &&
8506 8506 prev->s_ops ==
8507 8507 &segvn_ops &&
8508 8508 addr == prev->s_base +
8509 8509 prev->s_size) {
8510 8510 SEGVN_LOCK_EXIT(
8511 8511 seg->s_as,
8512 8512 &svd->lock);
8513 8513 (void) segvn_concat(
8514 8514 prev, seg, 1);
8515 8515 return (err);
8516 8516 }
8517 8517 }
8518 8518 }
8519 8519 }
8520 8520 break;
8521 8521 case MADV_SEQUENTIAL:
8522 8522 ASSERT(seg->s_szc == 0);
8523 8523 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8524 8524 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8525 8525 /* FALLTHROUGH */
8526 8526 case MADV_NORMAL:
8527 8527 case MADV_RANDOM:
8528 8528 bvpp = &svd->vpage[page];
8529 8529 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8530 8530 for (; bvpp < evpp; bvpp++)
8531 8531 VPP_SETADVICE(bvpp, behav);
8532 8532 svd->advice = MADV_NORMAL;
8533 8533 break;
8534 8534 case MADV_WILLNEED: /* handled in memcntl */
8535 8535 case MADV_DONTNEED: /* handled in memcntl */
8536 8536 case MADV_FREE: /* handled above */
8537 8537 break;
8538 8538 default:
8539 8539 err = EINVAL;
8540 8540 }
8541 8541 }
8542 8542 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8543 8543 return (err);
8544 8544 }
8545 8545
8546 8546 /*
8547 8547 * There is one kind of inheritance that can be specified for pages:
8548 8548 *
8549 8549 * SEGP_INH_ZERO - Pages should be zeroed in the child
8550 8550 */
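/*
 * Editorial usage sketch (not part of the original source): userland
 * typically requests this via memcntl(2), e.g. roughly
 *
 *	(void) memcntl(addr, len, MC_INHERIT_ZERO, 0, 0, 0);
 *
 * which the VM layer forwards to this segment op with SEGP_INH_ZERO.
 */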
8551 8551 static int
8552 8552 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8553 8553 {
8554 8554 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8555 8555 struct vpage *bvpp, *evpp;
8556 8556 size_t page;
8557 8557 int ret = 0;
8558 8558
8559 8559 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8560 8560
8561 8561 /* Can't support something we don't know about */
8562 8562 if (behav != SEGP_INH_ZERO)
8563 8563 return (ENOTSUP);
8564 8564
8565 8565 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8566 8566
8567 8567 /*
8568 8568 * This must be a straightforward anonymous segment that is mapped
8569 8569 * privately and is not backed by a vnode.
8570 8570 */
8571 8571 if (svd->tr_state != SEGVN_TR_OFF ||
8572 8572 svd->type != MAP_PRIVATE ||
8573 8573 svd->vp != NULL) {
8574 8574 ret = EINVAL;
8575 8575 goto out;
8576 8576 }
8577 8577
8578 8578 	 * If the entire segment has been marked as inherit zero, then there is
8579 8579 	 * no reason to do anything else.
8580 8580 * to do anything else.
8581 8581 */
8582 8582 if (svd->svn_inz == SEGVN_INZ_ALL) {
8583 8583 ret = 0;
8584 8584 goto out;
8585 8585 }
8586 8586
8587 8587 /*
8588 8588 * If this applies to the entire segment, simply mark it and we're done.
8589 8589 */
8590 8590 if ((addr == seg->s_base) && (len == seg->s_size)) {
8591 8591 svd->svn_inz = SEGVN_INZ_ALL;
8592 8592 ret = 0;
8593 8593 goto out;
8594 8594 }
8595 8595
8596 8596 /*
8597 8597 * We've been asked to mark a subset of this segment as inherit zero,
8598 8598 	 * therefore we need to manipulate its vpages.
8599 8599 */
8600 8600 if (svd->vpage == NULL) {
8601 8601 segvn_vpage(seg);
8602 8602 if (svd->vpage == NULL) {
8603 8603 ret = ENOMEM;
8604 8604 goto out;
8605 8605 }
8606 8606 }
8607 8607
8608 8608 svd->svn_inz = SEGVN_INZ_VPP;
8609 8609 page = seg_page(seg, addr);
8610 8610 bvpp = &svd->vpage[page];
8611 8611 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8612 8612 for (; bvpp < evpp; bvpp++)
8613 8613 VPP_SETINHZERO(bvpp);
8614 8614 ret = 0;
8615 8615
8616 8616 out:
8617 8617 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8618 8618 return (ret);
8619 8619 }
8620 8620
8621 8621 /*
8622 8622 * Create a vpage structure for this seg.
8623 8623 */
8624 8624 static void
8625 8625 segvn_vpage(struct seg *seg)
8626 8626 {
8627 8627 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8628 8628 struct vpage *vp, *evp;
8629 8629 static pgcnt_t page_limit = 0;
8630 8630
8631 8631 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8632 8632
8633 8633 /*
8634 8634 * If no vpage structure exists, allocate one. Copy the protections
8635 8635 * and the advice from the segment itself to the individual pages.
8636 8636 */
8637 8637 if (svd->vpage == NULL) {
8638 8638 /*
8639 8639 * Start by calculating the number of pages we must allocate to
8640 8640 		 * track the per-page vpage structs needed for this entire
8641 8641 * segment. If we know now that it will require more than our
8642 8642 * heuristic for the maximum amount of kmem we can consume then
8643 8643 * fail. We do this here, instead of trying to detect this deep
8644 8644 * in page_resv and propagating the error up, since the entire
8645 8645 * memory allocation stack is not amenable to passing this
8646 8646 * back. Instead, it wants to keep trying.
8647 8647 *
8648 8648 * As a heuristic we set a page limit of 5/8s of total_pages
8649 8649 * for this allocation. We use shifts so that no floating
8650 8650 * point conversion takes place and only need to do the
8651 8651 * calculation once.
8652 8652 */
8653 8653 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
8654 8654 pgcnt_t npages = mem_needed >> PAGESHIFT;
8655 8655
8656 8656 if (page_limit == 0)
8657 8657 page_limit = (total_pages >> 1) + (total_pages >> 3);
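			/* i.e. total_pages/2 + total_pages/8 == 5/8 of total_pages */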
8658 8658
8659 8659 if (npages > page_limit)
8660 8660 return;
8661 8661
8662 8662 svd->pageadvice = 1;
8663 8663 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
8664 8664 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8665 8665 for (vp = svd->vpage; vp < evp; vp++) {
8666 8666 VPP_SETPROT(vp, svd->prot);
8667 8667 VPP_SETADVICE(vp, svd->advice);
8668 8668 }
8669 8669 }
8670 8670 }
8671 8671
8672 8672 /*
8673 8673 * Dump the pages belonging to this segvn segment.
8674 8674 */
8675 8675 static void
8676 8676 segvn_dump(struct seg *seg)
8677 8677 {
8678 8678 struct segvn_data *svd;
8679 8679 page_t *pp;
8680 8680 struct anon_map *amp;
8681 8681 ulong_t anon_index;
8682 8682 struct vnode *vp;
8683 8683 u_offset_t off, offset;
8684 8684 pfn_t pfn;
8685 8685 pgcnt_t page, npages;
8686 8686 caddr_t addr;
8687 8687
8688 8688 npages = seg_pages(seg);
8689 8689 svd = (struct segvn_data *)seg->s_data;
8690 8690 vp = svd->vp;
8691 8691 off = offset = svd->offset;
8692 8692 addr = seg->s_base;
8693 8693
8694 8694 if ((amp = svd->amp) != NULL) {
8695 8695 anon_index = svd->anon_index;
8696 8696 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8697 8697 }
8698 8698
8699 8699 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8700 8700 struct anon *ap;
8701 8701 int we_own_it = 0;
8702 8702
8703 8703 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8704 8704 swap_xlate_nopanic(ap, &vp, &off);
8705 8705 } else {
8706 8706 vp = svd->vp;
8707 8707 off = offset;
8708 8708 }
8709 8709
8710 8710 /*
8711 8711 * If pp == NULL, the page either does not exist
8712 8712 * or is exclusively locked. So determine if it
8713 8713 * exists before searching for it.
8714 8714 */
8715 8715
8716 8716 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8717 8717 we_own_it = 1;
8718 8718 else
8719 8719 pp = page_exists(vp, off);
8720 8720
8721 8721 if (pp) {
8722 8722 pfn = page_pptonum(pp);
8723 8723 dump_addpage(seg->s_as, addr, pfn);
8724 8724 if (we_own_it)
8725 8725 page_unlock(pp);
8726 8726 }
8727 8727 addr += PAGESIZE;
8728 8728 dump_timeleft = dump_timeout;
8729 8729 }
8730 8730
8731 8731 if (amp != NULL)
8732 8732 		ANON_LOCK_EXIT(&amp->a_rwlock);
8733 8733 }
8734 8734
8735 8735 #ifdef DEBUG
8736 8736 static uint32_t segvn_pglock_mtbf = 0;
8737 8737 #endif
8738 8738
8739 8739 #define PCACHE_SHWLIST ((page_t *)-2)
8740 8740 #define NOPCACHE_SHWLIST ((page_t *)-1)
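/*
 * These sentinels are stored in the extra (last) slot of a shadow page list
 * by segvn_pagelock() below to record whether the list was inserted into
 * pcache (PCACHE_SHWLIST) or bypassed pcache and was not large page adjusted
 * (NOPCACHE_SHWLIST), so that L_PAGEUNLOCK can tell how to find the range
 * that was actually locked.
 */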
8741 8741
8742 8742 /*
8743 8743 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8744 8744 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8745 8745 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8746 8746 * the same parts of the segment. Currently shadow list creation is only
8747 8747 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8748 8748 * tagged with segment pointer, starting virtual address and length. This
8749 8749 * approach for MAP_SHARED segments may add many pcache entries for the same
8750 8750 * set of pages and lead to long hash chains that decrease pcache lookup
8751 8751 * performance. To avoid this issue for shared segments shared anon map and
8752 8752 * starting anon index are used for pcache entry tagging. This allows all
8753 8753 * segments to share pcache entries for the same anon range and reduces pcache
8754 8754 * chain's length as well as memory overhead from duplicate shadow lists and
8755 8755 * pcache entries.
8756 8756 *
8757 8757 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8758 8758 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8759 8759 * part of softlockcnt accounting is done differently for private and shared
8760 8760 * segments. In private segment case softlock is only incremented when a new
8761 8761 * shadow list is created but not when an existing one is found via
8762 8762 * seg_plookup(). pcache entries have reference count incremented/decremented
8763 8763 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8764 8764 * reference count can be purged (and purging is needed before segment can be
8765 8765 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8766 8766 * decrement softlockcnt. Since in private segment case each of its pcache
8767 8767 * entries only belongs to this segment we can expect that when
8768 8768 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8769 8769 * segment purge will succeed and softlockcnt will drop to 0. In shared
8770 8770 * segment case reference count in pcache entry counts active locks from many
8771 8771 * different segments so we can't expect segment purging to succeed even when
8772 8772 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8773 8773 * segment. To be able to determine when there're no pending pagelocks in
8774 8774 * shared segment case we don't rely on purging to make softlockcnt drop to 0
8775 8775 * but instead softlockcnt is incremented and decremented for every
8776 8776 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8777 8777 * list was created or an existing one was found. When softlockcnt drops to 0
8778 8778 * this segment no longer has any claims for pcached shadow lists and the
8779 8779 * segment can be freed even if there're still active pcache entries
8780 8780 * shared by this segment anon map. Shared segment pcache entries belong to
8781 8781 * anon map and are typically removed when anon map is freed after all
8782 8782 * processes destroy the segments that use this anon map.
8783 8783 */
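/*
 * Editorial usage sketch (not part of the original source): a physio-style
 * consumer normally reaches this routine through the as layer, roughly:
 *
 *	struct page **pplist;
 *
 *	if (as_pagelock(as, &pplist, uaddr, len, S_WRITE) == 0) {
 *		... perform the I/O against the locked pages ...
 *		as_pageunlock(as, pplist, uaddr, len, S_WRITE);
 *	}
 *
 * as_pagelock() corresponds to the L_PAGELOCK case below and
 * as_pageunlock() to the L_PAGEUNLOCK case; "as", "uaddr" and "len" are
 * placeholders.
 */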
8784 8784 static int
8785 8785 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8786 8786 enum lock_type type, enum seg_rw rw)
8787 8787 {
8788 8788 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8789 8789 size_t np;
8790 8790 pgcnt_t adjustpages;
8791 8791 pgcnt_t npages;
8792 8792 ulong_t anon_index;
8793 8793 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8794 8794 uint_t error;
8795 8795 struct anon_map *amp;
8796 8796 pgcnt_t anpgcnt;
8797 8797 struct page **pplist, **pl, *pp;
8798 8798 caddr_t a;
8799 8799 size_t page;
8800 8800 caddr_t lpgaddr, lpgeaddr;
8801 8801 anon_sync_obj_t cookie;
8802 8802 int anlock;
8803 8803 struct anon_map *pamp;
8804 8804 caddr_t paddr;
8805 8805 seg_preclaim_cbfunc_t preclaim_callback;
8806 8806 size_t pgsz;
8807 8807 int use_pcache;
8808 8808 size_t wlen;
8809 8809 uint_t pflags = 0;
8810 8810 int sftlck_sbase = 0;
8811 8811 int sftlck_send = 0;
8812 8812
8813 8813 #ifdef DEBUG
8814 8814 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8815 8815 hrtime_t ts = gethrtime();
8816 8816 if ((ts % segvn_pglock_mtbf) == 0) {
8817 8817 return (ENOTSUP);
8818 8818 }
8819 8819 if ((ts % segvn_pglock_mtbf) == 1) {
8820 8820 return (EFAULT);
8821 8821 }
8822 8822 }
8823 8823 #endif
8824 8824
8825 8825 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8826 8826 "segvn_pagelock: start seg %p addr %p", seg, addr);
8827 8827
8828 8828 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8829 8829 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8830 8830
8831 8831 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8832 8832
8833 8833 /*
8834 8834 * for now we only support pagelock to anon memory. We would have to
8835 8835 * check protections for vnode objects and call into the vnode driver.
8836 8836 * That's too much for a fast path. Let the fault entry point handle
8837 8837 * it.
8838 8838 */
8839 8839 if (svd->vp != NULL) {
8840 8840 if (type == L_PAGELOCK) {
8841 8841 error = ENOTSUP;
8842 8842 goto out;
8843 8843 }
8844 8844 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8845 8845 }
8846 8846 if ((amp = svd->amp) == NULL) {
8847 8847 if (type == L_PAGELOCK) {
8848 8848 error = EFAULT;
8849 8849 goto out;
8850 8850 }
8851 8851 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8852 8852 }
8853 8853 if (rw != S_READ && rw != S_WRITE) {
8854 8854 if (type == L_PAGELOCK) {
8855 8855 error = ENOTSUP;
8856 8856 goto out;
8857 8857 }
8858 8858 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8859 8859 }
8860 8860
8861 8861 if (seg->s_szc != 0) {
8862 8862 /*
8863 8863 * We are adjusting the pagelock region to the large page size
8864 8864 * boundary because the unlocked part of a large page cannot
8865 8865 * be freed anyway unless all constituent pages of a large
8866 8866 * page are locked. Bigger regions reduce pcache chain length
8867 8867 * and improve lookup performance. The tradeoff is that the
8868 8868 * very first segvn_pagelock() call for a given page is more
8869 8869 * expensive if only 1 page_t is needed for IO. This is only
8870 8870 * an issue if pcache entry doesn't get reused by several
8871 8871 * subsequent calls. We optimize here for the case when pcache
8872 8872 * is heavily used by repeated IOs to the same address range.
8873 8873 *
8874 8874 * Note segment's page size cannot change while we are holding
8875 8875 * as lock. And then it cannot change while softlockcnt is
8876 8876 * not 0. This will allow us to correctly recalculate large
8877 8877 * page size region for the matching pageunlock/reclaim call
8878 8878 * since as_pageunlock() caller must always match
8879 8879 * as_pagelock() call's addr and len.
8880 8880 *
8881 8881 * For pageunlock *ppp points to the pointer of page_t that
8882 8882 * corresponds to the real unadjusted start address. Similar
8883 8883 * for pagelock *ppp must point to the pointer of page_t that
8884 8884 * corresponds to the real unadjusted start address.
8885 8885 */
8886 8886 pgsz = page_get_pagesize(seg->s_szc);
8887 8887 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8888 8888 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8889 8889 } else if (len < segvn_pglock_comb_thrshld) {
8890 8890 lpgaddr = addr;
8891 8891 lpgeaddr = addr + len;
8892 8892 adjustpages = 0;
8893 8893 pgsz = PAGESIZE;
8894 8894 } else {
8895 8895 /*
8896 8896 * Align the address range of large enough requests to allow
8897 8897 * combining of different shadow lists into 1 to reduce memory
8898 8898 * overhead from potentially overlapping large shadow lists
8899 8899 * (worst case is we have a 1MB IO into buffers with start
8900 8900 * addresses separated by 4K). Alignment is only possible if
8901 8901 * padded chunks have sufficient access permissions. Note
8902 8902 * permissions won't change between L_PAGELOCK and
8903 8903 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8904 8904 * segvn_setprot() to wait until softlockcnt drops to 0. This
8905 8905 * allows us to determine in L_PAGEUNLOCK the same range we
8906 8906 * computed in L_PAGELOCK.
8907 8907 *
8908 8908 * If alignment is limited by segment ends set
8909 8909 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8910 8910 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8911 8911 * per segment counters. In L_PAGEUNLOCK case decrease
8912 8912 * softlockcnt_sbase/softlockcnt_send counters if
8913 8913 * sftlck_sbase/sftlck_send flags are set. When
8914 8914 * softlockcnt_sbase/softlockcnt_send are non 0
8915 8915 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8916 8916 * won't merge the segments. This restriction combined with
8917 8917 * restriction on segment unmapping and splitting for segments
8918 8918 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8919 8919 * correctly determine the same range that was previously
8920 8920 * locked by matching L_PAGELOCK.
8921 8921 */
8922 8922 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8923 8923 pgsz = PAGESIZE;
8924 8924 if (svd->type == MAP_PRIVATE) {
8925 8925 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8926 8926 segvn_pglock_comb_balign);
8927 8927 if (lpgaddr < seg->s_base) {
8928 8928 lpgaddr = seg->s_base;
8929 8929 sftlck_sbase = 1;
8930 8930 }
8931 8931 } else {
8932 8932 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8933 8933 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8934 8934 if (aaix < svd->anon_index) {
8935 8935 lpgaddr = seg->s_base;
8936 8936 sftlck_sbase = 1;
8937 8937 } else {
8938 8938 lpgaddr = addr - ptob(aix - aaix);
8939 8939 ASSERT(lpgaddr >= seg->s_base);
8940 8940 }
8941 8941 }
8942 8942 if (svd->pageprot && lpgaddr != addr) {
8943 8943 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8944 8944 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8945 8945 while (vp < evp) {
8946 8946 if ((VPP_PROT(vp) & protchk) == 0) {
8947 8947 break;
8948 8948 }
8949 8949 vp++;
8950 8950 }
8951 8951 if (vp < evp) {
8952 8952 lpgaddr = addr;
8953 8953 pflags = 0;
8954 8954 }
8955 8955 }
8956 8956 lpgeaddr = addr + len;
8957 8957 if (pflags) {
8958 8958 if (svd->type == MAP_PRIVATE) {
8959 8959 lpgeaddr = (caddr_t)P2ROUNDUP(
8960 8960 (uintptr_t)lpgeaddr,
8961 8961 segvn_pglock_comb_balign);
8962 8962 } else {
8963 8963 ulong_t aix = svd->anon_index +
8964 8964 seg_page(seg, lpgeaddr);
8965 8965 ulong_t aaix = P2ROUNDUP(aix,
8966 8966 segvn_pglock_comb_palign);
8967 8967 if (aaix < aix) {
8968 8968 lpgeaddr = 0;
8969 8969 } else {
8970 8970 lpgeaddr += ptob(aaix - aix);
8971 8971 }
8972 8972 }
8973 8973 if (lpgeaddr == 0 ||
8974 8974 lpgeaddr > seg->s_base + seg->s_size) {
8975 8975 lpgeaddr = seg->s_base + seg->s_size;
8976 8976 sftlck_send = 1;
8977 8977 }
8978 8978 }
8979 8979 if (svd->pageprot && lpgeaddr != addr + len) {
8980 8980 struct vpage *vp;
8981 8981 struct vpage *evp;
8982 8982
8983 8983 vp = &svd->vpage[seg_page(seg, addr + len)];
8984 8984 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8985 8985
8986 8986 while (vp < evp) {
8987 8987 if ((VPP_PROT(vp) & protchk) == 0) {
8988 8988 break;
8989 8989 }
8990 8990 vp++;
8991 8991 }
8992 8992 if (vp < evp) {
8993 8993 lpgeaddr = addr + len;
8994 8994 }
8995 8995 }
8996 8996 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8997 8997 }
8998 8998
8999 8999 /*
9000 9000 * For MAP_SHARED segments we create pcache entries tagged by amp and
9001 9001 * anon index so that we can share pcache entries with other segments
9002 9002 * that map this amp. For private segments pcache entries are tagged
9003 9003 * with segment and virtual address.
9004 9004 */
9005 9005 if (svd->type == MAP_SHARED) {
9006 9006 pamp = amp;
9007 9007 paddr = (caddr_t)((lpgaddr - seg->s_base) +
9008 9008 ptob(svd->anon_index));
9009 9009 preclaim_callback = shamp_reclaim;
9010 9010 } else {
9011 9011 pamp = NULL;
9012 9012 paddr = lpgaddr;
9013 9013 preclaim_callback = segvn_reclaim;
9014 9014 }
9015 9015
9016 9016 if (type == L_PAGEUNLOCK) {
9017 9017 VM_STAT_ADD(segvnvmstats.pagelock[0]);
9018 9018
9019 9019 /*
9020 9020 * update hat ref bits for /proc. We need to make sure
9021 9021 * that threads tracing the ref and mod bits of the
9022 9022 * address space get the right data.
9023 9023 * Note: page ref and mod bits are updated at reclaim time
9024 9024 */
9025 9025 if (seg->s_as->a_vbits) {
9026 9026 for (a = addr; a < addr + len; a += PAGESIZE) {
9027 9027 if (rw == S_WRITE) {
9028 9028 hat_setstat(seg->s_as, a,
9029 9029 PAGESIZE, P_REF | P_MOD);
9030 9030 } else {
9031 9031 hat_setstat(seg->s_as, a,
9032 9032 PAGESIZE, P_REF);
9033 9033 }
9034 9034 }
9035 9035 }
9036 9036
9037 9037 /*
9038 9038 * Check the shadow list entry after the last page used in
9039 9039 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
9040 9040 * was not inserted into pcache and is not large page
9041 9041 * adjusted. In this case call reclaim callback directly and
9042 9042 * don't adjust the shadow list start and size for large
9043 9043 * pages.
9044 9044 */
9045 9045 npages = btop(len);
9046 9046 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
9047 9047 void *ptag;
9048 9048 if (pamp != NULL) {
9049 9049 ASSERT(svd->type == MAP_SHARED);
9050 9050 ptag = (void *)pamp;
9051 9051 paddr = (caddr_t)((addr - seg->s_base) +
9052 9052 ptob(svd->anon_index));
9053 9053 } else {
9054 9054 ptag = (void *)seg;
9055 9055 paddr = addr;
9056 9056 }
9057 9057 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
9058 9058 } else {
9059 9059 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
9060 9060 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
9061 9061 len = lpgeaddr - lpgaddr;
9062 9062 npages = btop(len);
9063 9063 seg_pinactive(seg, pamp, paddr, len,
9064 9064 *ppp - adjustpages, rw, pflags, preclaim_callback);
9065 9065 }
9066 9066
9067 9067 if (pamp != NULL) {
9068 9068 ASSERT(svd->type == MAP_SHARED);
9069 9069 ASSERT(svd->softlockcnt >= npages);
9070 9070 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
9071 9071 }
9072 9072
9073 9073 if (sftlck_sbase) {
9074 9074 ASSERT(svd->softlockcnt_sbase > 0);
9075 9075 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
9076 9076 }
9077 9077 if (sftlck_send) {
9078 9078 ASSERT(svd->softlockcnt_send > 0);
9079 9079 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
9080 9080 }
9081 9081
9082 9082 /*
9083 9083 * If someone is blocked while unmapping, we purge
9084 9084 * segment page cache and thus reclaim pplist synchronously
9085 9085 * without waiting for seg_pasync_thread. This speeds up
9086 9086 * unmapping in cases where munmap(2) is called, while
9087 9087 * raw async i/o is still in progress or where a thread
9088 9088 * exits on data fault in a multithreaded application.
9089 9089 */
9090 9090 if (AS_ISUNMAPWAIT(seg->s_as)) {
9091 9091 if (svd->softlockcnt == 0) {
9092 9092 mutex_enter(&seg->s_as->a_contents);
9093 9093 if (AS_ISUNMAPWAIT(seg->s_as)) {
9094 9094 AS_CLRUNMAPWAIT(seg->s_as);
9095 9095 cv_broadcast(&seg->s_as->a_cv);
9096 9096 }
9097 9097 mutex_exit(&seg->s_as->a_contents);
9098 9098 } else if (pamp == NULL) {
9099 9099 /*
9100 9100 * softlockcnt is not 0 and this is a
9101 9101 * MAP_PRIVATE segment. Try to purge its
9102 9102 * pcache entries to reduce softlockcnt.
9103 9103 * If it drops to 0 segvn_reclaim()
9104 9104 * will wake up a thread waiting on
9105 9105 * unmapwait flag.
9106 9106 *
9107 9107 * We don't purge MAP_SHARED segments with non
9108 9108 * 0 softlockcnt since IO is still in progress
9109 9109 * for such segments.
9110 9110 */
9111 9111 ASSERT(svd->type == MAP_PRIVATE);
9112 9112 segvn_purge(seg);
9113 9113 }
9114 9114 }
9115 9115 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9116 9116 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
9117 9117 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
9118 9118 return (0);
9119 9119 }
9120 9120
9121 9121 /* The L_PAGELOCK case ... */
9122 9122
9123 9123 VM_STAT_ADD(segvnvmstats.pagelock[1]);
9124 9124
9125 9125 /*
9126 9126 * For MAP_SHARED segments we have to check protections before
9127 9127 * seg_plookup() since pcache entries may be shared by many segments
9128 9128 * with potentially different page protections.
9129 9129 */
9130 9130 if (pamp != NULL) {
9131 9131 ASSERT(svd->type == MAP_SHARED);
9132 9132 if (svd->pageprot == 0) {
9133 9133 if ((svd->prot & protchk) == 0) {
9134 9134 error = EACCES;
9135 9135 goto out;
9136 9136 }
9137 9137 } else {
9138 9138 /*
9139 9139 * check page protections
9140 9140 */
9141 9141 caddr_t ea;
9142 9142
9143 9143 if (seg->s_szc) {
9144 9144 a = lpgaddr;
9145 9145 ea = lpgeaddr;
9146 9146 } else {
9147 9147 a = addr;
9148 9148 ea = addr + len;
9149 9149 }
9150 9150 for (; a < ea; a += pgsz) {
9151 9151 struct vpage *vp;
9152 9152
9153 9153 ASSERT(seg->s_szc == 0 ||
9154 9154 sameprot(seg, a, pgsz));
9155 9155 vp = &svd->vpage[seg_page(seg, a)];
9156 9156 if ((VPP_PROT(vp) & protchk) == 0) {
9157 9157 error = EACCES;
9158 9158 goto out;
9159 9159 }
9160 9160 }
9161 9161 }
9162 9162 }
9163 9163
9164 9164 /*
9165 9165 * try to find pages in segment page cache
9166 9166 */
9167 9167 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
9168 9168 if (pplist != NULL) {
9169 9169 if (pamp != NULL) {
9170 9170 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
9171 9171 ASSERT(svd->type == MAP_SHARED);
9172 9172 atomic_add_long((ulong_t *)&svd->softlockcnt,
9173 9173 npages);
9174 9174 }
9175 9175 if (sftlck_sbase) {
9176 9176 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9177 9177 }
9178 9178 if (sftlck_send) {
9179 9179 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9180 9180 }
9181 9181 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9182 9182 *ppp = pplist + adjustpages;
9183 9183 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
9184 9184 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9185 9185 return (0);
9186 9186 }
9187 9187
9188 9188 /*
9189 9189 * For MAP_SHARED segments we already verified above that segment
9190 9190 * protections allow this pagelock operation.
9191 9191 */
9192 9192 if (pamp == NULL) {
9193 9193 ASSERT(svd->type == MAP_PRIVATE);
9194 9194 if (svd->pageprot == 0) {
9195 9195 if ((svd->prot & protchk) == 0) {
9196 9196 error = EACCES;
9197 9197 goto out;
9198 9198 }
9199 9199 if (svd->prot & PROT_WRITE) {
9200 9200 wlen = lpgeaddr - lpgaddr;
9201 9201 } else {
9202 9202 wlen = 0;
9203 9203 ASSERT(rw == S_READ);
9204 9204 }
9205 9205 } else {
9206 9206 int wcont = 1;
9207 9207 /*
9208 9208 * check page protections
9209 9209 */
9210 9210 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9211 9211 struct vpage *vp;
9212 9212
9213 9213 ASSERT(seg->s_szc == 0 ||
9214 9214 sameprot(seg, a, pgsz));
9215 9215 vp = &svd->vpage[seg_page(seg, a)];
9216 9216 if ((VPP_PROT(vp) & protchk) == 0) {
9217 9217 error = EACCES;
9218 9218 goto out;
9219 9219 }
9220 9220 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9221 9221 wlen += pgsz;
9222 9222 } else {
9223 9223 wcont = 0;
9224 9224 ASSERT(rw == S_READ);
9225 9225 }
9226 9226 }
9227 9227 }
9228 9228 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9229 9229 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9230 9230 }
9231 9231
9232 9232 /*
9233 9233 * Only build large page adjusted shadow list if we expect to insert
9234 9234 * it into pcache. For large enough pages it's a big overhead to
9235 9235 * create a shadow list of the entire large page. But this overhead
9236 9236 * should be amortized over repeated pcache hits on subsequent reuse
9237 9237 * of this shadow list (IO into any range within this shadow list will
9238 9238 * find it in pcache since we large page align the request for pcache
9239 9239 * lookups). pcache performance is improved with bigger shadow lists
9240 9240 * as it reduces the time to pcache the entire big segment and reduces
9241 9241 * pcache chain length.
9242 9242 */
9243 9243 if (seg_pinsert_check(seg, pamp, paddr,
9244 9244 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9245 9245 addr = lpgaddr;
9246 9246 len = lpgeaddr - lpgaddr;
9247 9247 use_pcache = 1;
9248 9248 } else {
9249 9249 use_pcache = 0;
9250 9250 /*
9251 9251 * Since this entry will not be inserted into the pcache, we
9252 9252 * will not do any adjustments to the starting address or
9253 9253 * size of the memory to be locked.
9254 9254 */
9255 9255 adjustpages = 0;
9256 9256 }
9257 9257 npages = btop(len);
9258 9258
9259 9259 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9260 9260 pl = pplist;
9261 9261 *ppp = pplist + adjustpages;
9262 9262 /*
9263 9263 * If use_pcache is 0 this shadow list is not large page adjusted.
9264 9264 * Record this info in the last entry of shadow array so that
9265 9265 * L_PAGEUNLOCK can determine if it should large page adjust the
9266 9266 * address range to find the real range that was locked.
9267 9267 */
9268 9268 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9269 9269
9270 9270 page = seg_page(seg, addr);
9271 9271 anon_index = svd->anon_index + page;
9272 9272
9273 9273 anlock = 0;
9274 9274 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9275 9275 ASSERT(amp->a_szc >= seg->s_szc);
9276 9276 anpgcnt = page_get_pagecnt(amp->a_szc);
9277 9277 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9278 9278 struct anon *ap;
9279 9279 struct vnode *vp;
9280 9280 u_offset_t off;
9281 9281
9282 9282 /*
9283 9283 * Lock and unlock anon array only once per large page.
9284 9284 * anon_array_enter() locks the root anon slot according to
9285 9285 * a_szc which can't change while anon map is locked. We lock
9286 9286 * anon the first time through this loop and each time we
9287 9287 * reach anon index that corresponds to a root of a large
9288 9288 * page.
9289 9289 */
9290 9290 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9291 9291 ASSERT(anlock == 0);
9292 9292 anon_array_enter(amp, anon_index, &cookie);
9293 9293 anlock = 1;
9294 9294 }
9295 9295 ap = anon_get_ptr(amp->ahp, anon_index);
9296 9296
9297 9297 /*
9298 9298 * We must never use seg_pcache for COW pages
9299 9299 * because we might end up with original page still
9300 9300 * lying in seg_pcache even after private page is
9301 9301 * created. This leads to data corruption as
9302 9302 * aio_write refers to the page still in cache
9303 9303 * while all other accesses refer to the private
9304 9304 * page.
9305 9305 */
9306 9306 if (ap == NULL || ap->an_refcnt != 1) {
9307 9307 struct vpage *vpage;
9308 9308
9309 9309 if (seg->s_szc) {
9310 9310 error = EFAULT;
9311 9311 break;
9312 9312 }
9313 9313 if (svd->vpage != NULL) {
9314 9314 vpage = &svd->vpage[seg_page(seg, a)];
9315 9315 } else {
9316 9316 vpage = NULL;
9317 9317 }
9318 9318 ASSERT(anlock);
9319 9319 anon_array_exit(&cookie);
9320 9320 anlock = 0;
9321 9321 pp = NULL;
9322 9322 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9323 9323 vpage, &pp, 0, F_INVAL, rw, 1);
9324 9324 if (error) {
9325 9325 error = fc_decode(error);
9326 9326 break;
9327 9327 }
9328 9328 anon_array_enter(amp, anon_index, &cookie);
9329 9329 anlock = 1;
9330 9330 ap = anon_get_ptr(amp->ahp, anon_index);
9331 9331 if (ap == NULL || ap->an_refcnt != 1) {
9332 9332 error = EFAULT;
9333 9333 break;
9334 9334 }
9335 9335 }
9336 9336 swap_xlate(ap, &vp, &off);
9337 9337 pp = page_lookup_nowait(vp, off, SE_SHARED);
9338 9338 if (pp == NULL) {
9339 9339 error = EFAULT;
9340 9340 break;
9341 9341 }
9342 9342 if (ap->an_pvp != NULL) {
9343 9343 anon_swap_free(ap, pp);
9344 9344 }
9345 9345 /*
9346 9346 * Unlock anon if this is the last slot in a large page.
9347 9347 */
9348 9348 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9349 9349 ASSERT(anlock);
9350 9350 anon_array_exit(&cookie);
9351 9351 anlock = 0;
9352 9352 }
9353 9353 *pplist++ = pp;
9354 9354 }
9355 9355 if (anlock) { /* Ensure the lock is dropped */
9356 9356 anon_array_exit(&cookie);
9357 9357 }
9358 9358 	ANON_LOCK_EXIT(&amp->a_rwlock);
9359 9359
9360 9360 if (a >= addr + len) {
9361 9361 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9362 9362 if (pamp != NULL) {
9363 9363 ASSERT(svd->type == MAP_SHARED);
9364 9364 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9365 9365 npages);
9366 9366 wlen = len;
9367 9367 }
9368 9368 if (sftlck_sbase) {
9369 9369 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9370 9370 }
9371 9371 if (sftlck_send) {
9372 9372 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9373 9373 }
9374 9374 if (use_pcache) {
9375 9375 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9376 9376 rw, pflags, preclaim_callback);
9377 9377 }
9378 9378 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9379 9379 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9380 9380 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9381 9381 return (0);
9382 9382 }
9383 9383
9384 9384 pplist = pl;
9385 9385 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9386 9386 while (np > (uint_t)0) {
9387 9387 ASSERT(PAGE_LOCKED(*pplist));
9388 9388 page_unlock(*pplist);
9389 9389 np--;
9390 9390 pplist++;
9391 9391 }
9392 9392 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9393 9393 out:
9394 9394 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9395 9395 *ppp = NULL;
9396 9396 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9397 9397 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9398 9398 return (error);
9399 9399 }
9400 9400
9401 9401 /*
9402 9402 * purge any cached pages in the I/O page cache
9403 9403 */
9404 9404 static void
9405 9405 segvn_purge(struct seg *seg)
9406 9406 {
9407 9407 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9408 9408
9409 9409 /*
9410 9410 * pcache is only used by pure anon segments.
9411 9411 */
9412 9412 if (svd->amp == NULL || svd->vp != NULL) {
9413 9413 return;
9414 9414 }
9415 9415
9416 9416 /*
9417 9417 * For MAP_SHARED segments non 0 segment's softlockcnt means
9418 9418 * active IO is still in progress via this segment. So we only
9419 9419 * purge MAP_SHARED segments when their softlockcnt is 0.
9420 9420 */
9421 9421 if (svd->type == MAP_PRIVATE) {
9422 9422 if (svd->softlockcnt) {
9423 9423 seg_ppurge(seg, NULL, 0);
9424 9424 }
9425 9425 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9426 9426 seg_ppurge(seg, svd->amp, 0);
9427 9427 }
9428 9428 }
9429 9429
9430 9430 /*
9431 9431 * If async argument is not 0 we are called from pcache async thread and don't
9432 9432 * hold AS lock.
9433 9433 */
9434 9434
9435 9435 /*ARGSUSED*/
9436 9436 static int
9437 9437 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9438 9438 enum seg_rw rw, int async)
9439 9439 {
9440 9440 struct seg *seg = (struct seg *)ptag;
9441 9441 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9442 9442 pgcnt_t np, npages;
9443 9443 struct page **pl;
9444 9444
9445 9445 npages = np = btop(len);
9446 9446 ASSERT(npages);
9447 9447
9448 9448 ASSERT(svd->vp == NULL && svd->amp != NULL);
9449 9449 ASSERT(svd->softlockcnt >= npages);
9450 9450 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9451 9451
9452 9452 pl = pplist;
9453 9453
9454 9454 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9455 9455 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9456 9456
9457 9457 while (np > (uint_t)0) {
9458 9458 if (rw == S_WRITE) {
9459 9459 hat_setrefmod(*pplist);
9460 9460 } else {
9461 9461 hat_setref(*pplist);
9462 9462 }
9463 9463 page_unlock(*pplist);
9464 9464 np--;
9465 9465 pplist++;
9466 9466 }
9467 9467
9468 9468 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9469 9469
9470 9470 /*
9471 9471 * If we are pcache async thread we don't hold AS lock. This means if
9472 9472 * softlockcnt drops to 0 after the decrement below address space may
9473 9473 	 * get freed. We can't allow it since after softlock decrement to 0 we
9474 9474 	 * still need to access the as structure for possible wakeup of unmap
9475 9475 	 * waiters. To prevent the disappearance of as we take this segment's
9476 9476 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9477 9477 * make sure this routine completes before segment is freed.
9478 9478 *
9479 9479 * The second complication we have to deal with in async case is a
9480 9480 * possibility of missed wake up of unmap wait thread. When we don't
9481 9481 * hold as lock here we may take a_contents lock before unmap wait
9482 9482 * thread that was first to see softlockcnt was still not 0. As a
9483 9483 * result we'll fail to wake up an unmap wait thread. To avoid this
9484 9484 * race we set nounmapwait flag in as structure if we drop softlockcnt
9485 9485 * to 0 when we were called by pcache async thread. unmapwait thread
9486 9486 * will not block if this flag is set.
9487 9487 */
9488 9488 if (async) {
9489 9489 mutex_enter(&svd->segfree_syncmtx);
9490 9490 }
9491 9491
9492 9492 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9493 9493 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9494 9494 mutex_enter(&seg->s_as->a_contents);
9495 9495 if (async) {
9496 9496 AS_SETNOUNMAPWAIT(seg->s_as);
9497 9497 }
9498 9498 if (AS_ISUNMAPWAIT(seg->s_as)) {
9499 9499 AS_CLRUNMAPWAIT(seg->s_as);
9500 9500 cv_broadcast(&seg->s_as->a_cv);
9501 9501 }
9502 9502 mutex_exit(&seg->s_as->a_contents);
9503 9503 }
9504 9504 }
9505 9505
9506 9506 if (async) {
9507 9507 mutex_exit(&svd->segfree_syncmtx);
9508 9508 }
9509 9509 return (0);
9510 9510 }
9511 9511
9512 9512 /*ARGSUSED*/
9513 9513 static int
9514 9514 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9515 9515 enum seg_rw rw, int async)
9516 9516 {
9517 9517 amp_t *amp = (amp_t *)ptag;
9518 9518 pgcnt_t np, npages;
9519 9519 struct page **pl;
9520 9520
9521 9521 npages = np = btop(len);
9522 9522 ASSERT(npages);
9523 9523 ASSERT(amp->a_softlockcnt >= npages);
9524 9524
9525 9525 pl = pplist;
9526 9526
9527 9527 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9528 9528 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9529 9529
9530 9530 while (np > (uint_t)0) {
9531 9531 if (rw == S_WRITE) {
9532 9532 hat_setrefmod(*pplist);
9533 9533 } else {
9534 9534 hat_setref(*pplist);
9535 9535 }
9536 9536 page_unlock(*pplist);
9537 9537 np--;
9538 9538 pplist++;
9539 9539 }
9540 9540
9541 9541 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9542 9542
9543 9543 /*
9544 9544 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9545 9545 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9546 9546 * and anonmap_purge() acquires a_purgemtx.
9547 9547 */
9548 9548 	mutex_enter(&amp->a_purgemtx);
9549 9549 	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9550 9550 amp->a_purgewait) {
9551 9551 amp->a_purgewait = 0;
9552 9552 		cv_broadcast(&amp->a_purgecv);
9553 9553 }
9554 9554 	mutex_exit(&amp->a_purgemtx);
9555 9555 return (0);
9556 9556 }
9557 9557
9558 9558 /*
9559 9559 * get a memory ID for an addr in a given segment
9560 9560 *
9561 9561 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9562 9562 * At fault time they will be relocated into larger pages.
9563 9563 */
9564 9564 static int
9565 9565 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9566 9566 {
9567 9567 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9568 9568 struct anon *ap = NULL;
9569 9569 ulong_t anon_index;
9570 9570 struct anon_map *amp;
9571 9571 anon_sync_obj_t cookie;
9572 9572
9573 9573 if (svd->type == MAP_PRIVATE) {
9574 9574 memidp->val[0] = (uintptr_t)seg->s_as;
9575 9575 memidp->val[1] = (uintptr_t)addr;
9576 9576 return (0);
9577 9577 }
9578 9578
9579 9579 if (svd->type == MAP_SHARED) {
9580 9580 if (svd->vp) {
9581 9581 memidp->val[0] = (uintptr_t)svd->vp;
9582 9582 memidp->val[1] = (u_longlong_t)svd->offset +
9583 9583 (uintptr_t)(addr - seg->s_base);
9584 9584 return (0);
9585 9585 } else {
9586 9586
9587 9587 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9588 9588 if ((amp = svd->amp) != NULL) {
9589 9589 anon_index = svd->anon_index +
9590 9590 seg_page(seg, addr);
9591 9591 }
9592 9592 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9593 9593
9594 9594 ASSERT(amp != NULL);
9595 9595
9596 9596 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9597 9597 anon_array_enter(amp, anon_index, &cookie);
9598 9598 ap = anon_get_ptr(amp->ahp, anon_index);
9599 9599 if (ap == NULL) {
9600 9600 page_t *pp;
9601 9601
9602 9602 pp = anon_zero(seg, addr, &ap, svd->cred);
9603 9603 if (pp == NULL) {
9604 9604 anon_array_exit(&cookie);
9605 9605 					ANON_LOCK_EXIT(&amp->a_rwlock);
9606 9606 return (ENOMEM);
9607 9607 }
9608 9608 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9609 9609 == NULL);
9610 9610 (void) anon_set_ptr(amp->ahp, anon_index,
9611 9611 ap, ANON_SLEEP);
9612 9612 page_unlock(pp);
9613 9613 }
9614 9614
9615 9615 anon_array_exit(&cookie);
9616 9616 			ANON_LOCK_EXIT(&amp->a_rwlock);
9617 9617
9618 9618 memidp->val[0] = (uintptr_t)ap;
9619 9619 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9620 9620 return (0);
9621 9621 }
9622 9622 }
9623 9623 return (EINVAL);
9624 9624 }
9625 9625
9626 9626 static int
9627 9627 sameprot(struct seg *seg, caddr_t a, size_t len)
9628 9628 {
9629 9629 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9630 9630 struct vpage *vpage;
9631 9631 spgcnt_t pages = btop(len);
9632 9632 uint_t prot;
9633 9633
9634 9634 if (svd->pageprot == 0)
9635 9635 return (1);
9636 9636
9637 9637 ASSERT(svd->vpage != NULL);
9638 9638
9639 9639 vpage = &svd->vpage[seg_page(seg, a)];
9640 9640 prot = VPP_PROT(vpage);
9641 9641 vpage++;
9642 9642 pages--;
9643 9643 while (pages-- > 0) {
9644 9644 if (prot != VPP_PROT(vpage))
9645 9645 return (0);
9646 9646 vpage++;
9647 9647 }
9648 9648 return (1);
9649 9649 }
9650 9650
9651 9651 /*
9652 9652 * Get memory allocation policy info for specified address in given segment
9653 9653 */
9654 9654 static lgrp_mem_policy_info_t *
9655 9655 segvn_getpolicy(struct seg *seg, caddr_t addr)
9656 9656 {
9657 9657 struct anon_map *amp;
9658 9658 ulong_t anon_index;
9659 9659 lgrp_mem_policy_info_t *policy_info;
9660 9660 struct segvn_data *svn_data;
9661 9661 u_offset_t vn_off;
9662 9662 vnode_t *vp;
9663 9663
9664 9664 ASSERT(seg != NULL);
9665 9665
9666 9666 svn_data = (struct segvn_data *)seg->s_data;
9667 9667 if (svn_data == NULL)
9668 9668 return (NULL);
9669 9669
9670 9670 /*
9671 9671 * Get policy info for private or shared memory
9672 9672 */
9673 9673 if (svn_data->type != MAP_SHARED) {
9674 9674 if (svn_data->tr_state != SEGVN_TR_ON) {
9675 9675 policy_info = &svn_data->policy_info;
9676 9676 } else {
9677 9677 policy_info = &svn_data->tr_policy_info;
9678 9678 ASSERT(policy_info->mem_policy ==
9679 9679 LGRP_MEM_POLICY_NEXT_SEG);
9680 9680 }
9681 9681 } else {
9682 9682 amp = svn_data->amp;
9683 9683 anon_index = svn_data->anon_index + seg_page(seg, addr);
9684 9684 vp = svn_data->vp;
9685 9685 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9686 9686 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9687 9687 }
9688 9688
9689 9689 return (policy_info);
9690 9690 }
9691 9691
9692 9692 /*
9693 9693 * Bind text vnode segment to an amp. If we bind successfully mappings will be
9694 9694 * established to per vnode mapping per lgroup amp pages instead of to vnode
9695 9695 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9696 9696 * may share the same text replication amp. If a suitable amp doesn't already
9697 9697 * exist in svntr hash table create a new one. We may fail to bind to amp if
9698 9698 * segment is not eligible for text replication. Code below first checks for
9699 9699 * these conditions. If binding is successful segment tr_state is set to on
9700 9700 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9701 9701 * svd->amp remains as NULL.
9702 9702 */
9703 9703 static void
9704 9704 segvn_textrepl(struct seg *seg)
9705 9705 {
9706 9706 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9707 9707 vnode_t *vp = svd->vp;
9708 9708 u_offset_t off = svd->offset;
9709 9709 size_t size = seg->s_size;
9710 9710 u_offset_t eoff = off + size;
9711 9711 uint_t szc = seg->s_szc;
9712 9712 ulong_t hash = SVNTR_HASH_FUNC(vp);
9713 9713 svntr_t *svntrp;
9714 9714 struct vattr va;
9715 9715 proc_t *p = seg->s_as->a_proc;
9716 9716 lgrp_id_t lgrp_id;
9717 9717 lgrp_id_t olid;
9718 9718 int first;
9719 9719 struct anon_map *amp;
9720 9720
9721 9721 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9722 9722 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9723 9723 ASSERT(p != NULL);
9724 9724 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9725 9725 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9726 9726 ASSERT(svd->flags & MAP_TEXT);
9727 9727 ASSERT(svd->type == MAP_PRIVATE);
9728 9728 ASSERT(vp != NULL && svd->amp == NULL);
9729 9729 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9730 9730 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9731 9731 ASSERT(seg->s_as != &kas);
9732 9732 ASSERT(off < eoff);
9733 9733 ASSERT(svntr_hashtab != NULL);
9734 9734
9735 9735 /*
9736 9736 * If numa optimizations are no longer desired bail out.
9737 9737 */
9738 9738 if (!lgrp_optimizations()) {
9739 9739 svd->tr_state = SEGVN_TR_OFF;
9740 9740 return;
9741 9741 }
9742 9742
9743 9743 /*
9744 9744 * Avoid creating anon maps with size bigger than the file size.
9745 9745 * If VOP_GETATTR() call fails bail out.
9746 9746 */
9747 9747 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9748 9748 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9749 9749 svd->tr_state = SEGVN_TR_OFF;
9750 9750 SEGVN_TR_ADDSTAT(gaerr);
9751 9751 return;
9752 9752 }
9753 9753 if (btopr(va.va_size) < btopr(eoff)) {
9754 9754 svd->tr_state = SEGVN_TR_OFF;
9755 9755 SEGVN_TR_ADDSTAT(overmap);
9756 9756 return;
9757 9757 }
9758 9758
9759 9759 /*
9760 9760 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
9761 9761 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
9762 9762 * mapping that checks if trcache for this vnode needs to be
9763 9763 * invalidated can't miss us.
9764 9764 */
9765 9765 if (!(vp->v_flag & VVMEXEC)) {
9766 9766 mutex_enter(&vp->v_lock);
9767 9767 vp->v_flag |= VVMEXEC;
9768 9768 mutex_exit(&vp->v_lock);
9769 9769 }
9770 9770 mutex_enter(&svntr_hashtab[hash].tr_lock);
9771 9771 /*
9772 9772 * Bail out if potentially MAP_SHARED writable mappings exist to this
9773 9773 * vnode. We don't want to use old file contents from existing
9774 9774 * replicas if this mapping was established after the original file
9775 9775 * was changed.
9776 9776 */
9777 9777 if (vn_is_mapped(vp, V_WRITE)) {
9778 9778 mutex_exit(&svntr_hashtab[hash].tr_lock);
9779 9779 svd->tr_state = SEGVN_TR_OFF;
9780 9780 SEGVN_TR_ADDSTAT(wrcnt);
9781 9781 return;
9782 9782 }
9783 9783 svntrp = svntr_hashtab[hash].tr_head;
9784 9784 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9785 9785 ASSERT(svntrp->tr_refcnt != 0);
9786 9786 if (svntrp->tr_vp != vp) {
9787 9787 continue;
9788 9788 }
9789 9789
9790 9790 /*
9791 9791 * Bail out if the file or its attributes were changed after
9792 9792 * this replication entry was created since we need to use the
9793 9793 * latest file contents. Note that mtime test alone is not
9794 9794 * sufficient because a user can explicitly change mtime via
9795 9795 		 * utimes(2) interfaces back to the old value after modifying
9796 9796 * the file contents. To detect this case we also have to test
9797 9797 * ctime which among other things records the time of the last
9798 9798 * mtime change by utimes(2). ctime is not changed when the file
9799 9799 * is only read or executed so we expect that typically existing
9800 9800 		 * replication amps can be used most of the time.
9801 9801 */
9802 9802 if (!svntrp->tr_valid ||
9803 9803 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9804 9804 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9805 9805 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9806 9806 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9807 9807 mutex_exit(&svntr_hashtab[hash].tr_lock);
9808 9808 svd->tr_state = SEGVN_TR_OFF;
9809 9809 SEGVN_TR_ADDSTAT(stale);
9810 9810 return;
9811 9811 }
9812 9812 /*
9813 9813 * if off, eoff and szc match current segment we found the
9814 9814 * existing entry we can use.
9815 9815 */
9816 9816 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9817 9817 svntrp->tr_szc == szc) {
9818 9818 break;
9819 9819 }
9820 9820 /*
9821 9821 * Don't create different but overlapping in file offsets
9822 9822 * entries to avoid replication of the same file pages more
9823 9823 * than once per lgroup.
9824 9824 */
9825 9825 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9826 9826 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9827 9827 mutex_exit(&svntr_hashtab[hash].tr_lock);
9828 9828 svd->tr_state = SEGVN_TR_OFF;
9829 9829 SEGVN_TR_ADDSTAT(overlap);
9830 9830 return;
9831 9831 }
9832 9832 }
9833 9833 /*
9834 9834	 * If we didn't find an existing entry, create a new one.
9835 9835 */
9836 9836 if (svntrp == NULL) {
9837 9837 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9838 9838 if (svntrp == NULL) {
9839 9839 mutex_exit(&svntr_hashtab[hash].tr_lock);
9840 9840 svd->tr_state = SEGVN_TR_OFF;
9841 9841 SEGVN_TR_ADDSTAT(nokmem);
9842 9842 return;
9843 9843 }
9844 9844 #ifdef DEBUG
9845 9845 {
9846 9846 lgrp_id_t i;
9847 9847 for (i = 0; i < NLGRPS_MAX; i++) {
9848 9848 ASSERT(svntrp->tr_amp[i] == NULL);
9849 9849 }
9850 9850 }
9851 9851 #endif /* DEBUG */
9852 9852 svntrp->tr_vp = vp;
9853 9853 svntrp->tr_off = off;
9854 9854 svntrp->tr_eoff = eoff;
9855 9855 svntrp->tr_szc = szc;
9856 9856 svntrp->tr_valid = 1;
9857 9857 svntrp->tr_mtime = va.va_mtime;
9858 9858 svntrp->tr_ctime = va.va_ctime;
9859 9859 svntrp->tr_refcnt = 0;
9860 9860 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9861 9861 svntr_hashtab[hash].tr_head = svntrp;
9862 9862 }
9863 9863 first = 1;
9864 9864 again:
9865 9865 /*
9866 9866	 * We want to pick a replica with pages on the main thread's (t_tid = 1,
9867 9867	 * aka T1) lgrp. Currently text replication is only optimized for
9868 9868	 * workloads that either have all threads of a process on the same
9869 9869	 * lgrp or execute their large text primarily on the main thread.
9870 9870 */
9871 9871 lgrp_id = p->p_t1_lgrpid;
9872 9872 if (lgrp_id == LGRP_NONE) {
9873 9873 /*
9874 9874		 * In case exec() prefaults text on a non-main thread, use the
9875 9875		 * current thread's lgrpid. It will become the main thread
9876 9876		 * soon anyway.
9877 9877 */
9878 9878 lgrp_id = lgrp_home_id(curthread);
9879 9879 }
9880 9880 /*
9881 9881	 * Set p_tr_lgrpid to lgrp_id if it hasn't been set yet. Otherwise
9882 9882	 * just set it to NLGRPS_MAX if it's different from the current process
9883 9883	 * T1 home lgrp. p_tr_lgrpid is used to detect if the process uses text
9884 9884	 * replication and T1's new home is different from the lgrp used for
9885 9885	 * text replication. When this happens the asynchronous segvn thread
9886 9886	 * rechecks whether segments should change the lgrps used for text
9887 9887	 * replication. If we fail to set p_tr_lgrpid with atomic_cas_32 then
9888 9888	 * set it to NLGRPS_MAX without cas if it's not already NLGRPS_MAX and
9889 9889	 * not equal to the lgrp_id we want to use. We don't need to use cas in
9890 9890	 * this case because another thread that races in between our non-atomic
9891 9891	 * check and set may only change p_tr_lgrpid to NLGRPS_MAX at this point.
9892 9892 */
9893 9893 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9894 9894 olid = p->p_tr_lgrpid;
9895 9895 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9896 9896 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9897 9897 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9898 9898 olid) {
9899 9899 olid = p->p_tr_lgrpid;
9900 9900 ASSERT(olid != LGRP_NONE);
9901 9901 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9902 9902 p->p_tr_lgrpid = NLGRPS_MAX;
9903 9903 }
9904 9904 }
9905 9905 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9906 9906 membar_producer();
9907 9907 /*
9908 9908		 * lgrp_move_thread() won't schedule an async recheck after a
9909 9909		 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9910 9910		 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9911 9911		 * is not LGRP_NONE.
9912 9912 */
9913 9913 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9914 9914 p->p_t1_lgrpid != lgrp_id) {
9915 9915 first = 0;
9916 9916 goto again;
9917 9917 }
9918 9918 }
9919 9919 /*
9920 9920	 * If no amp has been created yet for lgrp_id, create a new one as long
9921 9921	 * as we have enough memory to afford it.
9922 9922 */
9923 9923 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9924 9924 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9925 9925 if (trmem > segvn_textrepl_max_bytes) {
9926 9926 SEGVN_TR_ADDSTAT(normem);
9927 9927 goto fail;
9928 9928 }
9929 9929 if (anon_try_resv_zone(size, NULL) == 0) {
9930 9930 SEGVN_TR_ADDSTAT(noanon);
9931 9931 goto fail;
9932 9932 }
9933 9933 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9934 9934 if (amp == NULL) {
9935 9935 anon_unresv_zone(size, NULL);
9936 9936 SEGVN_TR_ADDSTAT(nokmem);
9937 9937 goto fail;
9938 9938 }
9939 9939 ASSERT(amp->refcnt == 1);
9940 9940 amp->a_szc = szc;
9941 9941 svntrp->tr_amp[lgrp_id] = amp;
9942 9942 SEGVN_TR_ADDSTAT(newamp);
9943 9943 }
9944 9944 svntrp->tr_refcnt++;
9945 9945 ASSERT(svd->svn_trnext == NULL);
9946 9946 ASSERT(svd->svn_trprev == NULL);
9947 9947 svd->svn_trnext = svntrp->tr_svnhead;
9948 9948 svd->svn_trprev = NULL;
9949 9949 if (svntrp->tr_svnhead != NULL) {
9950 9950 svntrp->tr_svnhead->svn_trprev = svd;
9951 9951 }
9952 9952 svntrp->tr_svnhead = svd;
9953 9953 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9954 9954 ASSERT(amp->refcnt >= 1);
9955 9955 svd->amp = amp;
9956 9956 svd->anon_index = 0;
9957 9957 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9958 9958 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9959 9959 svd->tr_state = SEGVN_TR_ON;
9960 9960 mutex_exit(&svntr_hashtab[hash].tr_lock);
9961 9961 SEGVN_TR_ADDSTAT(repl);
9962 9962 return;
9963 9963 fail:
9964 9964 ASSERT(segvn_textrepl_bytes >= size);
9965 9965 atomic_add_long(&segvn_textrepl_bytes, -size);
9966 9966 ASSERT(svntrp != NULL);
9967 9967 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9968 9968 if (svntrp->tr_refcnt == 0) {
9969 9969 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9970 9970 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9971 9971 mutex_exit(&svntr_hashtab[hash].tr_lock);
9972 9972 kmem_cache_free(svntr_cache, svntrp);
9973 9973 } else {
9974 9974 mutex_exit(&svntr_hashtab[hash].tr_lock);
9975 9975 }
9976 9976 svd->tr_state = SEGVN_TR_OFF;
9977 9977 }
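
The staleness check in segvn_textrepl() keys off an exact mtime and ctime match against the cached svntr timestamps. A minimal sketch of that comparison, not part of this webrev and using a hypothetical timespec_eq() helper and SVNTR_IS_STALE() macro:

	/*
	 * Hypothetical illustration only: an svntr entry stays usable only if
	 * both cached timestamps still match the file's current attributes
	 * down to the nanosecond.
	 */
	static int
	timespec_eq(const timespec_t *a, const timespec_t *b)
	{
		return (a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec);
	}

	/* Stale unless both mtime and ctime still match exactly. */
	#define	SVNTR_IS_STALE(trp, vap)				\
		(!timespec_eq(&(trp)->tr_mtime, &(vap)->va_mtime) ||	\
		!timespec_eq(&(trp)->tr_ctime, &(vap)->va_ctime))

An entry that passes this test may still be rejected later for offset overlap or size-code mismatch, as the loop above shows.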
9978 9978
9979 9979 /*
9980 9980 * Convert the seg back to a regular vnode mapping seg by unbinding it from
9981 9981 * its text replication amp. This routine is most typically called when the
9982 9982 * segment is unmapped, but can also be called when the segment no longer
9983 9983 * qualifies for text replication (e.g. due to protection changes). If
9984 9984 * unload_unmap is set, use the HAT_UNLOAD_UNMAP flag in hat_unload_callback().
9985 9985 * If we are the last user of the svntr entry, free its anon maps and remove
9986 9986 */
9987 9987 static void
9988 9988 segvn_textunrepl(struct seg *seg, int unload_unmap)
9989 9989 {
9990 9990 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9991 9991 vnode_t *vp = svd->vp;
9992 9992 u_offset_t off = svd->offset;
9993 9993 size_t size = seg->s_size;
9994 9994 u_offset_t eoff = off + size;
9995 9995 uint_t szc = seg->s_szc;
9996 9996 ulong_t hash = SVNTR_HASH_FUNC(vp);
9997 9997 svntr_t *svntrp;
9998 9998 svntr_t **prv_svntrp;
9999 9999 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
10000 10000 lgrp_id_t i;
10001 10001
10002 10002 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
10003 10003 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
10004 10004 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
10005 10005 ASSERT(svd->tr_state == SEGVN_TR_ON);
10006 10006 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10007 10007 ASSERT(svd->amp != NULL);
10008 10008 ASSERT(svd->amp->refcnt >= 1);
10009 10009 ASSERT(svd->anon_index == 0);
10010 10010 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
10011 10011 ASSERT(svntr_hashtab != NULL);
10012 10012
10013 10013 mutex_enter(&svntr_hashtab[hash].tr_lock);
10014 10014 prv_svntrp = &svntr_hashtab[hash].tr_head;
10015 10015 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
10016 10016 ASSERT(svntrp->tr_refcnt != 0);
10017 10017 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
10018 10018 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
10019 10019 break;
10020 10020 }
10021 10021 }
10022 10022 if (svntrp == NULL) {
10023 10023 panic("segvn_textunrepl: svntr record not found");
10024 10024 }
10025 10025 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
10026 10026 panic("segvn_textunrepl: amp mismatch");
10027 10027 }
10028 10028 svd->tr_state = SEGVN_TR_OFF;
10029 10029 svd->amp = NULL;
10030 10030 if (svd->svn_trprev == NULL) {
10031 10031 ASSERT(svntrp->tr_svnhead == svd);
10032 10032 svntrp->tr_svnhead = svd->svn_trnext;
10033 10033 if (svntrp->tr_svnhead != NULL) {
10034 10034 svntrp->tr_svnhead->svn_trprev = NULL;
10035 10035 }
10036 10036 svd->svn_trnext = NULL;
10037 10037 } else {
10038 10038 svd->svn_trprev->svn_trnext = svd->svn_trnext;
10039 10039 if (svd->svn_trnext != NULL) {
10040 10040 svd->svn_trnext->svn_trprev = svd->svn_trprev;
10041 10041 svd->svn_trnext = NULL;
10042 10042 }
10043 10043 svd->svn_trprev = NULL;
10044 10044 }
10045 10045 if (--svntrp->tr_refcnt) {
10046 10046 mutex_exit(&svntr_hashtab[hash].tr_lock);
10047 10047 goto done;
10048 10048 }
10049 10049 *prv_svntrp = svntrp->tr_next;
10050 10050 mutex_exit(&svntr_hashtab[hash].tr_lock);
10051 10051 for (i = 0; i < NLGRPS_MAX; i++) {
10052 10052 struct anon_map *amp = svntrp->tr_amp[i];
10053 10053 if (amp == NULL) {
10054 10054 continue;
10055 10055 }
10056 10056 ASSERT(amp->refcnt == 1);
10057 10057 ASSERT(amp->swresv == size);
10058 10058 ASSERT(amp->size == size);
10059 10059 ASSERT(amp->a_szc == szc);
10060 10060 if (amp->a_szc != 0) {
10061 10061 anon_free_pages(amp->ahp, 0, size, szc);
10062 10062 } else {
10063 10063 anon_free(amp->ahp, 0, size);
10064 10064 }
10065 10065 svntrp->tr_amp[i] = NULL;
10066 10066 ASSERT(segvn_textrepl_bytes >= size);
10067 10067 atomic_add_long(&segvn_textrepl_bytes, -size);
10068 10068 anon_unresv_zone(amp->swresv, NULL);
10069 10069 amp->refcnt = 0;
10070 10070 anonmap_free(amp);
10071 10071 }
10072 10072 kmem_cache_free(svntr_cache, svntrp);
10073 10073 done:
10074 10074 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
10075 10075 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
10076 10076 }
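
segvn_textunrepl() unlinks svd from the per-entry svn_trnext/svn_trprev list by hand. A simplified sketch of that unlink, with invented node and list names purely for illustration, not taken from this webrev:

	struct node {
		struct node *next;
		struct node *prev;
	};

	/*
	 * Hypothetical sketch: a node at the head of the list updates the
	 * head pointer, otherwise its neighbours are stitched together.
	 */
	static void
	list_unlink(struct node **headp, struct node *n)
	{
		if (n->prev == NULL) {			/* head of the list */
			*headp = n->next;
			if (n->next != NULL)
				n->next->prev = NULL;
		} else {
			n->prev->next = n->next;
			if (n->next != NULL)
				n->next->prev = n->prev;
		}
		n->next = n->prev = NULL;
	}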
10077 10077
10078 10078 /*
10079 10079 * This is called when a MAP_SHARED writable mapping is created to a vnode
10080 10080 * that is currently used for execution (VVMEXEC flag is set). In this case we
10081 10081 * need to prevent further use of existing replicas.
10082 10082 */
10083 10083 static void
10084 10084 segvn_inval_trcache(vnode_t *vp)
10085 10085 {
10086 10086 ulong_t hash = SVNTR_HASH_FUNC(vp);
10087 10087 svntr_t *svntrp;
10088 10088
10089 10089 ASSERT(vp->v_flag & VVMEXEC);
10090 10090
10091 10091 if (svntr_hashtab == NULL) {
10092 10092 return;
10093 10093 }
10094 10094
10095 10095 mutex_enter(&svntr_hashtab[hash].tr_lock);
10096 10096 svntrp = svntr_hashtab[hash].tr_head;
10097 10097 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
10098 10098 ASSERT(svntrp->tr_refcnt != 0);
10099 10099 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
10100 10100 svntrp->tr_valid = 0;
10101 10101 }
10102 10102 }
10103 10103 mutex_exit(&svntr_hashtab[hash].tr_lock);
10104 10104 }
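
segvn_inval_trcache() asserts that VVMEXEC is already set, so callers are expected to test the flag before invalidating. A hypothetical caller-side sketch consistent with that assertion (the surrounding variables are assumed to come from the mapping-creation path; this snippet is not taken from the webrev):

	/* Writable MAP_SHARED mapping being set up on an executable vnode. */
	if ((type == MAP_SHARED) && (prot & PROT_WRITE) &&
	    (vp->v_flag & VVMEXEC)) {
		segvn_inval_trcache(vp);	/* stop reusing old replicas */
	}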
10105 10105
10106 10106 static void
10107 10107 segvn_trasync_thread(void)
10108 10108 {
10109 10109 callb_cpr_t cpr_info;
10110 10110 kmutex_t cpr_lock; /* just for CPR stuff */
10111 10111
10112 10112 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
10113 10113
10114 10114 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
10115 10115 callb_generic_cpr, "segvn_async");
10116 10116
10117 10117 if (segvn_update_textrepl_interval == 0) {
10118 10118 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
10119 10119 } else {
10120 10120 segvn_update_textrepl_interval *= hz;
10121 10121 }
10122 10122 (void) timeout(segvn_trupdate_wakeup, NULL,
10123 10123 segvn_update_textrepl_interval);
10124 10124
10125 10125 for (;;) {
10126 10126 mutex_enter(&cpr_lock);
10127 10127 CALLB_CPR_SAFE_BEGIN(&cpr_info);
10128 10128 mutex_exit(&cpr_lock);
10129 10129 sema_p(&segvn_trasync_sem);
10130 10130 mutex_enter(&cpr_lock);
10131 10131 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
10132 10132 mutex_exit(&cpr_lock);
10133 10133 segvn_trupdate();
10134 10134 }
10135 10135 }
10136 10136
10137 10137 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
10138 10138
10139 10139 static void
10140 10140 segvn_trupdate_wakeup(void *dummy)
10141 10141 {
10142 10142 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
10143 10143
10144 10144 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
10145 10145 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
10146 10146 sema_v(&segvn_trasync_sem);
10147 10147 }
10148 10148
10149 10149 if (!segvn_disable_textrepl_update &&
10150 10150 segvn_update_textrepl_interval != 0) {
10151 10151 (void) timeout(segvn_trupdate_wakeup, dummy,
10152 10152 segvn_update_textrepl_interval);
10153 10153 }
10154 10154 }
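
segvn_trasync_thread() and segvn_trupdate_wakeup() together form a self-rearming timeout that drives a semaphore-gated worker. Stripped of the CPR handling and the thread-migration-count gate, the pattern is roughly as follows (hypothetical names; assumes the semaphore is initialized and the interval is set in ticks):

	static ksema_t work_sem;	/* assume sema_init() was done */
	static clock_t work_ticks;	/* assume set to the scan interval */

	static void
	work_wakeup(void *arg)
	{
		sema_v(&work_sem);				/* wake the worker */
		(void) timeout(work_wakeup, arg, work_ticks);	/* re-arm */
	}

	static void
	work_thread(void)
	{
		(void) timeout(work_wakeup, NULL, work_ticks);
		for (;;) {
			sema_p(&work_sem);	/* block until the next pass */
			/* ... do one update pass ... */
		}
	}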
10155 10155
10156 10156 static void
10157 10157 segvn_trupdate(void)
10158 10158 {
10159 10159 ulong_t hash;
10160 10160 svntr_t *svntrp;
10161 10161 segvn_data_t *svd;
10162 10162
10163 10163 ASSERT(svntr_hashtab != NULL);
10164 10164
10165 10165 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
10166 10166 mutex_enter(&svntr_hashtab[hash].tr_lock);
10167 10167 svntrp = svntr_hashtab[hash].tr_head;
10168 10168 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
10169 10169 ASSERT(svntrp->tr_refcnt != 0);
10170 10170 svd = svntrp->tr_svnhead;
10171 10171 for (; svd != NULL; svd = svd->svn_trnext) {
10172 10172 segvn_trupdate_seg(svd->seg, svd, svntrp,
10173 10173 hash);
10174 10174 }
10175 10175 }
10176 10176 mutex_exit(&svntr_hashtab[hash].tr_lock);
10177 10177 }
10178 10178 }
10179 10179
10180 10180 static void
10181 10181 segvn_trupdate_seg(struct seg *seg,
10182 10182 segvn_data_t *svd,
10183 10183 svntr_t *svntrp,
10184 10184 ulong_t hash)
10185 10185 {
10186 10186 proc_t *p;
10187 10187 lgrp_id_t lgrp_id;
10188 10188 struct as *as;
10189 10189 size_t size;
10190 10190 struct anon_map *amp;
10191 10191
10192 10192 ASSERT(svd->vp != NULL);
10193 10193 ASSERT(svd->vp == svntrp->tr_vp);
10194 10194 ASSERT(svd->offset == svntrp->tr_off);
10195 10195 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10196 10196 ASSERT(seg != NULL);
10197 10197 ASSERT(svd->seg == seg);
10198 10198 ASSERT(seg->s_data == (void *)svd);
10199 10199 ASSERT(seg->s_szc == svntrp->tr_szc);
10200 10200 ASSERT(svd->tr_state == SEGVN_TR_ON);
10201 10201 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10202 10202 ASSERT(svd->amp != NULL);
10203 10203 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10204 10204 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10205 10205 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10206 10206 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10207 10207 ASSERT(svntrp->tr_refcnt != 0);
10208 10208 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10209 10209
10210 10210 as = seg->s_as;
10211 10211 ASSERT(as != NULL && as != &kas);
10212 10212 p = as->a_proc;
10213 10213 ASSERT(p != NULL);
10214 10214 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10215 10215 lgrp_id = p->p_t1_lgrpid;
10216 10216 if (lgrp_id == LGRP_NONE) {
10217 10217 return;
10218 10218 }
10219 10219 ASSERT(lgrp_id < NLGRPS_MAX);
10220 10220 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10221 10221 return;
10222 10222 }
10223 10223
10224 10224 /*
10225 10225	 * Use tryenter locking since we are taking the as/seg and svntr hash
10226 10226	 * locks in the reverse order from the synchronous thread.
10227 10227 */
10228 10228 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10229 10229 SEGVN_TR_ADDSTAT(nolock);
10230 10230 if (segvn_lgrp_trthr_migrs_snpsht) {
10231 10231 segvn_lgrp_trthr_migrs_snpsht = 0;
10232 10232 }
10233 10233 return;
10234 10234 }
10235 10235 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10236 10236 AS_LOCK_EXIT(as, &as->a_lock);
10237 10237 SEGVN_TR_ADDSTAT(nolock);
10238 10238 if (segvn_lgrp_trthr_migrs_snpsht) {
10239 10239 segvn_lgrp_trthr_migrs_snpsht = 0;
10240 10240 }
10241 10241 return;
10242 10242 }
10243 10243 size = seg->s_size;
10244 10244 if (svntrp->tr_amp[lgrp_id] == NULL) {
10245 10245 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10246 10246 if (trmem > segvn_textrepl_max_bytes) {
10247 10247 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10248 10248 AS_LOCK_EXIT(as, &as->a_lock);
10249 10249 atomic_add_long(&segvn_textrepl_bytes, -size);
10250 10250 SEGVN_TR_ADDSTAT(normem);
10251 10251 return;
10252 10252 }
10253 10253 if (anon_try_resv_zone(size, NULL) == 0) {
10254 10254 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10255 10255 AS_LOCK_EXIT(as, &as->a_lock);
10256 10256 atomic_add_long(&segvn_textrepl_bytes, -size);
10257 10257 SEGVN_TR_ADDSTAT(noanon);
10258 10258 return;
10259 10259 }
10260 10260 amp = anonmap_alloc(size, size, KM_NOSLEEP);
10261 10261 if (amp == NULL) {
10262 10262 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10263 10263 AS_LOCK_EXIT(as, &as->a_lock);
10264 10264 atomic_add_long(&segvn_textrepl_bytes, -size);
10265 10265 anon_unresv_zone(size, NULL);
10266 10266 SEGVN_TR_ADDSTAT(nokmem);
10267 10267 return;
10268 10268 }
10269 10269 ASSERT(amp->refcnt == 1);
10270 10270 amp->a_szc = seg->s_szc;
10271 10271 svntrp->tr_amp[lgrp_id] = amp;
10272 10272 }
10273 10273 /*
10274 10274	 * We don't need to drop the bucket lock, but here we give other
10275 10275	 * threads a chance. svntr and svd can't be unlinked as long as the
10276 10276	 * segment lock is held as a writer and the AS lock is held as well.
10277 10277	 * After we retake the bucket lock we'll continue from where we left
10278 10278	 * off. We'll be able to reach the end of either list since new
10279 10279	 * entries are always added to the beginning of the lists.
10280 10280 */
10281 10281 mutex_exit(&svntr_hashtab[hash].tr_lock);
10282 10282 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10283 10283 mutex_enter(&svntr_hashtab[hash].tr_lock);
10284 10284
10285 10285 ASSERT(svd->tr_state == SEGVN_TR_ON);
10286 10286 ASSERT(svd->amp != NULL);
10287 10287 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10288 10288 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10289 10289 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10290 10290
10291 10291 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10292 10292 svd->amp = svntrp->tr_amp[lgrp_id];
10293 10293 p->p_tr_lgrpid = NLGRPS_MAX;
10294 10294 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10295 10295 AS_LOCK_EXIT(as, &as->a_lock);
10296 10296
10297 10297 ASSERT(svntrp->tr_refcnt != 0);
10298 10298 ASSERT(svd->vp == svntrp->tr_vp);
10299 10299 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10300 10300 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10301 10301 ASSERT(svd->seg == seg);
10302 10302 ASSERT(svd->tr_state == SEGVN_TR_ON);
10303 10303
10304 10304 SEGVN_TR_ADDSTAT(asyncrepl);
10305 10305 }
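
Because segvn_trupdate_seg() acquires the address-space and segment locks after the svntr bucket lock, the reverse of the synchronous order, it relies on try-enter and simply backs off on contention rather than risking a deadlock. A hypothetical sketch of that back-off pattern (lock names invented for illustration):

	static void
	try_update(krwlock_t *aslock, krwlock_t *seglock)
	{
		if (!rw_tryenter(aslock, RW_READER))
			return;		/* contended: retry on a later pass */
		if (!rw_tryenter(seglock, RW_WRITER)) {
			rw_exit(aslock);
			return;		/* contended: retry on a later pass */
		}
		/* ... safe to migrate the segment's replica here ... */
		rw_exit(seglock);
		rw_exit(aslock);
	}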
(10156 lines elided)