PVN_GETPAGE_{SZ,NUM} are misnamed and unnecessarily complicated
There is really no reason not to allow 8 pages all the time. With the
current logic, we get the following:
Assuming 4kB pages (x86):
_SZ = ptob(8) /* 32kB */
_NUM = 8
Assuming 8kB pages (sparc):
_SZ = ptob(8) /* 64kB */
_NUM = 8
We'd have to deal with 16kB base pages in order for the _NUM #define not to
be 8 (it'd be 4 in that case). So, in the spirit of simplicity, let's just
always grab 8 pages, as there are no interesting systems with 16kB+ base pages.
Finally, the defines are poorly named.
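
To make the collapse concrete, here is a minimal user-space sketch (not part of this
change) that evaluates the old PVN_MAX_GETPAGE_* selection logic for 4kB and 8kB base
pages; ptob()/btop() are re-implemented inline purely for illustration.

#include <stdio.h>
#include <stddef.h>

/* Old tunables from seg_vn.c. */
#define	PVN_MAX_GETPAGE_SZ	0x10000		/* 64kB cap */
#define	PVN_MAX_GETPAGE_NUM	0x8

/* Evaluate the old #if arithmetic for a given base page size. */
static void
eval(size_t pagesize)
{
	size_t getpage_sz, getpage_num;

	if (PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * pagesize) {
		getpage_sz = PVN_MAX_GETPAGE_NUM * pagesize;	/* ptob(_NUM) */
		getpage_num = PVN_MAX_GETPAGE_NUM;
	} else {
		getpage_sz = PVN_MAX_GETPAGE_SZ;
		getpage_num = PVN_MAX_GETPAGE_SZ / pagesize;	/* btop(_SZ) */
	}

	printf("%zukB pages: _SZ = %zukB, _NUM = %zu\n",
	    pagesize / 1024, getpage_sz / 1024, getpage_num);
}

int
main(void)
{
	eval(4096);	/* x86:   _SZ = 32kB, _NUM = 8 */
	eval(8192);	/* sparc: _SZ = 64kB, _NUM = 8 */
	return (0);
}

In both cases _NUM comes out to 8, which is why the new code below simply defines
FAULT_TMP_PAGES_NUM as 0x8 and derives FAULT_TMP_PAGES_SZ from it.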
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2015, Joyent, Inc. All rights reserved.
24 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - shared or copy-on-write from a vnode/anonymous memory.
42 42 */
43 43
44 44 #include <sys/types.h>
45 45 #include <sys/param.h>
46 46 #include <sys/t_lock.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/debug.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/vmsystm.h>
53 53 #include <sys/tuneable.h>
54 54 #include <sys/bitmap.h>
55 55 #include <sys/swap.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/sysmacros.h>
58 58 #include <sys/vtrace.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/callb.h>
61 61 #include <sys/vm.h>
62 62 #include <sys/dumphdr.h>
63 63 #include <sys/lgrp.h>
64 64
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_vn.h>
69 69 #include <vm/pvn.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/vpage.h>
73 73 #include <sys/proc.h>
74 74 #include <sys/task.h>
75 75 #include <sys/project.h>
76 76 #include <sys/zone.h>
77 77 #include <sys/shm_impl.h>
78 78
79 79 /*
80 80 * segvn_fault needs a temporary page list array. To avoid calling kmem all
81 - * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
82 - * it can. In the rare case when this page list is not large enough, it
83 - * goes and gets a large enough array from kmem.
84 - *
85 - * This small page list array covers either 8 pages or 64kB worth of pages -
86 - * whichever is smaller.
81 + * the time, it creates a small (FAULT_TMP_PAGES_NUM entry) array and uses
82 + * it if it can. In the rare case when this page list is not large enough,
83 + * it goes and gets a large enough array from kmem.
87 84 */
88 -#define PVN_MAX_GETPAGE_SZ 0x10000
89 -#define PVN_MAX_GETPAGE_NUM 0x8
90 -
91 -#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
92 -#define PVN_GETPAGE_SZ ptob(PVN_MAX_GETPAGE_NUM)
93 -#define PVN_GETPAGE_NUM PVN_MAX_GETPAGE_NUM
94 -#else
95 -#define PVN_GETPAGE_SZ PVN_MAX_GETPAGE_SZ
96 -#define PVN_GETPAGE_NUM btop(PVN_MAX_GETPAGE_SZ)
97 -#endif
85 +#define FAULT_TMP_PAGES_NUM 0x8
86 +#define FAULT_TMP_PAGES_SZ ptob(FAULT_TMP_PAGES_NUM)
98 87
99 88 /*
100 89 * Private seg op routines.
101 90 */
102 91 static int segvn_dup(struct seg *seg, struct seg *newseg);
103 92 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
104 93 static void segvn_free(struct seg *seg);
105 94 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
106 95 caddr_t addr, size_t len, enum fault_type type,
107 96 enum seg_rw rw);
108 97 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
109 98 static int segvn_setprot(struct seg *seg, caddr_t addr,
110 99 size_t len, uint_t prot);
111 100 static int segvn_checkprot(struct seg *seg, caddr_t addr,
112 101 size_t len, uint_t prot);
113 102 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
114 103 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
115 104 int attr, uint_t flags);
116 105 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
117 106 char *vec);
118 107 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
119 108 int attr, int op, ulong_t *lockmap, size_t pos);
120 109 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
121 110 uint_t *protv);
122 111 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
123 112 static int segvn_gettype(struct seg *seg, caddr_t addr);
124 113 static int segvn_getvp(struct seg *seg, caddr_t addr,
125 114 struct vnode **vpp);
126 115 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
127 116 uint_t behav);
128 117 static void segvn_dump(struct seg *seg);
129 118 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
130 119 struct page ***ppp, enum lock_type type, enum seg_rw rw);
131 120 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
132 121 uint_t szc);
133 122 static int segvn_getmemid(struct seg *seg, caddr_t addr,
134 123 memid_t *memidp);
135 124 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
136 125 static int segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
137 126
138 127 const struct seg_ops segvn_ops = {
139 128 .dup = segvn_dup,
140 129 .unmap = segvn_unmap,
141 130 .free = segvn_free,
142 131 .fault = segvn_fault,
143 132 .faulta = segvn_faulta,
144 133 .setprot = segvn_setprot,
145 134 .checkprot = segvn_checkprot,
146 135 .kluster = segvn_kluster,
147 136 .sync = segvn_sync,
148 137 .incore = segvn_incore,
149 138 .lockop = segvn_lockop,
150 139 .getprot = segvn_getprot,
151 140 .getoffset = segvn_getoffset,
152 141 .gettype = segvn_gettype,
153 142 .getvp = segvn_getvp,
154 143 .advise = segvn_advise,
155 144 .dump = segvn_dump,
156 145 .pagelock = segvn_pagelock,
157 146 .setpagesize = segvn_setpagesize,
158 147 .getmemid = segvn_getmemid,
159 148 .getpolicy = segvn_getpolicy,
160 149 .inherit = segvn_inherit,
161 150 };
162 151
163 152 /*
164 153 * Common zfod structures, provided as a shorthand for others to use.
165 154 */
166 155 static segvn_crargs_t zfod_segvn_crargs =
167 156 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
168 157 static segvn_crargs_t kzfod_segvn_crargs =
169 158 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
170 159 PROT_ALL & ~PROT_USER);
171 160 static segvn_crargs_t stack_noexec_crargs =
172 161 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
173 162
174 163 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
175 164 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
176 165 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
177 166 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
178 167
179 168 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
180 169
181 170 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
182 171
183 172 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
184 173 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
185 174 uint_t segvn_pglock_comb_bshift;
186 175 size_t segvn_pglock_comb_palign;
187 176
188 177 static int segvn_concat(struct seg *, struct seg *, int);
189 178 static int segvn_extend_prev(struct seg *, struct seg *,
190 179 struct segvn_crargs *, size_t);
191 180 static int segvn_extend_next(struct seg *, struct seg *,
192 181 struct segvn_crargs *, size_t);
193 182 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
194 183 static void segvn_pagelist_rele(page_t **);
195 184 static void segvn_setvnode_mpss(vnode_t *);
196 185 static void segvn_relocate_pages(page_t **, page_t *);
197 186 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
198 187 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
199 188 uint_t, page_t **, page_t **, uint_t *, int *);
200 189 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
201 190 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
202 191 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
203 192 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
204 193 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
205 194 u_offset_t, struct vpage *, page_t **, uint_t,
206 195 enum fault_type, enum seg_rw, int);
207 196 static void segvn_vpage(struct seg *);
208 197 static size_t segvn_count_swap_by_vpages(struct seg *);
209 198
210 199 static void segvn_purge(struct seg *seg);
211 200 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
212 201 enum seg_rw, int);
213 202 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
214 203 enum seg_rw, int);
215 204
216 205 static int sameprot(struct seg *, caddr_t, size_t);
217 206
218 207 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
219 208 static int segvn_clrszc(struct seg *);
220 209 static struct seg *segvn_split_seg(struct seg *, caddr_t);
221 210 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
222 211 ulong_t, uint_t);
223 212
224 213 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
225 214 size_t, void *, u_offset_t);
226 215
227 216 static struct kmem_cache *segvn_cache;
228 217 static struct kmem_cache **segvn_szc_cache;
229 218
230 219 #ifdef VM_STATS
231 220 static struct segvnvmstats_str {
232 221 ulong_t fill_vp_pages[31];
233 222 ulong_t fltvnpages[49];
234 223 ulong_t fullszcpages[10];
235 224 ulong_t relocatepages[3];
236 225 ulong_t fltanpages[17];
237 226 ulong_t pagelock[2];
238 227 ulong_t demoterange[3];
239 228 } segvnvmstats;
240 229 #endif /* VM_STATS */
241 230
242 231 #define SDR_RANGE 1 /* demote entire range */
243 232 #define SDR_END 2 /* demote non aligned ends only */
244 233
245 234 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
246 235 if ((len) != 0) { \
247 236 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
248 237 ASSERT(lpgaddr >= (seg)->s_base); \
249 238 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
250 239 (len)), pgsz); \
251 240 ASSERT(lpgeaddr > lpgaddr); \
252 241 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
253 242 } else { \
254 243 lpgeaddr = lpgaddr = (addr); \
255 244 } \
256 245 }
257 246
258 247 /*ARGSUSED*/
259 248 static int
260 249 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
261 250 {
262 251 struct segvn_data *svd = buf;
263 252
264 253 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
265 254 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
266 255 svd->svn_trnext = svd->svn_trprev = NULL;
267 256 return (0);
268 257 }
269 258
270 259 /*ARGSUSED1*/
271 260 static void
272 261 segvn_cache_destructor(void *buf, void *cdrarg)
273 262 {
274 263 struct segvn_data *svd = buf;
275 264
276 265 rw_destroy(&svd->lock);
277 266 mutex_destroy(&svd->segfree_syncmtx);
278 267 }
279 268
280 269 /*ARGSUSED*/
281 270 static int
282 271 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
283 272 {
284 273 bzero(buf, sizeof (svntr_t));
285 274 return (0);
286 275 }
287 276
288 277 /*
289 278 * Patching this variable to non-zero allows the system to run with
290 279 * stacks marked as "not executable". It's a bit of a kludge, but is
291 280 * provided as a tweakable for platforms that export those ABIs
292 281 * (e.g. sparc V8) that have executable stacks enabled by default.
293 282 * There are also some restrictions for platforms that don't actually
294 283 * implement 'noexec' protections.
295 284 *
296 285 * Once enabled, the system is (therefore) unable to provide a fully
297 286 * ABI-compliant execution environment, though practically speaking,
298 287 * most everything works. The exceptions are generally some interpreters
299 288 * and debuggers that create executable code on the stack and jump
300 289 * into it (without explicitly mprotecting the address range to include
301 290 * PROT_EXEC).
302 291 *
303 292 * One important class of applications that are disabled are those
304 293 * that have been transformed into malicious agents using one of the
305 294 * numerous "buffer overflow" attacks. See 4007890.
306 295 */
307 296 int noexec_user_stack = 0;
308 297 int noexec_user_stack_log = 1;
309 298
310 299 int segvn_lpg_disable = 0;
311 300 uint_t segvn_maxpgszc = 0;
312 301
313 302 ulong_t segvn_vmpss_clrszc_cnt;
314 303 ulong_t segvn_vmpss_clrszc_err;
315 304 ulong_t segvn_fltvnpages_clrszc_cnt;
316 305 ulong_t segvn_fltvnpages_clrszc_err;
317 306 ulong_t segvn_setpgsz_align_err;
318 307 ulong_t segvn_setpgsz_anon_align_err;
319 308 ulong_t segvn_setpgsz_getattr_err;
320 309 ulong_t segvn_setpgsz_eof_err;
321 310 ulong_t segvn_faultvnmpss_align_err1;
322 311 ulong_t segvn_faultvnmpss_align_err2;
323 312 ulong_t segvn_faultvnmpss_align_err3;
324 313 ulong_t segvn_faultvnmpss_align_err4;
325 314 ulong_t segvn_faultvnmpss_align_err5;
326 315 ulong_t segvn_vmpss_pageio_deadlk_err;
327 316
328 317 int segvn_use_regions = 1;
329 318
330 319 /*
331 320 * Segvn supports text replication optimization for NUMA platforms. Text
332 321 * replica's are represented by anon maps (amp). There's one amp per text file
333 322 * region per lgroup. A process chooses the amp for each of its text mappings
334 323 * based on the lgroup assignment of its main thread (t_tid = 1). All
335 324 * processes that want a replica on a particular lgroup for the same text file
336 325 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
337 326 * with vp,off,size,szc used as a key. Text replication segments are read only
338 327 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
339 328 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
340 329 * pages. Replication amp is assigned to a segment when it gets its first
341 330 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
342 331 * rechecks periodically if the process still maps an amp local to the main
343 332 * thread. If not async thread forces process to remap to an amp in the new
344 333 * home lgroup of the main thread. Current text replication implementation
345 334 * only provides the benefit to workloads that do most of their work in the
346 335 * main thread of a process or all the threads of a process run in the same
347 336 * lgroup. To extend text replication benefit to different types of
348 337 * multithreaded workloads further work would be needed in the hat layer to
349 338 * allow the same virtual address in the same hat to simultaneously map
350 339 * different physical addresses (i.e. page table replication would be needed
351 340 * for x86).
352 341 *
353 342 * amp pages are used instead of vnode pages as long as segment has a very
354 343 * simple life cycle. It's created via segvn_create(), handles S_EXEC
355 344 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
356 345 * happens such as protection is changed, real COW fault happens, pagesize is
357 346 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
358 347 * text replication by converting the segment back to vnode only segment
359 348 * (unmap segment's address range and set svd->amp to NULL).
360 349 *
361 350 * The original file can be changed after amp is inserted into
362 351 * svntr_hashtab. Processes that are launched after the file is already
363 352 * changed can't use the replica's created prior to the file change. To
364 353 * implement this functionality hash entries are timestamped. Replica's can
365 354 * only be used if current file modification time is the same as the timestamp
366 355 * saved when hash entry was created. However just timestamps alone are not
367 356 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
368 357 * deal with file changes via MAP_SHARED mappings differently. When writable
369 358 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
370 359 * existing replica's for this vnode as not usable for future text
371 360 * mappings. And we don't create new replica's for files that currently have
372 361 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
373 362 * true).
374 363 */
375 364
376 365 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
377 366 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
378 367
379 368 static ulong_t svntr_hashtab_sz = 512;
380 369 static svntr_bucket_t *svntr_hashtab = NULL;
381 370 static struct kmem_cache *svntr_cache;
382 371 static svntr_stats_t *segvn_textrepl_stats;
383 372 static ksema_t segvn_trasync_sem;
384 373
385 374 int segvn_disable_textrepl = 1;
386 375 size_t textrepl_size_thresh = (size_t)-1;
387 376 size_t segvn_textrepl_bytes = 0;
388 377 size_t segvn_textrepl_max_bytes = 0;
389 378 clock_t segvn_update_textrepl_interval = 0;
390 379 int segvn_update_tr_time = 10;
391 380 int segvn_disable_textrepl_update = 0;
392 381
393 382 static void segvn_textrepl(struct seg *);
394 383 static void segvn_textunrepl(struct seg *, int);
395 384 static void segvn_inval_trcache(vnode_t *);
396 385 static void segvn_trasync_thread(void);
397 386 static void segvn_trupdate_wakeup(void *);
398 387 static void segvn_trupdate(void);
399 388 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
400 389 ulong_t);
401 390
402 391 /*
403 392 * Initialize segvn data structures
404 393 */
405 394 void
406 395 segvn_init(void)
407 396 {
408 397 uint_t maxszc;
409 398 uint_t szc;
410 399 size_t pgsz;
411 400
412 401 segvn_cache = kmem_cache_create("segvn_cache",
413 402 sizeof (struct segvn_data), 0,
414 403 segvn_cache_constructor, segvn_cache_destructor, NULL,
415 404 NULL, NULL, 0);
416 405
417 406 if (segvn_lpg_disable == 0) {
418 407 szc = maxszc = page_num_pagesizes() - 1;
419 408 if (szc == 0) {
420 409 segvn_lpg_disable = 1;
421 410 }
422 411 if (page_get_pagesize(0) != PAGESIZE) {
423 412 panic("segvn_init: bad szc 0");
424 413 /*NOTREACHED*/
425 414 }
426 415 while (szc != 0) {
427 416 pgsz = page_get_pagesize(szc);
428 417 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
429 418 panic("segvn_init: bad szc %d", szc);
430 419 /*NOTREACHED*/
431 420 }
432 421 szc--;
433 422 }
434 423 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
435 424 segvn_maxpgszc = maxszc;
436 425 }
437 426
438 427 if (segvn_maxpgszc) {
439 428 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
440 429 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
441 430 KM_SLEEP);
442 431 }
443 432
444 433 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
445 434 char str[32];
446 435
447 436 (void) sprintf(str, "segvn_szc_cache%d", szc);
448 437 segvn_szc_cache[szc] = kmem_cache_create(str,
449 438 page_get_pagecnt(szc) * sizeof (page_t *), 0,
450 439 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
451 440 }
452 441
453 442
454 443 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
455 444 segvn_use_regions = 0;
456 445
457 446 /*
458 447 * For now shared regions and text replication segvn support
459 448 * are mutually exclusive. This is acceptable because
460 449 * currently significant benefit from text replication was
461 450 * only observed on AMD64 NUMA platforms (due to relatively
462 451 * small L2$ size) and currently we don't support shared
463 452 * regions on x86.
464 453 */
465 454 if (segvn_use_regions && !segvn_disable_textrepl) {
466 455 segvn_disable_textrepl = 1;
467 456 }
468 457
469 458 #if defined(_LP64)
470 459 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
471 460 !segvn_disable_textrepl) {
472 461 ulong_t i;
473 462 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
474 463
475 464 svntr_cache = kmem_cache_create("svntr_cache",
476 465 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
477 466 NULL, NULL, NULL, 0);
478 467 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
479 468 for (i = 0; i < svntr_hashtab_sz; i++) {
480 469 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
481 470 MUTEX_DEFAULT, NULL);
482 471 }
483 472 segvn_textrepl_max_bytes = ptob(physmem) /
484 473 segvn_textrepl_max_bytes_factor;
485 474 segvn_textrepl_stats = kmem_zalloc(NCPU *
486 475 sizeof (svntr_stats_t), KM_SLEEP);
487 476 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
488 477 (void) thread_create(NULL, 0, segvn_trasync_thread,
489 478 NULL, 0, &p0, TS_RUN, minclsyspri);
490 479 }
491 480 #endif
492 481
493 482 if (!ISP2(segvn_pglock_comb_balign) ||
494 483 segvn_pglock_comb_balign < PAGESIZE) {
495 484 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
496 485 }
497 486 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
498 487 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
499 488 }
500 489
501 490 #define SEGVN_PAGEIO ((void *)0x1)
502 491 #define SEGVN_NOPAGEIO ((void *)0x2)
503 492
504 493 static void
505 494 segvn_setvnode_mpss(vnode_t *vp)
506 495 {
507 496 int err;
508 497
509 498 ASSERT(vp->v_mpssdata == NULL ||
510 499 vp->v_mpssdata == SEGVN_PAGEIO ||
511 500 vp->v_mpssdata == SEGVN_NOPAGEIO);
512 501
513 502 if (vp->v_mpssdata == NULL) {
514 503 if (vn_vmpss_usepageio(vp)) {
515 504 err = VOP_PAGEIO(vp, (page_t *)NULL,
516 505 (u_offset_t)0, 0, 0, CRED(), NULL);
517 506 } else {
518 507 err = ENOSYS;
519 508 }
520 509 /*
521 510 * set v_mpssdata just once per vnode life
522 511 * so that it never changes.
523 512 */
524 513 mutex_enter(&vp->v_lock);
525 514 if (vp->v_mpssdata == NULL) {
526 515 if (err == EINVAL) {
527 516 vp->v_mpssdata = SEGVN_PAGEIO;
528 517 } else {
529 518 vp->v_mpssdata = SEGVN_NOPAGEIO;
530 519 }
531 520 }
532 521 mutex_exit(&vp->v_lock);
533 522 }
534 523 }
535 524
536 525 int
537 526 segvn_create(struct seg *seg, void *argsp)
538 527 {
539 528 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
540 529 struct segvn_data *svd;
541 530 size_t swresv = 0;
542 531 struct cred *cred;
543 532 struct anon_map *amp;
544 533 int error = 0;
545 534 size_t pgsz;
546 535 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
547 536 int use_rgn = 0;
548 537 int trok = 0;
549 538
550 539 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
551 540
552 541 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
553 542 panic("segvn_create type");
554 543 /*NOTREACHED*/
555 544 }
556 545
557 546 /*
558 547 * Check arguments. If a shared anon structure is given then
559 548 * it is illegal to also specify a vp.
560 549 */
561 550 if (a->amp != NULL && a->vp != NULL) {
562 551 panic("segvn_create anon_map");
563 552 /*NOTREACHED*/
564 553 }
565 554
566 555 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
567 556 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
568 557 segvn_use_regions) {
569 558 use_rgn = 1;
570 559 }
571 560
572 561 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
573 562 if (a->type == MAP_SHARED)
574 563 a->flags &= ~MAP_NORESERVE;
575 564
576 565 if (a->szc != 0) {
577 566 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
578 567 (a->amp != NULL && a->type == MAP_PRIVATE) ||
579 568 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
580 569 a->szc = 0;
581 570 } else {
582 571 if (a->szc > segvn_maxpgszc)
583 572 a->szc = segvn_maxpgszc;
584 573 pgsz = page_get_pagesize(a->szc);
585 574 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
586 575 !IS_P2ALIGNED(seg->s_size, pgsz)) {
587 576 a->szc = 0;
588 577 } else if (a->vp != NULL) {
589 578 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
590 579 /*
591 580 * paranoid check.
592 581 * hat_page_demote() is not supported
593 582 * on swapfs pages.
594 583 */
595 584 a->szc = 0;
596 585 } else if (map_addr_vacalign_check(seg->s_base,
597 586 a->offset & PAGEMASK)) {
598 587 a->szc = 0;
599 588 }
600 589 } else if (a->amp != NULL) {
601 590 pgcnt_t anum = btopr(a->offset);
602 591 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
603 592 if (!IS_P2ALIGNED(anum, pgcnt)) {
604 593 a->szc = 0;
605 594 }
606 595 }
607 596 }
608 597 }
609 598
610 599 /*
611 600 * If segment may need private pages, reserve them now.
612 601 */
613 602 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
614 603 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
615 604 if (anon_resv_zone(seg->s_size,
616 605 seg->s_as->a_proc->p_zone) == 0)
617 606 return (EAGAIN);
618 607 swresv = seg->s_size;
619 608 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
620 609 seg, swresv, 1);
621 610 }
622 611
623 612 /*
624 613 * Reserve any mapping structures that may be required.
625 614 *
626 615 * Don't do it for segments that may use regions. It's currently a
627 616 * noop in the hat implementations anyway.
628 617 */
629 618 if (!use_rgn) {
630 619 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
631 620 }
632 621
633 622 if (a->cred) {
634 623 cred = a->cred;
635 624 crhold(cred);
636 625 } else {
637 626 crhold(cred = CRED());
638 627 }
639 628
640 629 /* Inform the vnode of the new mapping */
641 630 if (a->vp != NULL) {
642 631 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
643 632 seg->s_as, seg->s_base, seg->s_size, a->prot,
644 633 a->maxprot, a->type, cred, NULL);
645 634 if (error) {
646 635 if (swresv != 0) {
647 636 anon_unresv_zone(swresv,
648 637 seg->s_as->a_proc->p_zone);
649 638 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
650 639 "anon proc:%p %lu %u", seg, swresv, 0);
651 640 }
652 641 crfree(cred);
653 642 if (!use_rgn) {
654 643 hat_unload(seg->s_as->a_hat, seg->s_base,
655 644 seg->s_size, HAT_UNLOAD_UNMAP);
656 645 }
657 646 return (error);
658 647 }
659 648 /*
660 649 * svntr_hashtab will be NULL if we support shared regions.
661 650 */
662 651 trok = ((a->flags & MAP_TEXT) &&
663 652 (seg->s_size > textrepl_size_thresh ||
664 653 (a->flags & _MAP_TEXTREPL)) &&
665 654 lgrp_optimizations() && svntr_hashtab != NULL &&
666 655 a->type == MAP_PRIVATE && swresv == 0 &&
667 656 !(a->flags & MAP_NORESERVE) &&
668 657 seg->s_as != &kas && a->vp->v_type == VREG);
669 658
670 659 ASSERT(!trok || !use_rgn);
671 660 }
672 661
673 662 /*
674 663 * MAP_NORESERVE mappings don't count towards the VSZ of a process
675 664 * until we fault the pages in.
676 665 */
677 666 if ((a->vp == NULL || a->vp->v_type != VREG) &&
678 667 a->flags & MAP_NORESERVE) {
679 668 seg->s_as->a_resvsize -= seg->s_size;
680 669 }
681 670
682 671 /*
683 672 * If more than one segment in the address space, and they're adjacent
684 673 * virtually, try to concatenate them. Don't concatenate if an
685 674 * explicit anon_map structure was supplied (e.g., SystemV shared
686 675 * memory) or if we'll use text replication for this segment.
687 676 */
688 677 if (a->amp == NULL && !use_rgn && !trok) {
689 678 struct seg *pseg, *nseg;
690 679 struct segvn_data *psvd, *nsvd;
691 680 lgrp_mem_policy_t ppolicy, npolicy;
692 681 uint_t lgrp_mem_policy_flags = 0;
693 682 extern lgrp_mem_policy_t lgrp_mem_default_policy;
694 683
695 684 /*
696 685 * Memory policy flags (lgrp_mem_policy_flags) is valid when
697 686 * extending stack/heap segments.
698 687 */
699 688 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
700 689 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
701 690 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
702 691 } else {
703 692 /*
704 693 * Get policy when not extending it from another segment
705 694 */
706 695 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
707 696 }
708 697
709 698 /*
710 699 * First, try to concatenate the previous and new segments
711 700 */
712 701 pseg = AS_SEGPREV(seg->s_as, seg);
713 702 if (pseg != NULL &&
714 703 pseg->s_base + pseg->s_size == seg->s_base &&
715 704 pseg->s_ops == &segvn_ops) {
716 705 /*
717 706 * Get memory allocation policy from previous segment.
718 707 * When extension is specified (e.g. for heap) apply
719 708 * this policy to the new segment regardless of the
720 709 * outcome of segment concatenation. Extension occurs
721 710 * for non-default policy otherwise default policy is
722 711 * used and is based on extended segment size.
723 712 */
724 713 psvd = (struct segvn_data *)pseg->s_data;
725 714 ppolicy = psvd->policy_info.mem_policy;
726 715 if (lgrp_mem_policy_flags ==
727 716 LGRP_MP_FLAG_EXTEND_UP) {
728 717 if (ppolicy != lgrp_mem_default_policy) {
729 718 mpolicy = ppolicy;
730 719 } else {
731 720 mpolicy = lgrp_mem_policy_default(
732 721 pseg->s_size + seg->s_size,
733 722 a->type);
734 723 }
735 724 }
736 725
737 726 if (mpolicy == ppolicy &&
738 727 (pseg->s_size + seg->s_size <=
739 728 segvn_comb_thrshld || psvd->amp == NULL) &&
740 729 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
741 730 /*
742 731 * success! now try to concatenate
743 732 * with following seg
744 733 */
745 734 crfree(cred);
746 735 nseg = AS_SEGNEXT(pseg->s_as, pseg);
747 736 if (nseg != NULL &&
748 737 nseg != pseg &&
749 738 nseg->s_ops == &segvn_ops &&
750 739 pseg->s_base + pseg->s_size ==
751 740 nseg->s_base)
752 741 (void) segvn_concat(pseg, nseg, 0);
753 742 ASSERT(pseg->s_szc == 0 ||
754 743 (a->szc == pseg->s_szc &&
755 744 IS_P2ALIGNED(pseg->s_base, pgsz) &&
756 745 IS_P2ALIGNED(pseg->s_size, pgsz)));
757 746 return (0);
758 747 }
759 748 }
760 749
761 750 /*
762 751 * Failed, so try to concatenate with following seg
763 752 */
764 753 nseg = AS_SEGNEXT(seg->s_as, seg);
765 754 if (nseg != NULL &&
766 755 seg->s_base + seg->s_size == nseg->s_base &&
767 756 nseg->s_ops == &segvn_ops) {
768 757 /*
769 758 * Get memory allocation policy from next segment.
770 759 * When extension is specified (e.g. for stack) apply
771 760 * this policy to the new segment regardless of the
772 761 * outcome of segment concatenation. Extension occurs
773 762 * for non-default policy otherwise default policy is
774 763 * used and is based on extended segment size.
775 764 */
776 765 nsvd = (struct segvn_data *)nseg->s_data;
777 766 npolicy = nsvd->policy_info.mem_policy;
778 767 if (lgrp_mem_policy_flags ==
779 768 LGRP_MP_FLAG_EXTEND_DOWN) {
780 769 if (npolicy != lgrp_mem_default_policy) {
781 770 mpolicy = npolicy;
782 771 } else {
783 772 mpolicy = lgrp_mem_policy_default(
784 773 nseg->s_size + seg->s_size,
785 774 a->type);
786 775 }
787 776 }
788 777
789 778 if (mpolicy == npolicy &&
790 779 segvn_extend_next(seg, nseg, a, swresv) == 0) {
791 780 crfree(cred);
792 781 ASSERT(nseg->s_szc == 0 ||
793 782 (a->szc == nseg->s_szc &&
794 783 IS_P2ALIGNED(nseg->s_base, pgsz) &&
795 784 IS_P2ALIGNED(nseg->s_size, pgsz)));
796 785 return (0);
797 786 }
798 787 }
799 788 }
800 789
801 790 if (a->vp != NULL) {
802 791 VN_HOLD(a->vp);
803 792 if (a->type == MAP_SHARED)
804 793 lgrp_shm_policy_init(NULL, a->vp);
805 794 }
806 795 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
807 796
808 797 seg->s_ops = &segvn_ops;
809 798 seg->s_data = (void *)svd;
810 799 seg->s_szc = a->szc;
811 800
812 801 svd->seg = seg;
813 802 svd->vp = a->vp;
814 803 /*
815 804 * Anonymous mappings have no backing file so the offset is meaningless.
816 805 */
817 806 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
818 807 svd->prot = a->prot;
819 808 svd->maxprot = a->maxprot;
820 809 svd->pageprot = 0;
821 810 svd->type = a->type;
822 811 svd->vpage = NULL;
823 812 svd->cred = cred;
824 813 svd->advice = MADV_NORMAL;
825 814 svd->pageadvice = 0;
826 815 svd->flags = (ushort_t)a->flags;
827 816 svd->softlockcnt = 0;
828 817 svd->softlockcnt_sbase = 0;
829 818 svd->softlockcnt_send = 0;
830 819 svd->svn_inz = 0;
831 820 svd->rcookie = HAT_INVALID_REGION_COOKIE;
832 821 svd->pageswap = 0;
833 822
834 823 if (a->szc != 0 && a->vp != NULL) {
835 824 segvn_setvnode_mpss(a->vp);
836 825 }
837 826 if (svd->type == MAP_SHARED && svd->vp != NULL &&
838 827 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
839 828 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
840 829 segvn_inval_trcache(svd->vp);
841 830 }
842 831
843 832 amp = a->amp;
844 833 if ((svd->amp = amp) == NULL) {
845 834 svd->anon_index = 0;
846 835 if (svd->type == MAP_SHARED) {
847 836 svd->swresv = 0;
848 837 /*
849 838 * Shared mappings to a vp need no other setup.
850 839 * If we have a shared mapping to an anon_map object
851 840 * which hasn't been allocated yet, allocate the
852 841 * struct now so that it will be properly shared
853 842 * by remembering the swap reservation there.
854 843 */
855 844 if (a->vp == NULL) {
856 845 svd->amp = anonmap_alloc(seg->s_size, swresv,
857 846 ANON_SLEEP);
858 847 svd->amp->a_szc = seg->s_szc;
859 848 }
860 849 } else {
861 850 /*
862 851 * Private mapping (with or without a vp).
863 852 * Allocate anon_map when needed.
864 853 */
865 854 svd->swresv = swresv;
866 855 }
867 856 } else {
868 857 pgcnt_t anon_num;
869 858
870 859 /*
871 860 * Mapping to an existing anon_map structure without a vp.
872 861 * For now we will insure that the segment size isn't larger
873 862 * than the size - offset gives us. Later on we may wish to
874 863 * have the anon array dynamically allocated itself so that
875 864 * we don't always have to allocate all the anon pointer slots.
876 865 * This of course involves adding extra code to check that we
877 866 * aren't trying to use an anon pointer slot beyond the end
878 867 * of the currently allocated anon array.
879 868 */
880 869 if ((amp->size - a->offset) < seg->s_size) {
881 870 panic("segvn_create anon_map size");
882 871 /*NOTREACHED*/
883 872 }
884 873
885 874 anon_num = btopr(a->offset);
886 875
887 876 if (a->type == MAP_SHARED) {
888 877 /*
889 878 * SHARED mapping to a given anon_map.
890 879 */
891 880 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
892 881 amp->refcnt++;
893 882 if (a->szc > amp->a_szc) {
894 883 amp->a_szc = a->szc;
895 884 }
896 885 ANON_LOCK_EXIT(&->a_rwlock);
897 886 svd->anon_index = anon_num;
898 887 svd->swresv = 0;
899 888 } else {
900 889 /*
901 890 * PRIVATE mapping to a given anon_map.
902 891 * Make sure that all the needed anon
903 892 * structures are created (so that we will
904 893 * share the underlying pages if nothing
905 894 * is written by this mapping) and then
906 895 * duplicate the anon array as is done
907 896 * when a privately mapped segment is dup'ed.
908 897 */
909 898 struct anon *ap;
910 899 caddr_t addr;
911 900 caddr_t eaddr;
912 901 ulong_t anon_idx;
913 902 int hat_flag = HAT_LOAD;
914 903
915 904 if (svd->flags & MAP_TEXT) {
916 905 hat_flag |= HAT_LOAD_TEXT;
917 906 }
918 907
919 908 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
920 909 svd->amp->a_szc = seg->s_szc;
921 910 svd->anon_index = 0;
922 911 svd->swresv = swresv;
923 912
924 913 /*
925 914 * Prevent 2 threads from allocating anon
926 915 * slots simultaneously.
927 916 */
928 917 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
929 918 eaddr = seg->s_base + seg->s_size;
930 919
931 920 for (anon_idx = anon_num, addr = seg->s_base;
932 921 addr < eaddr; addr += PAGESIZE, anon_idx++) {
933 922 page_t *pp;
934 923
935 924 if ((ap = anon_get_ptr(amp->ahp,
936 925 anon_idx)) != NULL)
937 926 continue;
938 927
939 928 /*
940 929 * Allocate the anon struct now.
941 930 * Might as well load up translation
942 931 * to the page while we're at it...
943 932 */
944 933 pp = anon_zero(seg, addr, &ap, cred);
945 934 if (ap == NULL || pp == NULL) {
946 935 panic("segvn_create anon_zero");
947 936 /*NOTREACHED*/
948 937 }
949 938
950 939 /*
951 940 * Re-acquire the anon_map lock and
952 941 * initialize the anon array entry.
953 942 */
954 943 ASSERT(anon_get_ptr(amp->ahp,
955 944 anon_idx) == NULL);
956 945 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
957 946 ANON_SLEEP);
958 947
959 948 ASSERT(seg->s_szc == 0);
960 949 ASSERT(!IS_VMODSORT(pp->p_vnode));
961 950
962 951 ASSERT(use_rgn == 0);
963 952 hat_memload(seg->s_as->a_hat, addr, pp,
964 953 svd->prot & ~PROT_WRITE, hat_flag);
965 954
966 955 page_unlock(pp);
967 956 }
968 957 ASSERT(seg->s_szc == 0);
969 958 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
970 959 0, seg->s_size);
971 960 ANON_LOCK_EXIT(&->a_rwlock);
972 961 }
973 962 }
974 963
975 964 /*
976 965 * Set default memory allocation policy for segment
977 966 *
978 967 * Always set policy for private memory at least for initialization
979 968 * even if this is a shared memory segment
980 969 */
981 970 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
982 971
983 972 if (svd->type == MAP_SHARED)
984 973 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
985 974 svd->vp, svd->offset, seg->s_size);
986 975
987 976 if (use_rgn) {
988 977 ASSERT(!trok);
989 978 ASSERT(svd->amp == NULL);
990 979 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
991 980 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
992 981 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
993 982 HAT_REGION_TEXT);
994 983 }
995 984
996 985 ASSERT(!trok || !(svd->prot & PROT_WRITE));
997 986 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
998 987
999 988 return (0);
1000 989 }
1001 990
1002 991 /*
1003 992 * Concatenate two existing segments, if possible.
1004 993 * Return 0 on success, -1 if two segments are not compatible
1005 994 * or -2 on memory allocation failure.
1006 995 * If amp_cat == 1 then try and concat segments with anon maps
1007 996 */
1008 997 static int
1009 998 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
1010 999 {
1011 1000 struct segvn_data *svd1 = seg1->s_data;
1012 1001 struct segvn_data *svd2 = seg2->s_data;
1013 1002 struct anon_map *amp1 = svd1->amp;
1014 1003 struct anon_map *amp2 = svd2->amp;
1015 1004 struct vpage *vpage1 = svd1->vpage;
1016 1005 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
1017 1006 size_t size, nvpsize;
1018 1007 pgcnt_t npages1, npages2;
1019 1008
1020 1009 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
1021 1010 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1022 1011 ASSERT(seg1->s_ops == seg2->s_ops);
1023 1012
1024 1013 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1025 1014 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1026 1015 return (-1);
1027 1016 }
1028 1017
1029 1018 /* both segments exist, try to merge them */
1030 1019 #define incompat(x) (svd1->x != svd2->x)
1031 1020 if (incompat(vp) || incompat(maxprot) ||
1032 1021 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1033 1022 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1034 1023 incompat(type) || incompat(cred) || incompat(flags) ||
1035 1024 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1036 1025 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1037 1026 return (-1);
1038 1027 #undef incompat
1039 1028
1040 1029 /*
1041 1030 * vp == NULL implies zfod, offset doesn't matter
1042 1031 */
1043 1032 if (svd1->vp != NULL &&
1044 1033 svd1->offset + seg1->s_size != svd2->offset) {
1045 1034 return (-1);
1046 1035 }
1047 1036
1048 1037 /*
1049 1038 * Don't concatenate if either segment uses text replication.
1050 1039 */
1051 1040 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1052 1041 return (-1);
1053 1042 }
1054 1043
1055 1044 /*
1056 1045 * Fail early if we're not supposed to concatenate
1057 1046 * segments with non NULL amp.
1058 1047 */
1059 1048 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1060 1049 return (-1);
1061 1050 }
1062 1051
1063 1052 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1064 1053 if (amp1 != amp2) {
1065 1054 return (-1);
1066 1055 }
1067 1056 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1068 1057 svd2->anon_index) {
1069 1058 return (-1);
1070 1059 }
1071 1060 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1072 1061 }
1073 1062
1074 1063 /*
1075 1064 * If either seg has vpages, create a new merged vpage array.
1076 1065 */
1077 1066 if (vpage1 != NULL || vpage2 != NULL) {
1078 1067 struct vpage *vp, *evp;
1079 1068
1080 1069 npages1 = seg_pages(seg1);
1081 1070 npages2 = seg_pages(seg2);
1082 1071 nvpsize = vpgtob(npages1 + npages2);
1083 1072
1084 1073 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1085 1074 return (-2);
1086 1075 }
1087 1076
1088 1077 if (vpage1 != NULL) {
1089 1078 bcopy(vpage1, nvpage, vpgtob(npages1));
1090 1079 } else {
1091 1080 evp = nvpage + npages1;
1092 1081 for (vp = nvpage; vp < evp; vp++) {
1093 1082 VPP_SETPROT(vp, svd1->prot);
1094 1083 VPP_SETADVICE(vp, svd1->advice);
1095 1084 }
1096 1085 }
1097 1086
1098 1087 if (vpage2 != NULL) {
1099 1088 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1100 1089 } else {
1101 1090 evp = nvpage + npages1 + npages2;
1102 1091 for (vp = nvpage + npages1; vp < evp; vp++) {
1103 1092 VPP_SETPROT(vp, svd2->prot);
1104 1093 VPP_SETADVICE(vp, svd2->advice);
1105 1094 }
1106 1095 }
1107 1096
1108 1097 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1109 1098 ASSERT(svd1->swresv == seg1->s_size);
1110 1099 ASSERT(!(svd1->flags & MAP_NORESERVE));
1111 1100 ASSERT(!(svd2->flags & MAP_NORESERVE));
1112 1101 evp = nvpage + npages1;
1113 1102 for (vp = nvpage; vp < evp; vp++) {
1114 1103 VPP_SETSWAPRES(vp);
1115 1104 }
1116 1105 }
1117 1106
1118 1107 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1119 1108 ASSERT(svd2->swresv == seg2->s_size);
1120 1109 ASSERT(!(svd1->flags & MAP_NORESERVE));
1121 1110 ASSERT(!(svd2->flags & MAP_NORESERVE));
1122 1111 vp = nvpage + npages1;
1123 1112 evp = vp + npages2;
1124 1113 for (; vp < evp; vp++) {
1125 1114 VPP_SETSWAPRES(vp);
1126 1115 }
1127 1116 }
1128 1117 }
1129 1118 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1130 1119 (svd1->pageswap == 0 && svd2->pageswap == 0));
1131 1120
1132 1121 /*
1133 1122 * If either segment has private pages, create a new merged anon
1134 1123 * array. If mergeing shared anon segments just decrement anon map's
1135 1124 * refcnt.
1136 1125 */
1137 1126 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1138 1127 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1139 1128 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1140 1129 ASSERT(amp1->refcnt >= 2);
1141 1130 amp1->refcnt--;
1142 1131 ANON_LOCK_EXIT(&1->a_rwlock);
1143 1132 svd2->amp = NULL;
1144 1133 } else if (amp1 != NULL || amp2 != NULL) {
1145 1134 struct anon_hdr *nahp;
1146 1135 struct anon_map *namp = NULL;
1147 1136 size_t asize;
1148 1137
1149 1138 ASSERT(svd1->type == MAP_PRIVATE);
1150 1139
1151 1140 asize = seg1->s_size + seg2->s_size;
1152 1141 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1153 1142 if (nvpage != NULL) {
1154 1143 kmem_free(nvpage, nvpsize);
1155 1144 }
1156 1145 return (-2);
1157 1146 }
1158 1147 if (amp1 != NULL) {
1159 1148 /*
1160 1149 * XXX anon rwlock is not really needed because
1161 1150 * this is a private segment and we are writers.
1162 1151 */
1163 1152 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1164 1153 ASSERT(amp1->refcnt == 1);
1165 1154 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1166 1155 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1167 1156 anon_release(nahp, btop(asize));
1168 1157 ANON_LOCK_EXIT(&1->a_rwlock);
1169 1158 if (nvpage != NULL) {
1170 1159 kmem_free(nvpage, nvpsize);
1171 1160 }
1172 1161 return (-2);
1173 1162 }
1174 1163 }
1175 1164 if (amp2 != NULL) {
1176 1165 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER);
1177 1166 ASSERT(amp2->refcnt == 1);
1178 1167 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1179 1168 nahp, btop(seg1->s_size), btop(seg2->s_size),
1180 1169 ANON_NOSLEEP)) {
1181 1170 anon_release(nahp, btop(asize));
1182 1171 ANON_LOCK_EXIT(&2->a_rwlock);
1183 1172 if (amp1 != NULL) {
1184 1173 ANON_LOCK_EXIT(&1->a_rwlock);
1185 1174 }
1186 1175 if (nvpage != NULL) {
1187 1176 kmem_free(nvpage, nvpsize);
1188 1177 }
1189 1178 return (-2);
1190 1179 }
1191 1180 }
1192 1181 if (amp1 != NULL) {
1193 1182 namp = amp1;
1194 1183 anon_release(amp1->ahp, btop(amp1->size));
1195 1184 }
1196 1185 if (amp2 != NULL) {
1197 1186 if (namp == NULL) {
1198 1187 ASSERT(amp1 == NULL);
1199 1188 namp = amp2;
1200 1189 anon_release(amp2->ahp, btop(amp2->size));
1201 1190 } else {
1202 1191 amp2->refcnt--;
1203 1192 ANON_LOCK_EXIT(&2->a_rwlock);
1204 1193 anonmap_free(amp2);
1205 1194 }
1206 1195 svd2->amp = NULL; /* needed for seg_free */
1207 1196 }
1208 1197 namp->ahp = nahp;
1209 1198 namp->size = asize;
1210 1199 svd1->amp = namp;
1211 1200 svd1->anon_index = 0;
1212 1201 ANON_LOCK_EXIT(&namp->a_rwlock);
1213 1202 }
1214 1203 /*
1215 1204 * Now free the old vpage structures.
1216 1205 */
1217 1206 if (nvpage != NULL) {
1218 1207 if (vpage1 != NULL) {
1219 1208 kmem_free(vpage1, vpgtob(npages1));
1220 1209 }
1221 1210 if (vpage2 != NULL) {
1222 1211 svd2->vpage = NULL;
1223 1212 kmem_free(vpage2, vpgtob(npages2));
1224 1213 }
1225 1214 if (svd2->pageprot) {
1226 1215 svd1->pageprot = 1;
1227 1216 }
1228 1217 if (svd2->pageadvice) {
1229 1218 svd1->pageadvice = 1;
1230 1219 }
1231 1220 if (svd2->pageswap) {
1232 1221 svd1->pageswap = 1;
1233 1222 }
1234 1223 svd1->vpage = nvpage;
1235 1224 }
1236 1225
1237 1226 /* all looks ok, merge segments */
1238 1227 svd1->swresv += svd2->swresv;
1239 1228 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1240 1229 size = seg2->s_size;
1241 1230 seg_free(seg2);
1242 1231 seg1->s_size += size;
1243 1232 return (0);
1244 1233 }
1245 1234
1246 1235 /*
1247 1236 * Extend the previous segment (seg1) to include the
1248 1237 * new segment (seg2 + a), if possible.
1249 1238 * Return 0 on success.
1250 1239 */
1251 1240 static int
1252 1241 segvn_extend_prev(seg1, seg2, a, swresv)
1253 1242 struct seg *seg1, *seg2;
1254 1243 struct segvn_crargs *a;
1255 1244 size_t swresv;
1256 1245 {
1257 1246 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1258 1247 size_t size;
1259 1248 struct anon_map *amp1;
1260 1249 struct vpage *new_vpage;
1261 1250
1262 1251 /*
1263 1252 * We don't need any segment level locks for "segvn" data
1264 1253 * since the address space is "write" locked.
1265 1254 */
1266 1255 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1267 1256
1268 1257 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1269 1258 return (-1);
1270 1259 }
1271 1260
1272 1261 /* second segment is new, try to extend first */
1273 1262 /* XXX - should also check cred */
1274 1263 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1275 1264 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1276 1265 svd1->type != a->type || svd1->flags != a->flags ||
1277 1266 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1278 1267 return (-1);
1279 1268
1280 1269 /* vp == NULL implies zfod, offset doesn't matter */
1281 1270 if (svd1->vp != NULL &&
1282 1271 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1283 1272 return (-1);
1284 1273
1285 1274 if (svd1->tr_state != SEGVN_TR_OFF) {
1286 1275 return (-1);
1287 1276 }
1288 1277
1289 1278 amp1 = svd1->amp;
1290 1279 if (amp1) {
1291 1280 pgcnt_t newpgs;
1292 1281
1293 1282 /*
1294 1283 * Segment has private pages, can data structures
1295 1284 * be expanded?
1296 1285 *
1297 1286 * Acquire the anon_map lock to prevent it from changing,
1298 1287 * if it is shared. This ensures that the anon_map
1299 1288 * will not change while a thread which has a read/write
1300 1289 * lock on an address space references it.
1301 1290 * XXX - Don't need the anon_map lock at all if "refcnt"
1302 1291 * is 1.
1303 1292 *
1304 1293 * Can't grow a MAP_SHARED segment with an anonmap because
1305 1294 * there may be existing anon slots where we want to extend
1306 1295 * the segment and we wouldn't know what to do with them
1307 1296 * (e.g., for tmpfs right thing is to just leave them there,
1308 1297 * for /dev/zero they should be cleared out).
1309 1298 */
1310 1299 if (svd1->type == MAP_SHARED)
1311 1300 return (-1);
1312 1301
1313 1302 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1314 1303 if (amp1->refcnt > 1) {
1315 1304 ANON_LOCK_EXIT(&1->a_rwlock);
1316 1305 return (-1);
1317 1306 }
1318 1307 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1319 1308 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1320 1309
1321 1310 if (newpgs == 0) {
1322 1311 ANON_LOCK_EXIT(&1->a_rwlock);
1323 1312 return (-1);
1324 1313 }
1325 1314 amp1->size = ptob(newpgs);
1326 1315 ANON_LOCK_EXIT(&1->a_rwlock);
1327 1316 }
1328 1317 if (svd1->vpage != NULL) {
1329 1318 struct vpage *vp, *evp;
1330 1319 new_vpage =
1331 1320 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1332 1321 KM_NOSLEEP);
1333 1322 if (new_vpage == NULL)
1334 1323 return (-1);
1335 1324 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1336 1325 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1337 1326 svd1->vpage = new_vpage;
1338 1327
1339 1328 vp = new_vpage + seg_pages(seg1);
1340 1329 evp = vp + seg_pages(seg2);
1341 1330 for (; vp < evp; vp++)
1342 1331 VPP_SETPROT(vp, a->prot);
1343 1332 if (svd1->pageswap && swresv) {
1344 1333 ASSERT(!(svd1->flags & MAP_NORESERVE));
1345 1334 ASSERT(swresv == seg2->s_size);
1346 1335 vp = new_vpage + seg_pages(seg1);
1347 1336 for (; vp < evp; vp++) {
1348 1337 VPP_SETSWAPRES(vp);
1349 1338 }
1350 1339 }
1351 1340 }
1352 1341 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1353 1342 size = seg2->s_size;
1354 1343 seg_free(seg2);
1355 1344 seg1->s_size += size;
1356 1345 svd1->swresv += swresv;
1357 1346 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1358 1347 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1359 1348 (svd1->vp->v_flag & VVMEXEC)) {
1360 1349 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1361 1350 segvn_inval_trcache(svd1->vp);
1362 1351 }
1363 1352 return (0);
1364 1353 }
1365 1354
1366 1355 /*
1367 1356 * Extend the next segment (seg2) to include the
1368 1357 * new segment (seg1 + a), if possible.
1369 1358 * Return 0 on success.
1370 1359 */
1371 1360 static int
1372 1361 segvn_extend_next(
1373 1362 struct seg *seg1,
1374 1363 struct seg *seg2,
1375 1364 struct segvn_crargs *a,
1376 1365 size_t swresv)
1377 1366 {
1378 1367 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1379 1368 size_t size;
1380 1369 struct anon_map *amp2;
1381 1370 struct vpage *new_vpage;
1382 1371
1383 1372 /*
1384 1373 * We don't need any segment level locks for "segvn" data
1385 1374 * since the address space is "write" locked.
1386 1375 */
1387 1376 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1388 1377
1389 1378 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1390 1379 return (-1);
1391 1380 }
1392 1381
1393 1382 /* first segment is new, try to extend second */
1394 1383 /* XXX - should also check cred */
1395 1384 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1396 1385 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1397 1386 svd2->type != a->type || svd2->flags != a->flags ||
1398 1387 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1399 1388 return (-1);
1400 1389 /* vp == NULL implies zfod, offset doesn't matter */
1401 1390 if (svd2->vp != NULL &&
1402 1391 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1403 1392 return (-1);
1404 1393
1405 1394 if (svd2->tr_state != SEGVN_TR_OFF) {
1406 1395 return (-1);
1407 1396 }
1408 1397
1409 1398 amp2 = svd2->amp;
1410 1399 if (amp2) {
1411 1400 pgcnt_t newpgs;
1412 1401
1413 1402 /*
1414 1403 * Segment has private pages, can data structures
1415 1404 * be expanded?
1416 1405 *
1417 1406 * Acquire the anon_map lock to prevent it from changing,
1418 1407 * if it is shared. This ensures that the anon_map
1419 1408 * will not change while a thread which has a read/write
1420 1409 * lock on an address space references it.
1421 1410 *
1422 1411 * XXX - Don't need the anon_map lock at all if "refcnt"
1423 1412 * is 1.
1424 1413 */
1425 1414 if (svd2->type == MAP_SHARED)
1426 1415 return (-1);
1427 1416
1428 1417 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER);
1429 1418 if (amp2->refcnt > 1) {
1430 1419 ANON_LOCK_EXIT(&2->a_rwlock);
1431 1420 return (-1);
1432 1421 }
1433 1422 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1434 1423 btop(seg2->s_size), btop(seg1->s_size),
1435 1424 ANON_NOSLEEP | ANON_GROWDOWN);
1436 1425
1437 1426 if (newpgs == 0) {
1438 1427 ANON_LOCK_EXIT(&2->a_rwlock);
1439 1428 return (-1);
1440 1429 }
1441 1430 amp2->size = ptob(newpgs);
1442 1431 ANON_LOCK_EXIT(&2->a_rwlock);
1443 1432 }
1444 1433 if (svd2->vpage != NULL) {
1445 1434 struct vpage *vp, *evp;
1446 1435 new_vpage =
1447 1436 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1448 1437 KM_NOSLEEP);
1449 1438 if (new_vpage == NULL) {
1450 1439 /* Not merging segments so adjust anon_index back */
1451 1440 if (amp2)
1452 1441 svd2->anon_index += seg_pages(seg1);
1453 1442 return (-1);
1454 1443 }
1455 1444 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1456 1445 vpgtob(seg_pages(seg2)));
1457 1446 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1458 1447 svd2->vpage = new_vpage;
1459 1448
1460 1449 vp = new_vpage;
1461 1450 evp = vp + seg_pages(seg1);
1462 1451 for (; vp < evp; vp++)
1463 1452 VPP_SETPROT(vp, a->prot);
1464 1453 if (svd2->pageswap && swresv) {
1465 1454 ASSERT(!(svd2->flags & MAP_NORESERVE));
1466 1455 ASSERT(swresv == seg1->s_size);
1467 1456 vp = new_vpage;
1468 1457 for (; vp < evp; vp++) {
1469 1458 VPP_SETSWAPRES(vp);
1470 1459 }
1471 1460 }
1472 1461 }
1473 1462 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1474 1463 size = seg1->s_size;
1475 1464 seg_free(seg1);
1476 1465 seg2->s_size += size;
1477 1466 seg2->s_base -= size;
1478 1467 svd2->offset -= size;
1479 1468 svd2->swresv += swresv;
1480 1469 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1481 1470 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1482 1471 (svd2->vp->v_flag & VVMEXEC)) {
1483 1472 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1484 1473 segvn_inval_trcache(svd2->vp);
1485 1474 }
1486 1475 return (0);
1487 1476 }
1488 1477
1489 1478 /*
1490 1479 * Duplicate all the pages in the segment. This may break COW sharing for a
1491 1480 * given page. If the page is marked with inherit zero set, then instead of
1492 1481 * duplicating the page, we zero the page.
1493 1482 */
1494 1483 static int
1495 1484 segvn_dup_pages(struct seg *seg, struct seg *newseg)
1496 1485 {
1497 1486 int error;
1498 1487 uint_t prot;
1499 1488 page_t *pp;
1500 1489 struct anon *ap, *newap;
1501 1490 size_t i;
1502 1491 caddr_t addr;
1503 1492
1504 1493 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1505 1494 struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
1506 1495 ulong_t old_idx = svd->anon_index;
1507 1496 ulong_t new_idx = 0;
1508 1497
1509 1498 i = btopr(seg->s_size);
1510 1499 addr = seg->s_base;
1511 1500
1512 1501 /*
1513 1502 * XXX break cow sharing using PAGESIZE
1514 1503 * pages. They will be relocated into larger
1515 1504 * pages at fault time.
1516 1505 */
1517 1506 while (i-- > 0) {
1518 1507 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
1519 1508 struct vpage *vpp;
1520 1509
1521 1510 vpp = &svd->vpage[seg_page(seg, addr)];
1522 1511
1523 1512 /*
1524 1513 * prot need not be computed below 'cause anon_private
1525 1514 * is going to ignore it anyway as child doesn't inherit
1526 1515 * pagelock from parent.
1527 1516 */
1528 1517 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;
1529 1518
1530 1519 /*
1531 1520 * Check whether we should zero this or dup it.
1532 1521 */
1533 1522 if (svd->svn_inz == SEGVN_INZ_ALL ||
1534 1523 (svd->svn_inz == SEGVN_INZ_VPP &&
1535 1524 VPP_ISINHZERO(vpp))) {
1536 1525 pp = anon_zero(newseg, addr, &newap,
1537 1526 newsvd->cred);
1538 1527 } else {
1539 1528 page_t *anon_pl[1+1];
1540 1529 uint_t vpprot;
1541 1530 error = anon_getpage(&ap, &vpprot, anon_pl,
1542 1531 PAGESIZE, seg, addr, S_READ, svd->cred);
1543 1532 if (error != 0)
1544 1533 return (error);
1545 1534
1546 1535 pp = anon_private(&newap, newseg, addr, prot,
1547 1536 anon_pl[0], 0, newsvd->cred);
1548 1537 }
1549 1538 if (pp == NULL) {
1550 1539 return (ENOMEM);
1551 1540 }
1552 1541 (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
1553 1542 ANON_SLEEP);
1554 1543 page_unlock(pp);
1555 1544 }
1556 1545 addr += PAGESIZE;
1557 1546 old_idx++;
1558 1547 new_idx++;
1559 1548 }
1560 1549
1561 1550 return (0);
1562 1551 }
1563 1552
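Aside, to make the inherit-zero logic above concrete: a minimal user-space sketch of the per-page choice segvn_dup_pages() makes, where a page is zeroed in the child if the whole segment is inherit-zero or its vpage entry carries the inherit-zero bit, and copied otherwise. The enum values and the vpp_inhzero flag here are illustrative stand-ins for the kernel's SEGVN_INZ_* constants and VPP_ISINHZERO(); this is not code from seg_vn.c.

#include <stdio.h>
#include <string.h>

/* illustrative stand-ins for the kernel's SEGVN_INZ_* values */
enum svn_inz { INZ_NONE, INZ_ALL, INZ_VPP };

/* decide, per page, whether the child gets a zeroed page or a copy */
static void
dup_page(enum svn_inz inz, int vpp_inhzero, const char *parent, char *child,
    size_t pagesize)
{
	if (inz == INZ_ALL || (inz == INZ_VPP && vpp_inhzero))
		memset(child, 0, pagesize);		/* like anon_zero() */
	else
		memcpy(child, parent, pagesize);	/* like anon_private() */
}

int
main(void)
{
	char parent[16] = "secret", child[16];

	dup_page(INZ_VPP, 1, parent, child, sizeof (child));
	printf("inherit-zero child: \"%s\"\n", child);	/* "" */
	dup_page(INZ_NONE, 0, parent, child, sizeof (child));
	printf("copied child: \"%s\"\n", child);	/* "secret" */
	return (0);
}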
1564 1553 static int
1565 1554 segvn_dup(struct seg *seg, struct seg *newseg)
1566 1555 {
1567 1556 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1568 1557 struct segvn_data *newsvd;
1569 1558 pgcnt_t npages = seg_pages(seg);
1570 1559 int error = 0;
1571 1560 size_t len;
1572 1561 struct anon_map *amp;
1573 1562
1574 1563 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1575 1564 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1576 1565
1577 1566 /*
1578 1567 * If segment has anon reserved, reserve more for the new seg.
1579 1568 * For a MAP_NORESERVE segment swresv will be a count of all the
1580 1569 * allocated anon slots; thus we reserve for the child as many slots
1581 1570 * as the parent has allocated. This semantic prevents the child or
1582 1571 * parent from dying during a copy-on-write fault caused by trying
1583 1572 * to write a shared pre-existing anon page.
1584 1573 */
1585 1574 if ((len = svd->swresv) != 0) {
1586 1575 if (anon_resv(svd->swresv) == 0)
1587 1576 return (ENOMEM);
1588 1577
1589 1578 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1590 1579 seg, len, 0);
1591 1580 }
1592 1581
1593 1582 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1594 1583
1595 1584 newseg->s_ops = &segvn_ops;
1596 1585 newseg->s_data = (void *)newsvd;
1597 1586 newseg->s_szc = seg->s_szc;
1598 1587
1599 1588 newsvd->seg = newseg;
1600 1589 if ((newsvd->vp = svd->vp) != NULL) {
1601 1590 VN_HOLD(svd->vp);
1602 1591 if (svd->type == MAP_SHARED)
1603 1592 lgrp_shm_policy_init(NULL, svd->vp);
1604 1593 }
1605 1594 newsvd->offset = svd->offset;
1606 1595 newsvd->prot = svd->prot;
1607 1596 newsvd->maxprot = svd->maxprot;
1608 1597 newsvd->pageprot = svd->pageprot;
1609 1598 newsvd->type = svd->type;
1610 1599 newsvd->cred = svd->cred;
1611 1600 crhold(newsvd->cred);
1612 1601 newsvd->advice = svd->advice;
1613 1602 newsvd->pageadvice = svd->pageadvice;
1614 1603 newsvd->svn_inz = svd->svn_inz;
1615 1604 newsvd->swresv = svd->swresv;
1616 1605 newsvd->pageswap = svd->pageswap;
1617 1606 newsvd->flags = svd->flags;
1618 1607 newsvd->softlockcnt = 0;
1619 1608 newsvd->softlockcnt_sbase = 0;
1620 1609 newsvd->softlockcnt_send = 0;
1621 1610 newsvd->policy_info = svd->policy_info;
1622 1611 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1623 1612
1624 1613 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1625 1614 /*
1626 1615 * Not attaching to a shared anon object.
1627 1616 */
1628 1617 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1629 1618 svd->tr_state == SEGVN_TR_OFF);
1630 1619 if (svd->tr_state == SEGVN_TR_ON) {
1631 1620 ASSERT(newsvd->vp != NULL && amp != NULL);
1632 1621 newsvd->tr_state = SEGVN_TR_INIT;
1633 1622 } else {
1634 1623 newsvd->tr_state = svd->tr_state;
1635 1624 }
1636 1625 newsvd->amp = NULL;
1637 1626 newsvd->anon_index = 0;
1638 1627 } else {
1639 1628 /* regions for now are only used on pure vnode segments */
1640 1629 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1641 1630 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1642 1631 newsvd->tr_state = SEGVN_TR_OFF;
1643 1632 if (svd->type == MAP_SHARED) {
1644 1633 ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
1645 1634 newsvd->amp = amp;
1646 1635 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1647 1636 amp->refcnt++;
1648 1637 ANON_LOCK_EXIT(&amp->a_rwlock);
1649 1638 newsvd->anon_index = svd->anon_index;
1650 1639 } else {
1651 1640 int reclaim = 1;
1652 1641
1653 1642 /*
1654 1643 * Allocate and initialize new anon_map structure.
1655 1644 */
1656 1645 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1657 1646 ANON_SLEEP);
1658 1647 newsvd->amp->a_szc = newseg->s_szc;
1659 1648 newsvd->anon_index = 0;
1660 1649 ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
1661 1650 svd->svn_inz == SEGVN_INZ_ALL ||
1662 1651 svd->svn_inz == SEGVN_INZ_VPP);
1663 1652
1664 1653 /*
1665 1654 * We don't have to acquire the anon_map lock
1666 1655 * for the new segment (since it belongs to an
1667 1656 * address space that is still not associated
1668 1657 * with any process), or the segment in the old
1669 1658 * address space (since all threads in it
1670 1659 * are stopped while duplicating the address space).
1671 1660 */
1672 1661
1673 1662 /*
1674 1663 * The goal of the following code is to make sure that
1675 1664 * softlocked pages do not end up as copy on write
1676 1665 * pages. This would cause problems where one
1677 1666 * thread writes to a page that is COW and a different
1678 1667 * thread in the same process has softlocked it. The
1679 1668 * softlock lock would move away from this process
1680 1669 * because the write would cause this process to get
1681 1670 * a copy (without the softlock).
1682 1671 *
1683 1672 * The strategy here is to just break the
1684 1673 * sharing on pages that could possibly be
1685 1674 * softlocked.
1686 1675 *
1687 1676 * In addition, if any pages have been marked that they
1688 1677 * should be inherited as zero, then we immediately go
1689 1678 * ahead and break COW and zero them. In the case of a
1690 1679 * softlocked page that should be inherited zero, we
1691 1680 * break COW and just get a zero page.
1692 1681 */
1693 1682 retry:
1694 1683 if (svd->softlockcnt ||
1695 1684 svd->svn_inz != SEGVN_INZ_NONE) {
1696 1685 /*
1697 1686 * The softlock count might be non zero
1698 1687 * because some pages are still stuck in the
1699 1688 * cache for lazy reclaim. Flush the cache
1700 1689 * now. This should drop the count to zero.
1701 1690 * [or there is really I/O going on to these
1702 1691 * pages]. Note, we have the writers lock so
1703 1692 * nothing gets inserted during the flush.
1704 1693 */
1705 1694 if (svd->softlockcnt && reclaim == 1) {
1706 1695 segvn_purge(seg);
1707 1696 reclaim = 0;
1708 1697 goto retry;
1709 1698 }
1710 1699
1711 1700 error = segvn_dup_pages(seg, newseg);
1712 1701 if (error != 0) {
1713 1702 newsvd->vpage = NULL;
1714 1703 goto out;
1715 1704 }
1716 1705 } else { /* common case */
1717 1706 if (seg->s_szc != 0) {
1718 1707 /*
1719 1708 * If at least one of anon slots of a
1720 1709 * large page exists then make sure
1721 1710 * all anon slots of a large page
1722 1711 * exist to avoid partial cow sharing
1723 1712 * of a large page in the future.
1724 1713 */
1725 1714 anon_dup_fill_holes(amp->ahp,
1726 1715 svd->anon_index, newsvd->amp->ahp,
1727 1716 0, seg->s_size, seg->s_szc,
1728 1717 svd->vp != NULL);
1729 1718 } else {
1730 1719 anon_dup(amp->ahp, svd->anon_index,
1731 1720 newsvd->amp->ahp, 0, seg->s_size);
1732 1721 }
1733 1722
1734 1723 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1735 1724 seg->s_size, PROT_WRITE);
1736 1725 }
1737 1726 }
1738 1727 }
1739 1728 /*
1740 1729 * If necessary, create a vpage structure for the new segment.
1741 1730 * Do not copy any page lock indications.
1742 1731 */
1743 1732 if (svd->vpage != NULL) {
1744 1733 uint_t i;
1745 1734 struct vpage *ovp = svd->vpage;
1746 1735 struct vpage *nvp;
1747 1736
1748 1737 nvp = newsvd->vpage =
1749 1738 kmem_alloc(vpgtob(npages), KM_SLEEP);
1750 1739 for (i = 0; i < npages; i++) {
1751 1740 *nvp = *ovp++;
1752 1741 VPP_CLRPPLOCK(nvp++);
1753 1742 }
1754 1743 } else
1755 1744 newsvd->vpage = NULL;
1756 1745
1757 1746 /* Inform the vnode of the new mapping */
1758 1747 if (newsvd->vp != NULL) {
1759 1748 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1760 1749 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1761 1750 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1762 1751 }
1763 1752 out:
1764 1753 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1765 1754 ASSERT(newsvd->amp == NULL);
1766 1755 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1767 1756 newsvd->rcookie = svd->rcookie;
1768 1757 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1769 1758 }
1770 1759 return (error);
1771 1760 }
1772 1761
1773 1762
1774 1763 /*
1775 1764 * callback function to invoke free_vp_pages() for only those pages actually
1776 1765 * processed by the HAT when a shared region is destroyed.
1777 1766 */
1778 1767 extern int free_pages;
1779 1768
1780 1769 static void
1781 1770 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1782 1771 size_t r_size, void *r_obj, u_offset_t r_objoff)
1783 1772 {
1784 1773 u_offset_t off;
1785 1774 size_t len;
1786 1775 vnode_t *vp = (vnode_t *)r_obj;
1787 1776
1788 1777 ASSERT(eaddr > saddr);
1789 1778 ASSERT(saddr >= r_saddr);
1790 1779 ASSERT(saddr < r_saddr + r_size);
1791 1780 ASSERT(eaddr > r_saddr);
1792 1781 ASSERT(eaddr <= r_saddr + r_size);
1793 1782 ASSERT(vp != NULL);
1794 1783
1795 1784 if (!free_pages) {
1796 1785 return;
1797 1786 }
1798 1787
1799 1788 len = eaddr - saddr;
1800 1789 off = (saddr - r_saddr) + r_objoff;
1801 1790 free_vp_pages(vp, off, len);
1802 1791 }
1803 1792
1804 1793 /*
1805 1794 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1806 1795 * those pages actually processed by the HAT
1807 1796 */
1808 1797 static void
1809 1798 segvn_hat_unload_callback(hat_callback_t *cb)
1810 1799 {
1811 1800 struct seg *seg = cb->hcb_data;
1812 1801 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1813 1802 size_t len;
1814 1803 u_offset_t off;
1815 1804
1816 1805 ASSERT(svd->vp != NULL);
1817 1806 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1818 1807 ASSERT(cb->hcb_start_addr >= seg->s_base);
1819 1808
1820 1809 len = cb->hcb_end_addr - cb->hcb_start_addr;
1821 1810 off = cb->hcb_start_addr - seg->s_base;
1822 1811 free_vp_pages(svd->vp, svd->offset + off, len);
1823 1812 }
1824 1813
1825 1814 /*
1826 1815 * This function determines the number of bytes of swap reserved by
1827 1816 * a segment for which per-page accounting is present. It is used to
1828 1817 * calculate the correct value of a segvn_data's swresv.
1829 1818 */
1830 1819 static size_t
1831 1820 segvn_count_swap_by_vpages(struct seg *seg)
1832 1821 {
1833 1822 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1834 1823 struct vpage *vp, *evp;
1835 1824 size_t nswappages = 0;
1836 1825
1837 1826 ASSERT(svd->pageswap);
1838 1827 ASSERT(svd->vpage != NULL);
1839 1828
1840 1829 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1841 1830
1842 1831 for (vp = svd->vpage; vp < evp; vp++) {
1843 1832 if (VPP_ISSWAPRES(vp))
1844 1833 nswappages++;
1845 1834 }
1846 1835
1847 1836 return (nswappages << PAGESHIFT);
1848 1837 }
1849 1838
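Aside: segvn_count_swap_by_vpages() above amounts to "count the vpage entries with the swap-reserved bit set, then convert pages to bytes". A self-contained user-space model of that arithmetic, assuming 4kB pages purely for the sake of the example (the kernel uses PAGESHIFT and ptob()):

#include <stdio.h>

#define MODEL_PAGESHIFT	12	/* assume 4kB pages just for this example */

/* bytes of swap reserved, given one swap-reserved flag per page */
static size_t
count_swap_bytes(const unsigned char *swapres, size_t npages)
{
	size_t nswappages = 0;
	size_t i;

	for (i = 0; i < npages; i++)
		if (swapres[i])
			nswappages++;
	return (nswappages << MODEL_PAGESHIFT);	/* like ptob(nswappages) */
}

int
main(void)
{
	unsigned char vpages[8] = { 1, 0, 1, 1, 0, 0, 0, 1 };

	/* 4 reserved pages * 4096 = 16384 bytes */
	printf("%zu bytes reserved\n", count_swap_bytes(vpages, 8));
	return (0);
}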
1850 1839 static int
1851 1840 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1852 1841 {
1853 1842 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1854 1843 struct segvn_data *nsvd;
1855 1844 struct seg *nseg;
1856 1845 struct anon_map *amp;
1857 1846 pgcnt_t opages; /* old segment size in pages */
1858 1847 pgcnt_t npages; /* new segment size in pages */
1859 1848 pgcnt_t dpages; /* pages being deleted (unmapped) */
1860 1849 hat_callback_t callback; /* used for free_vp_pages() */
1861 1850 hat_callback_t *cbp = NULL;
1862 1851 caddr_t nbase;
1863 1852 size_t nsize;
1864 1853 size_t oswresv;
1865 1854 int reclaim = 1;
1866 1855
1867 1856 /*
1868 1857 * We don't need any segment level locks for "segvn" data
1869 1858 * since the address space is "write" locked.
1870 1859 */
1871 1860 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1872 1861
1873 1862 /*
1874 1863 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1875 1864 * softlockcnt is protected from change by the as write lock.
1876 1865 */
1877 1866 retry:
1878 1867 if (svd->softlockcnt > 0) {
1879 1868 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1880 1869
1881 1870 /*
1882 1871 * If this is a shared segment, a non-zero softlockcnt
1883 1872 * means locked pages are still in use.
1884 1873 */
1885 1874 if (svd->type == MAP_SHARED) {
1886 1875 return (EAGAIN);
1887 1876 }
1888 1877
1889 1878 /*
1890 1879 * Since we do have the writers lock, nobody can fill
1891 1880 * the cache during the purge. The flush either succeeds
1892 1881 * or we still have pending I/Os.
1893 1882 */
1894 1883 if (reclaim == 1) {
1895 1884 segvn_purge(seg);
1896 1885 reclaim = 0;
1897 1886 goto retry;
1898 1887 }
1899 1888 return (EAGAIN);
1900 1889 }
1901 1890
1902 1891 /*
1903 1892 * Check for bad sizes
1904 1893 */
1905 1894 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1906 1895 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1907 1896 panic("segvn_unmap");
1908 1897 /*NOTREACHED*/
1909 1898 }
1910 1899
1911 1900 if (seg->s_szc != 0) {
1912 1901 size_t pgsz = page_get_pagesize(seg->s_szc);
1913 1902 int err;
1914 1903 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1915 1904 ASSERT(seg->s_base != addr || seg->s_size != len);
1916 1905 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1917 1906 ASSERT(svd->amp == NULL);
1918 1907 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1919 1908 hat_leave_region(seg->s_as->a_hat,
1920 1909 svd->rcookie, HAT_REGION_TEXT);
1921 1910 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1922 1911 /*
1923 1912 * could pass a flag to segvn_demote_range()
1924 1913 * below to tell it not to do any unloads but
1925 1914 * this case is rare enough to not bother for
1926 1915 * now.
1927 1916 */
1928 1917 } else if (svd->tr_state == SEGVN_TR_INIT) {
1929 1918 svd->tr_state = SEGVN_TR_OFF;
1930 1919 } else if (svd->tr_state == SEGVN_TR_ON) {
1931 1920 ASSERT(svd->amp != NULL);
1932 1921 segvn_textunrepl(seg, 1);
1933 1922 ASSERT(svd->amp == NULL);
1934 1923 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1935 1924 }
1936 1925 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1937 1926 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1938 1927 if (err == 0) {
1939 1928 return (IE_RETRY);
1940 1929 }
1941 1930 return (err);
1942 1931 }
1943 1932 }
1944 1933
1945 1934 /* Inform the vnode of the unmapping. */
1946 1935 if (svd->vp) {
1947 1936 int error;
1948 1937
1949 1938 error = VOP_DELMAP(svd->vp,
1950 1939 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1951 1940 seg->s_as, addr, len, svd->prot, svd->maxprot,
1952 1941 svd->type, svd->cred, NULL);
1953 1942
1954 1943 if (error == EAGAIN)
1955 1944 return (error);
1956 1945 }
1957 1946
1958 1947 /*
1959 1948 * Remove any page locks set through this mapping.
1960 1949 * If text replication is not off no page locks could have been
1961 1950 * established via this mapping.
1962 1951 */
1963 1952 if (svd->tr_state == SEGVN_TR_OFF) {
1964 1953 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1965 1954 }
1966 1955
1967 1956 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1968 1957 ASSERT(svd->amp == NULL);
1969 1958 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1970 1959 ASSERT(svd->type == MAP_PRIVATE);
1971 1960 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1972 1961 HAT_REGION_TEXT);
1973 1962 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1974 1963 } else if (svd->tr_state == SEGVN_TR_ON) {
1975 1964 ASSERT(svd->amp != NULL);
1976 1965 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1977 1966 segvn_textunrepl(seg, 1);
1978 1967 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1979 1968 } else {
1980 1969 if (svd->tr_state != SEGVN_TR_OFF) {
1981 1970 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1982 1971 svd->tr_state = SEGVN_TR_OFF;
1983 1972 }
1984 1973 /*
1985 1974 * Unload any hardware translations in the range to be taken
1986 1975 * out. Use a callback to invoke free_vp_pages() effectively.
1987 1976 */
1988 1977 if (svd->vp != NULL && free_pages != 0) {
1989 1978 callback.hcb_data = seg;
1990 1979 callback.hcb_function = segvn_hat_unload_callback;
1991 1980 cbp = &callback;
1992 1981 }
1993 1982 hat_unload_callback(seg->s_as->a_hat, addr, len,
1994 1983 HAT_UNLOAD_UNMAP, cbp);
1995 1984
1996 1985 if (svd->type == MAP_SHARED && svd->vp != NULL &&
1997 1986 (svd->vp->v_flag & VVMEXEC) &&
1998 1987 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
1999 1988 segvn_inval_trcache(svd->vp);
2000 1989 }
2001 1990 }
2002 1991
2003 1992 /*
2004 1993 * Check for entire segment
2005 1994 */
2006 1995 if (addr == seg->s_base && len == seg->s_size) {
2007 1996 seg_free(seg);
2008 1997 return (0);
2009 1998 }
2010 1999
2011 2000 opages = seg_pages(seg);
2012 2001 dpages = btop(len);
2013 2002 npages = opages - dpages;
2014 2003 amp = svd->amp;
2015 2004 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
2016 2005
2017 2006 /*
2018 2007 * Check for beginning of segment
2019 2008 */
2020 2009 if (addr == seg->s_base) {
2021 2010 if (svd->vpage != NULL) {
2022 2011 size_t nbytes;
2023 2012 struct vpage *ovpage;
2024 2013
2025 2014 ovpage = svd->vpage; /* keep pointer to vpage */
2026 2015
2027 2016 nbytes = vpgtob(npages);
2028 2017 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2029 2018 bcopy(&ovpage[dpages], svd->vpage, nbytes);
2030 2019
2031 2020 /* free up old vpage */
2032 2021 kmem_free(ovpage, vpgtob(opages));
2033 2022 }
2034 2023 if (amp != NULL) {
2035 2024 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2036 2025 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2037 2026 /*
2038 2027 * Shared anon map is no longer in use. Before
2039 2028 * freeing its pages purge all entries from
2040 2029 * pcache that belong to this amp.
2041 2030 */
2042 2031 if (svd->type == MAP_SHARED) {
2043 2032 ASSERT(amp->refcnt == 1);
2044 2033 ASSERT(svd->softlockcnt == 0);
2045 2034 anonmap_purge(amp);
2046 2035 }
2047 2036 /*
2048 2037 * Free up now unused parts of anon_map array.
2049 2038 */
2050 2039 if (amp->a_szc == seg->s_szc) {
2051 2040 if (seg->s_szc != 0) {
2052 2041 anon_free_pages(amp->ahp,
2053 2042 svd->anon_index, len,
2054 2043 seg->s_szc);
2055 2044 } else {
2056 2045 anon_free(amp->ahp,
2057 2046 svd->anon_index,
2058 2047 len);
2059 2048 }
2060 2049 } else {
2061 2050 ASSERT(svd->type == MAP_SHARED);
2062 2051 ASSERT(amp->a_szc > seg->s_szc);
2063 2052 anon_shmap_free_pages(amp,
2064 2053 svd->anon_index, len);
2065 2054 }
2066 2055
2067 2056 /*
2068 2057 * Unreserve swap space for the
2069 2058 * unmapped chunk of this segment in
2070 2059 * case it's MAP_SHARED
2071 2060 */
2072 2061 if (svd->type == MAP_SHARED) {
2073 2062 anon_unresv_zone(len,
2074 2063 seg->s_as->a_proc->p_zone);
2075 2064 amp->swresv -= len;
2076 2065 }
2077 2066 }
2078 2067 ANON_LOCK_EXIT(&amp->a_rwlock);
2079 2068 svd->anon_index += dpages;
2080 2069 }
2081 2070 if (svd->vp != NULL)
2082 2071 svd->offset += len;
2083 2072
2084 2073 seg->s_base += len;
2085 2074 seg->s_size -= len;
2086 2075
2087 2076 if (svd->swresv) {
2088 2077 if (svd->flags & MAP_NORESERVE) {
2089 2078 ASSERT(amp);
2090 2079 oswresv = svd->swresv;
2091 2080
2092 2081 svd->swresv = ptob(anon_pages(amp->ahp,
2093 2082 svd->anon_index, npages));
2094 2083 anon_unresv_zone(oswresv - svd->swresv,
2095 2084 seg->s_as->a_proc->p_zone);
2096 2085 if (SEG_IS_PARTIAL_RESV(seg))
2097 2086 seg->s_as->a_resvsize -= oswresv -
2098 2087 svd->swresv;
2099 2088 } else {
2100 2089 size_t unlen;
2101 2090
2102 2091 if (svd->pageswap) {
2103 2092 oswresv = svd->swresv;
2104 2093 svd->swresv =
2105 2094 segvn_count_swap_by_vpages(seg);
2106 2095 ASSERT(oswresv >= svd->swresv);
2107 2096 unlen = oswresv - svd->swresv;
2108 2097 } else {
2109 2098 svd->swresv -= len;
2110 2099 ASSERT(svd->swresv == seg->s_size);
2111 2100 unlen = len;
2112 2101 }
2113 2102 anon_unresv_zone(unlen,
2114 2103 seg->s_as->a_proc->p_zone);
2115 2104 }
2116 2105 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2117 2106 seg, len, 0);
2118 2107 }
2119 2108
2120 2109 return (0);
2121 2110 }
2122 2111
2123 2112 /*
2124 2113 * Check for end of segment
2125 2114 */
2126 2115 if (addr + len == seg->s_base + seg->s_size) {
2127 2116 if (svd->vpage != NULL) {
2128 2117 size_t nbytes;
2129 2118 struct vpage *ovpage;
2130 2119
2131 2120 ovpage = svd->vpage; /* keep pointer to vpage */
2132 2121
2133 2122 nbytes = vpgtob(npages);
2134 2123 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2135 2124 bcopy(ovpage, svd->vpage, nbytes);
2136 2125
2137 2126 /* free up old vpage */
2138 2127 kmem_free(ovpage, vpgtob(opages));
2139 2128
2140 2129 }
2141 2130 if (amp != NULL) {
2142 2131 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2143 2132 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2144 2133 /*
2145 2134 * Free up now unused parts of anon_map array.
2146 2135 */
2147 2136 ulong_t an_idx = svd->anon_index + npages;
2148 2137
2149 2138 /*
2150 2139 * Shared anon map is no longer in use. Before
2151 2140 * freeing its pages purge all entries from
2152 2141 * pcache that belong to this amp.
2153 2142 */
2154 2143 if (svd->type == MAP_SHARED) {
2155 2144 ASSERT(amp->refcnt == 1);
2156 2145 ASSERT(svd->softlockcnt == 0);
2157 2146 anonmap_purge(amp);
2158 2147 }
2159 2148
2160 2149 if (amp->a_szc == seg->s_szc) {
2161 2150 if (seg->s_szc != 0) {
2162 2151 anon_free_pages(amp->ahp,
2163 2152 an_idx, len,
2164 2153 seg->s_szc);
2165 2154 } else {
2166 2155 anon_free(amp->ahp, an_idx,
2167 2156 len);
2168 2157 }
2169 2158 } else {
2170 2159 ASSERT(svd->type == MAP_SHARED);
2171 2160 ASSERT(amp->a_szc > seg->s_szc);
2172 2161 anon_shmap_free_pages(amp,
2173 2162 an_idx, len);
2174 2163 }
2175 2164
2176 2165 /*
2177 2166 * Unreserve swap space for the
2178 2167 * unmapped chunk of this segment in
2179 2168 * case it's MAP_SHARED
2180 2169 */
2181 2170 if (svd->type == MAP_SHARED) {
2182 2171 anon_unresv_zone(len,
2183 2172 seg->s_as->a_proc->p_zone);
2184 2173 amp->swresv -= len;
2185 2174 }
2186 2175 }
2187 2176 ANON_LOCK_EXIT(&amp->a_rwlock);
2188 2177 }
2189 2178
2190 2179 seg->s_size -= len;
2191 2180
2192 2181 if (svd->swresv) {
2193 2182 if (svd->flags & MAP_NORESERVE) {
2194 2183 ASSERT(amp);
2195 2184 oswresv = svd->swresv;
2196 2185 svd->swresv = ptob(anon_pages(amp->ahp,
2197 2186 svd->anon_index, npages));
2198 2187 anon_unresv_zone(oswresv - svd->swresv,
2199 2188 seg->s_as->a_proc->p_zone);
2200 2189 if (SEG_IS_PARTIAL_RESV(seg))
2201 2190 seg->s_as->a_resvsize -= oswresv -
2202 2191 svd->swresv;
2203 2192 } else {
2204 2193 size_t unlen;
2205 2194
2206 2195 if (svd->pageswap) {
2207 2196 oswresv = svd->swresv;
2208 2197 svd->swresv =
2209 2198 segvn_count_swap_by_vpages(seg);
2210 2199 ASSERT(oswresv >= svd->swresv);
2211 2200 unlen = oswresv - svd->swresv;
2212 2201 } else {
2213 2202 svd->swresv -= len;
2214 2203 ASSERT(svd->swresv == seg->s_size);
2215 2204 unlen = len;
2216 2205 }
2217 2206 anon_unresv_zone(unlen,
2218 2207 seg->s_as->a_proc->p_zone);
2219 2208 }
2220 2209 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2221 2210 "anon proc:%p %lu %u", seg, len, 0);
2222 2211 }
2223 2212
2224 2213 return (0);
2225 2214 }
2226 2215
2227 2216 /*
2228 2217 * The section to go is in the middle of the segment,
2229 2218 * have to make it into two segments. nseg is made for
2230 2219 * the high end while seg is cut down at the low end.
2231 2220 */
2232 2221 nbase = addr + len; /* new seg base */
2233 2222 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2234 2223 seg->s_size = addr - seg->s_base; /* shrink old seg */
2235 2224 nseg = seg_alloc(seg->s_as, nbase, nsize);
2236 2225 if (nseg == NULL) {
2237 2226 panic("segvn_unmap seg_alloc");
2238 2227 /*NOTREACHED*/
2239 2228 }
2240 2229 nseg->s_ops = seg->s_ops;
2241 2230 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2242 2231 nseg->s_data = (void *)nsvd;
2243 2232 nseg->s_szc = seg->s_szc;
2244 2233 *nsvd = *svd;
2245 2234 nsvd->seg = nseg;
2246 2235 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2247 2236 nsvd->swresv = 0;
2248 2237 nsvd->softlockcnt = 0;
2249 2238 nsvd->softlockcnt_sbase = 0;
2250 2239 nsvd->softlockcnt_send = 0;
2251 2240 nsvd->svn_inz = svd->svn_inz;
2252 2241 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2253 2242
2254 2243 if (svd->vp != NULL) {
2255 2244 VN_HOLD(nsvd->vp);
2256 2245 if (nsvd->type == MAP_SHARED)
2257 2246 lgrp_shm_policy_init(NULL, nsvd->vp);
2258 2247 }
2259 2248 crhold(svd->cred);
2260 2249
2261 2250 if (svd->vpage == NULL) {
2262 2251 nsvd->vpage = NULL;
2263 2252 } else {
2264 2253 /* need to split vpage into two arrays */
2265 2254 size_t nbytes;
2266 2255 struct vpage *ovpage;
2267 2256
2268 2257 ovpage = svd->vpage; /* keep pointer to vpage */
2269 2258
2270 2259 npages = seg_pages(seg); /* seg has shrunk */
2271 2260 nbytes = vpgtob(npages);
2272 2261 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2273 2262
2274 2263 bcopy(ovpage, svd->vpage, nbytes);
2275 2264
2276 2265 npages = seg_pages(nseg);
2277 2266 nbytes = vpgtob(npages);
2278 2267 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2279 2268
2280 2269 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2281 2270
2282 2271 /* free up old vpage */
2283 2272 kmem_free(ovpage, vpgtob(opages));
2284 2273 }
2285 2274
2286 2275 if (amp == NULL) {
2287 2276 nsvd->amp = NULL;
2288 2277 nsvd->anon_index = 0;
2289 2278 } else {
2290 2279 /*
2291 2280 * Need to create a new anon map for the new segment.
2292 2281 * We'll also allocate a new smaller array for the old
2293 2282 * smaller segment to save space.
2294 2283 */
2295 2284 opages = btop((uintptr_t)(addr - seg->s_base));
2296 2285 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2297 2286 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2298 2287 /*
2299 2288 * Free up now unused parts of anon_map array.
2300 2289 */
2301 2290 ulong_t an_idx = svd->anon_index + opages;
2302 2291
2303 2292 /*
2304 2293 * Shared anon map is no longer in use. Before
2305 2294 * freeing its pages purge all entries from
2306 2295 * pcache that belong to this amp.
2307 2296 */
2308 2297 if (svd->type == MAP_SHARED) {
2309 2298 ASSERT(amp->refcnt == 1);
2310 2299 ASSERT(svd->softlockcnt == 0);
2311 2300 anonmap_purge(amp);
2312 2301 }
2313 2302
2314 2303 if (amp->a_szc == seg->s_szc) {
2315 2304 if (seg->s_szc != 0) {
2316 2305 anon_free_pages(amp->ahp, an_idx, len,
2317 2306 seg->s_szc);
2318 2307 } else {
2319 2308 anon_free(amp->ahp, an_idx,
2320 2309 len);
2321 2310 }
2322 2311 } else {
2323 2312 ASSERT(svd->type == MAP_SHARED);
2324 2313 ASSERT(amp->a_szc > seg->s_szc);
2325 2314 anon_shmap_free_pages(amp, an_idx, len);
2326 2315 }
2327 2316
2328 2317 /*
2329 2318 * Unreserve swap space for the
2330 2319 * unmapped chunk of this segment in
2331 2320 * case it's MAP_SHARED
2332 2321 */
2333 2322 if (svd->type == MAP_SHARED) {
2334 2323 anon_unresv_zone(len,
2335 2324 seg->s_as->a_proc->p_zone);
2336 2325 amp->swresv -= len;
2337 2326 }
2338 2327 }
2339 2328 nsvd->anon_index = svd->anon_index +
2340 2329 btop((uintptr_t)(nseg->s_base - seg->s_base));
2341 2330 if (svd->type == MAP_SHARED) {
2342 2331 amp->refcnt++;
2343 2332 nsvd->amp = amp;
2344 2333 } else {
2345 2334 struct anon_map *namp;
2346 2335 struct anon_hdr *nahp;
2347 2336
2348 2337 ASSERT(svd->type == MAP_PRIVATE);
2349 2338 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2350 2339 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2351 2340 namp->a_szc = seg->s_szc;
2352 2341 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2353 2342 0, btop(seg->s_size), ANON_SLEEP);
2354 2343 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2355 2344 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2356 2345 anon_release(amp->ahp, btop(amp->size));
2357 2346 svd->anon_index = 0;
2358 2347 nsvd->anon_index = 0;
2359 2348 amp->ahp = nahp;
2360 2349 amp->size = seg->s_size;
2361 2350 nsvd->amp = namp;
2362 2351 }
2363 2352 ANON_LOCK_EXIT(&amp->a_rwlock);
2364 2353 }
2365 2354 if (svd->swresv) {
2366 2355 if (svd->flags & MAP_NORESERVE) {
2367 2356 ASSERT(amp);
2368 2357 oswresv = svd->swresv;
2369 2358 svd->swresv = ptob(anon_pages(amp->ahp,
2370 2359 svd->anon_index, btop(seg->s_size)));
2371 2360 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2372 2361 nsvd->anon_index, btop(nseg->s_size)));
2373 2362 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2374 2363 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2375 2364 seg->s_as->a_proc->p_zone);
2376 2365 if (SEG_IS_PARTIAL_RESV(seg))
2377 2366 seg->s_as->a_resvsize -= oswresv -
2378 2367 (svd->swresv + nsvd->swresv);
2379 2368 } else {
2380 2369 size_t unlen;
2381 2370
2382 2371 if (svd->pageswap) {
2383 2372 oswresv = svd->swresv;
2384 2373 svd->swresv = segvn_count_swap_by_vpages(seg);
2385 2374 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2386 2375 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2387 2376 unlen = oswresv - (svd->swresv + nsvd->swresv);
2388 2377 } else {
2389 2378 if (seg->s_size + nseg->s_size + len !=
2390 2379 svd->swresv) {
2391 2380 panic("segvn_unmap: cannot split "
2392 2381 "swap reservation");
2393 2382 /*NOTREACHED*/
2394 2383 }
2395 2384 svd->swresv = seg->s_size;
2396 2385 nsvd->swresv = nseg->s_size;
2397 2386 unlen = len;
2398 2387 }
2399 2388 anon_unresv_zone(unlen,
2400 2389 seg->s_as->a_proc->p_zone);
2401 2390 }
2402 2391 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2403 2392 seg, len, 0);
2404 2393 }
2405 2394
2406 2395 return (0); /* I'm glad that's all over with! */
2407 2396 }
2408 2397
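Aside: the hardest part of segvn_unmap() to eyeball is the middle-of-segment case, where one segment becomes two. The geometry reduces to a few lines of pointer arithmetic; here is a user-space model of just that arithmetic (the struct and function names are illustrative, not kernel interfaces):

#include <stdio.h>
#include <stdint.h>
#include <assert.h>

struct segm {
	uintptr_t	base;	/* like seg->s_base */
	size_t		size;	/* like seg->s_size */
	uint64_t	offset;	/* like svd->offset into the backing object */
};

/* unmap [addr, addr+len) strictly inside 'low'; 'high' takes the remainder */
static void
split_middle(struct segm *low, struct segm *high, uintptr_t addr, size_t len)
{
	uintptr_t oend = low->base + low->size;

	assert(addr > low->base && addr + len < oend);
	high->base = addr + len;			/* nbase */
	high->size = oend - high->base;			/* nsize */
	high->offset = low->offset + (high->base - low->base);
	low->size = addr - low->base;			/* shrink the old seg */
}

int
main(void)
{
	struct segm low = { 0x10000, 0x8000, 0 };
	struct segm high;

	split_middle(&low, &high, 0x12000, 0x2000);
	printf("low:  base %#lx size %#zx off %#llx\n",
	    (unsigned long)low.base, low.size, (unsigned long long)low.offset);
	printf("high: base %#lx size %#zx off %#llx\n",
	    (unsigned long)high.base, high.size,
	    (unsigned long long)high.offset);
	return (0);
}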
2409 2398 static void
2410 2399 segvn_free(struct seg *seg)
2411 2400 {
2412 2401 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2413 2402 pgcnt_t npages = seg_pages(seg);
2414 2403 struct anon_map *amp;
2415 2404 size_t len;
2416 2405
2417 2406 /*
2418 2407 * We don't need any segment level locks for "segvn" data
2419 2408 * since the address space is "write" locked.
2420 2409 */
2421 2410 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2422 2411 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2423 2412
2424 2413 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2425 2414
2426 2415 /*
2427 2416 * Be sure to unlock pages. XXX Why do things get free'ed instead
2428 2417 * of unmapped? XXX
2429 2418 */
2430 2419 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2431 2420 0, MC_UNLOCK, NULL, 0);
2432 2421
2433 2422 /*
2434 2423 * Deallocate the vpage and anon pointers if necessary and possible.
2435 2424 */
2436 2425 if (svd->vpage != NULL) {
2437 2426 kmem_free(svd->vpage, vpgtob(npages));
2438 2427 svd->vpage = NULL;
2439 2428 }
2440 2429 if ((amp = svd->amp) != NULL) {
2441 2430 /*
2442 2431 * If there are no more references to this anon_map
2443 2432 * structure, then deallocate the structure after freeing
2444 2433 * up all the anon slot pointers that we can.
2445 2434 */
2446 2435 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2447 2436 ASSERT(amp->a_szc >= seg->s_szc);
2448 2437 if (--amp->refcnt == 0) {
2449 2438 if (svd->type == MAP_PRIVATE) {
2450 2439 /*
2451 2440 * Private - we only need to anon_free
2452 2441 * the part that this segment refers to.
2453 2442 */
2454 2443 if (seg->s_szc != 0) {
2455 2444 anon_free_pages(amp->ahp,
2456 2445 svd->anon_index, seg->s_size,
2457 2446 seg->s_szc);
2458 2447 } else {
2459 2448 anon_free(amp->ahp, svd->anon_index,
2460 2449 seg->s_size);
2461 2450 }
2462 2451 } else {
2463 2452
2464 2453 /*
2465 2454 * Shared anon map is no longer in use. Before
2466 2455 * freeing its pages purge all entries from
2467 2456 * pcache that belong to this amp.
2468 2457 */
2469 2458 ASSERT(svd->softlockcnt == 0);
2470 2459 anonmap_purge(amp);
2471 2460
2472 2461 /*
2473 2462 * Shared - anon_free the entire
2474 2463 * anon_map's worth of stuff and
2475 2464 * release any swap reservation.
2476 2465 */
2477 2466 if (amp->a_szc != 0) {
2478 2467 anon_shmap_free_pages(amp, 0,
2479 2468 amp->size);
2480 2469 } else {
2481 2470 anon_free(amp->ahp, 0, amp->size);
2482 2471 }
2483 2472 if ((len = amp->swresv) != 0) {
2484 2473 anon_unresv_zone(len,
2485 2474 seg->s_as->a_proc->p_zone);
2486 2475 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2487 2476 "anon proc:%p %lu %u", seg, len, 0);
2488 2477 }
2489 2478 }
2490 2479 svd->amp = NULL;
2491 2480 ANON_LOCK_EXIT(&amp->a_rwlock);
2492 2481 anonmap_free(amp);
2493 2482 } else if (svd->type == MAP_PRIVATE) {
2494 2483 /*
2495 2484 * We had a private mapping which still has
2496 2485 * a held anon_map so just free up all the
2497 2486 * anon slot pointers that we were using.
2498 2487 */
2499 2488 if (seg->s_szc != 0) {
2500 2489 anon_free_pages(amp->ahp, svd->anon_index,
2501 2490 seg->s_size, seg->s_szc);
2502 2491 } else {
2503 2492 anon_free(amp->ahp, svd->anon_index,
2504 2493 seg->s_size);
2505 2494 }
2506 2495 ANON_LOCK_EXIT(&amp->a_rwlock);
2507 2496 } else {
2508 2497 ANON_LOCK_EXIT(&amp->a_rwlock);
2509 2498 }
2510 2499 }
2511 2500
2512 2501 /*
2513 2502 * Release swap reservation.
2514 2503 */
2515 2504 if ((len = svd->swresv) != 0) {
2516 2505 anon_unresv_zone(svd->swresv,
2517 2506 seg->s_as->a_proc->p_zone);
2518 2507 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2519 2508 seg, len, 0);
2520 2509 if (SEG_IS_PARTIAL_RESV(seg))
2521 2510 seg->s_as->a_resvsize -= svd->swresv;
2522 2511 svd->swresv = 0;
2523 2512 }
2524 2513 /*
2525 2514 * Release claim on vnode, credentials, and finally free the
2526 2515 * private data.
2527 2516 */
2528 2517 if (svd->vp != NULL) {
2529 2518 if (svd->type == MAP_SHARED)
2530 2519 lgrp_shm_policy_fini(NULL, svd->vp);
2531 2520 VN_RELE(svd->vp);
2532 2521 svd->vp = NULL;
2533 2522 }
2534 2523 crfree(svd->cred);
2535 2524 svd->pageprot = 0;
2536 2525 svd->pageadvice = 0;
2537 2526 svd->pageswap = 0;
2538 2527 svd->cred = NULL;
2539 2528
2540 2529 /*
2541 2530 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2542 2531 * still working with this segment without holding as lock (in case
2543 2532 * it's called by pcache async thread).
2544 2533 */
2545 2534 ASSERT(svd->softlockcnt == 0);
2546 2535 mutex_enter(&svd->segfree_syncmtx);
2547 2536 mutex_exit(&svd->segfree_syncmtx);
2548 2537
2549 2538 seg->s_data = NULL;
2550 2539 kmem_cache_free(segvn_cache, svd);
2551 2540 }
2552 2541
2553 2542 /*
2554 2543 * Do a F_SOFTUNLOCK call over the range requested. The range must have
2555 2544 * already been F_SOFTLOCK'ed.
2556 2545 * Caller must always match addr and len of a softunlock with a previous
2557 2546 * softlock with exactly the same addr and len.
2558 2547 */
2559 2548 static void
2560 2549 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2561 2550 {
2562 2551 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2563 2552 page_t *pp;
2564 2553 caddr_t adr;
2565 2554 struct vnode *vp;
2566 2555 u_offset_t offset;
2567 2556 ulong_t anon_index;
2568 2557 struct anon_map *amp;
2569 2558 struct anon *ap = NULL;
2570 2559
2571 2560 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2572 2561 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2573 2562
2574 2563 if ((amp = svd->amp) != NULL)
2575 2564 anon_index = svd->anon_index + seg_page(seg, addr);
2576 2565
2577 2566 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2578 2567 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2579 2568 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2580 2569 } else {
2581 2570 hat_unlock(seg->s_as->a_hat, addr, len);
2582 2571 }
2583 2572 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2584 2573 if (amp != NULL) {
2585 2574 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2586 2575 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2587 2576 != NULL) {
2588 2577 swap_xlate(ap, &vp, &offset);
2589 2578 } else {
2590 2579 vp = svd->vp;
2591 2580 offset = svd->offset +
2592 2581 (uintptr_t)(adr - seg->s_base);
2593 2582 }
2594 2583 ANON_LOCK_EXIT(&amp->a_rwlock);
2595 2584 } else {
2596 2585 vp = svd->vp;
2597 2586 offset = svd->offset +
2598 2587 (uintptr_t)(adr - seg->s_base);
2599 2588 }
2600 2589
2601 2590 /*
2602 2591 * Use page_find() instead of page_lookup() to
2603 2592 * find the page since we know that it is locked.
2604 2593 */
2605 2594 pp = page_find(vp, offset);
2606 2595 if (pp == NULL) {
2607 2596 panic(
2608 2597 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2609 2598 (void *)adr, (void *)ap, (void *)vp, offset);
2610 2599 /*NOTREACHED*/
2611 2600 }
2612 2601
2613 2602 if (rw == S_WRITE) {
2614 2603 hat_setrefmod(pp);
2615 2604 if (seg->s_as->a_vbits)
2616 2605 hat_setstat(seg->s_as, adr, PAGESIZE,
2617 2606 P_REF | P_MOD);
2618 2607 } else if (rw != S_OTHER) {
2619 2608 hat_setref(pp);
2620 2609 if (seg->s_as->a_vbits)
2621 2610 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2622 2611 }
2623 2612 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2624 2613 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2625 2614 page_unlock(pp);
2626 2615 }
2627 2616 ASSERT(svd->softlockcnt >= btop(len));
2628 2617 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2629 2618 /*
2630 2619 * All SOFTLOCKS are gone. Wakeup any waiting
2631 2620 * unmappers so they can try again to unmap.
2632 2621 * Check for waiters first without the mutex
2633 2622 * held so we don't always grab the mutex on
2634 2623 * softunlocks.
2635 2624 */
2636 2625 if (AS_ISUNMAPWAIT(seg->s_as)) {
2637 2626 mutex_enter(&seg->s_as->a_contents);
2638 2627 if (AS_ISUNMAPWAIT(seg->s_as)) {
2639 2628 AS_CLRUNMAPWAIT(seg->s_as);
2640 2629 cv_broadcast(&seg->s_as->a_cv);
2641 2630 }
2642 2631 mutex_exit(&seg->s_as->a_contents);
2643 2632 }
2644 2633 }
2645 2634 }
2646 2635
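Aside: the loop in segvn_softunlock() names each page by a <vnode, offset> pair: pages with an anon slot are swap backed (the kernel resolves them with swap_xlate()), everything else falls back to the segment's vnode at svd->offset plus the byte offset into the segment. A user-space sketch of that resolution, with the anon layer mocked by a plain array (names here are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MODEL_PGSZ	4096	/* model page size */

struct backing {
	const char	*vp;	/* which object names the page */
	uint64_t	off;	/* offset within that object */
};

/*
 * Resolve one address: if the page has an anon slot the backing comes from
 * the slot; otherwise it is the segment's vnode at seg_off + (addr - seg_base).
 */
static struct backing
resolve(uintptr_t addr, uintptr_t seg_base, uint64_t seg_off,
    const struct backing *anon_slots, size_t nslots)
{
	size_t pgidx = (addr - seg_base) / MODEL_PGSZ;

	if (pgidx < nslots && anon_slots[pgidx].vp != NULL)
		return (anon_slots[pgidx]);	/* anon (swap) backed */
	return ((struct backing){ "file-vnode", seg_off + (addr - seg_base) });
}

int
main(void)
{
	struct backing slots[2] = { { NULL, 0 }, { "swap-vnode", 0x9000 } };
	uintptr_t base = 0x20000, a;

	for (a = base; a < base + 2 * MODEL_PGSZ; a += MODEL_PGSZ) {
		struct backing b = resolve(a, base, 0x1000, slots, 2);
		printf("%#lx -> %s @ %#llx\n", (unsigned long)a, b.vp,
		    (unsigned long long)b.off);
	}
	return (0);
}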
2647 2636 #define PAGE_HANDLED ((page_t *)-1)
2648 2637
2649 2638 /*
2650 2639 * Release all the pages in the NULL terminated ppp list
2651 2640 * which haven't already been converted to PAGE_HANDLED.
2652 2641 */
2653 2642 static void
2654 2643 segvn_pagelist_rele(page_t **ppp)
2655 2644 {
2656 2645 for (; *ppp != NULL; ppp++) {
2657 2646 if (*ppp != PAGE_HANDLED)
2658 2647 page_unlock(*ppp);
2659 2648 }
2660 2649 }
2661 2650
2662 2651 static int stealcow = 1;
2663 2652
2664 2653 /*
2665 2654 * Workaround for viking chip bug. See bug id 1220902.
2666 2655 * To fix this down in pagefault() would require importing so
2667 2656 * much as and segvn code as to be unmaintainable.
2668 2657 */
2669 2658 int enable_mbit_wa = 0;
2670 2659
2671 2660 /*
2672 2661 * Handles all the dirty work of getting the right
2673 2662 * anonymous pages and loading up the translations.
2674 2663 * This routine is called only from segvn_fault()
2675 2664 * when looping over the range of addresses requested.
2676 2665 *
2677 2666 * The basic algorithm here is:
2678 2667 * If this is an anon_zero case
2679 2668 * Call anon_zero to allocate page
2680 2669 * Load up translation
2681 2670 * Return
2682 2671 * endif
2683 2672 * If this is an anon page
2684 2673 * Use anon_getpage to get the page
2685 2674 * else
2686 2675 * Find page in pl[] list passed in
2687 2676 * endif
2688 2677 * If not a cow
2689 2678 * Load up the translation to the page
2690 2679 * return
2691 2680 * endif
2692 2681 * Call anon_private to handle cow
2693 2682 * Load up (writable) translation to new page
2694 2683 */
2695 2684 static faultcode_t
2696 2685 segvn_faultpage(
2697 2686 struct hat *hat, /* the hat to use for mapping */
2698 2687 struct seg *seg, /* seg_vn of interest */
2699 2688 caddr_t addr, /* address in as */
2700 2689 u_offset_t off, /* offset in vp */
2701 2690 struct vpage *vpage, /* pointer to vpage for vp, off */
2702 2691 page_t *pl[], /* object source page pointer */
2703 2692 uint_t vpprot, /* access allowed to object pages */
2704 2693 enum fault_type type, /* type of fault */
2705 2694 enum seg_rw rw, /* type of access at fault */
2706 2695 int brkcow) /* we may need to break cow */
2707 2696 {
2708 2697 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2709 2698 page_t *pp, **ppp;
2710 2699 uint_t pageflags = 0;
2711 2700 page_t *anon_pl[1 + 1];
2712 2701 page_t *opp = NULL; /* original page */
2713 2702 uint_t prot;
2714 2703 int err;
2715 2704 int cow;
2716 2705 int claim;
2717 2706 int steal = 0;
2718 2707 ulong_t anon_index;
2719 2708 struct anon *ap, *oldap;
2720 2709 struct anon_map *amp;
2721 2710 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2722 2711 int anon_lock = 0;
2723 2712 anon_sync_obj_t cookie;
2724 2713
2725 2714 if (svd->flags & MAP_TEXT) {
2726 2715 hat_flag |= HAT_LOAD_TEXT;
2727 2716 }
2728 2717
2729 2718 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2730 2719 ASSERT(seg->s_szc == 0);
2731 2720 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2732 2721
2733 2722 /*
2734 2723 * Initialize protection value for this page.
2735 2724 * If we have per page protection values check it now.
2736 2725 */
2737 2726 if (svd->pageprot) {
2738 2727 uint_t protchk;
2739 2728
2740 2729 switch (rw) {
2741 2730 case S_READ:
2742 2731 protchk = PROT_READ;
2743 2732 break;
2744 2733 case S_WRITE:
2745 2734 protchk = PROT_WRITE;
2746 2735 break;
2747 2736 case S_EXEC:
2748 2737 protchk = PROT_EXEC;
2749 2738 break;
2750 2739 case S_OTHER:
2751 2740 default:
2752 2741 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2753 2742 break;
2754 2743 }
2755 2744
2756 2745 prot = VPP_PROT(vpage);
2757 2746 if ((prot & protchk) == 0)
2758 2747 return (FC_PROT); /* illegal access type */
2759 2748 } else {
2760 2749 prot = svd->prot;
2761 2750 }
2762 2751
2763 2752 if (type == F_SOFTLOCK) {
2764 2753 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2765 2754 }
2766 2755
2767 2756 /*
2768 2757 * Always acquire the anon array lock to prevent 2 threads from
2769 2758 * allocating separate anon slots for the same "addr".
2770 2759 */
2771 2760
2772 2761 if ((amp = svd->amp) != NULL) {
2773 2762 ASSERT(RW_READ_HELD(&amp->a_rwlock));
2774 2763 anon_index = svd->anon_index + seg_page(seg, addr);
2775 2764 anon_array_enter(amp, anon_index, &cookie);
2776 2765 anon_lock = 1;
2777 2766 }
2778 2767
2779 2768 if (svd->vp == NULL && amp != NULL) {
2780 2769 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2781 2770 /*
2782 2771 * Allocate a (normally) writable anonymous page of
2783 2772 * zeroes. If no advance reservations, reserve now.
2784 2773 */
2785 2774 if (svd->flags & MAP_NORESERVE) {
2786 2775 if (anon_resv_zone(ptob(1),
2787 2776 seg->s_as->a_proc->p_zone)) {
2788 2777 atomic_add_long(&svd->swresv, ptob(1));
2789 2778 atomic_add_long(&seg->s_as->a_resvsize,
2790 2779 ptob(1));
2791 2780 } else {
2792 2781 err = ENOMEM;
2793 2782 goto out;
2794 2783 }
2795 2784 }
2796 2785 if ((pp = anon_zero(seg, addr, &ap,
2797 2786 svd->cred)) == NULL) {
2798 2787 err = ENOMEM;
2799 2788 goto out; /* out of swap space */
2800 2789 }
2801 2790 /*
2802 2791 * Re-acquire the anon_map lock and
2803 2792 * initialize the anon array entry.
2804 2793 */
2805 2794 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2806 2795 ANON_SLEEP);
2807 2796
2808 2797 ASSERT(pp->p_szc == 0);
2809 2798
2810 2799 /*
2811 2800 * Handle pages that have been marked for migration
2812 2801 */
2813 2802 if (lgrp_optimizations())
2814 2803 page_migrate(seg, addr, &pp, 1);
2815 2804
2816 2805 if (enable_mbit_wa) {
2817 2806 if (rw == S_WRITE)
2818 2807 hat_setmod(pp);
2819 2808 else if (!hat_ismod(pp))
2820 2809 prot &= ~PROT_WRITE;
2821 2810 }
2822 2811 /*
2823 2812 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2824 2813 * with MC_LOCKAS, MCL_FUTURE) and this is a
2825 2814 * MAP_NORESERVE segment, we may need to
2826 2815 * permanently lock the page as it is being faulted
2827 2816 * for the first time. The following text applies
2828 2817 * only to MAP_NORESERVE segments:
2829 2818 *
2830 2819 * As per memcntl(2), if this segment was created
2831 2820 * after MCL_FUTURE was applied (a "future"
2832 2821 * segment), its pages must be locked. If this
2833 2822 * segment existed at MCL_FUTURE application (a
2834 2823 * "past" segment), the interface is unclear.
2835 2824 *
2836 2825 * We decide to lock only if vpage is present:
2837 2826 *
2838 2827 * - "future" segments will have a vpage array (see
2839 2828 * as_map), and so will be locked as required
2840 2829 *
2841 2830 * - "past" segments may not have a vpage array,
2842 2831 * depending on whether events (such as
2843 2832 * mprotect) have occurred. Locking if vpage
2844 2833 * exists will preserve legacy behavior. Not
2845 2834 * locking if vpage is absent, will not break
2846 2835 * the interface or legacy behavior. Note that
2847 2836 * allocating vpage here if it's absent requires
2848 2837 * upgrading the segvn reader lock, the cost of
2849 2838 * which does not seem worthwhile.
2850 2839 *
2851 2840 * Usually testing and setting VPP_ISPPLOCK and
2852 2841 * VPP_SETPPLOCK requires holding the segvn lock as
2853 2842 * writer, but in this case all readers are
2854 2843 * serializing on the anon array lock.
2855 2844 */
2856 2845 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2857 2846 (svd->flags & MAP_NORESERVE) &&
2858 2847 !VPP_ISPPLOCK(vpage)) {
2859 2848 proc_t *p = seg->s_as->a_proc;
2860 2849 ASSERT(svd->type == MAP_PRIVATE);
2861 2850 mutex_enter(&p->p_lock);
2862 2851 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2863 2852 1) == 0) {
2864 2853 claim = VPP_PROT(vpage) & PROT_WRITE;
2865 2854 if (page_pp_lock(pp, claim, 0)) {
2866 2855 VPP_SETPPLOCK(vpage);
2867 2856 } else {
2868 2857 rctl_decr_locked_mem(p, NULL,
2869 2858 PAGESIZE, 1);
2870 2859 }
2871 2860 }
2872 2861 mutex_exit(&p->p_lock);
2873 2862 }
2874 2863
2875 2864 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2876 2865 hat_memload(hat, addr, pp, prot, hat_flag);
2877 2866
2878 2867 if (!(hat_flag & HAT_LOAD_LOCK))
2879 2868 page_unlock(pp);
2880 2869
2881 2870 anon_array_exit(&cookie);
2882 2871 return (0);
2883 2872 }
2884 2873 }
2885 2874
2886 2875 /*
2887 2876 * Obtain the page structure via anon_getpage() if it is
2888 2877 * a private copy of an object (the result of a previous
2889 2878 * copy-on-write).
2890 2879 */
2891 2880 if (amp != NULL) {
2892 2881 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2893 2882 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2894 2883 seg, addr, rw, svd->cred);
2895 2884 if (err)
2896 2885 goto out;
2897 2886
2898 2887 if (svd->type == MAP_SHARED) {
2899 2888 /*
2900 2889 * If this is a shared mapping to an
2901 2890 * anon_map, then ignore the write
2902 2891 * permissions returned by anon_getpage().
2903 2892 * They apply to the private mappings
2904 2893 * of this anon_map.
2905 2894 */
2906 2895 vpprot |= PROT_WRITE;
2907 2896 }
2908 2897 opp = anon_pl[0];
2909 2898 }
2910 2899 }
2911 2900
2912 2901 /*
2913 2902 * Search the pl[] list passed in if it is from the
2914 2903 * original object (i.e., not a private copy).
2915 2904 */
2916 2905 if (opp == NULL) {
2917 2906 /*
2918 2907 * Find original page. We must be bringing it in
2919 2908 * from the list in pl[].
2920 2909 */
2921 2910 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2922 2911 if (opp == PAGE_HANDLED)
2923 2912 continue;
2924 2913 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2925 2914 if (opp->p_offset == off)
2926 2915 break;
2927 2916 }
2928 2917 if (opp == NULL) {
2929 2918 panic("segvn_faultpage not found");
2930 2919 /*NOTREACHED*/
2931 2920 }
2932 2921 *ppp = PAGE_HANDLED;
2933 2922
2934 2923 }
2935 2924
2936 2925 ASSERT(PAGE_LOCKED(opp));
2937 2926
2938 2927 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2939 2928 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2940 2929
2941 2930 /*
2942 2931 * The fault is treated as a copy-on-write fault if a
2943 2932 * write occurs on a private segment and the object
2944 2933 * page (i.e., mapping) is write protected. We assume
2945 2934 * that fatal protection checks have already been made.
2946 2935 */
2947 2936
2948 2937 if (brkcow) {
2949 2938 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2950 2939 cow = !(vpprot & PROT_WRITE);
2951 2940 } else if (svd->tr_state == SEGVN_TR_ON) {
2952 2941 /*
2953 2942 * If we are doing text replication COW on first touch.
2954 2943 */
2955 2944 ASSERT(amp != NULL);
2956 2945 ASSERT(svd->vp != NULL);
2957 2946 ASSERT(rw != S_WRITE);
2958 2947 cow = (ap == NULL);
2959 2948 } else {
2960 2949 cow = 0;
2961 2950 }
2962 2951
2963 2952 /*
2964 2953 * If not a copy-on-write case load the translation
2965 2954 * and return.
2966 2955 */
2967 2956 if (cow == 0) {
2968 2957
2969 2958 /*
2970 2959 * Handle pages that have been marked for migration
2971 2960 */
2972 2961 if (lgrp_optimizations())
2973 2962 page_migrate(seg, addr, &opp, 1);
2974 2963
2975 2964 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2976 2965 if (rw == S_WRITE)
2977 2966 hat_setmod(opp);
2978 2967 else if (rw != S_OTHER && !hat_ismod(opp))
2979 2968 prot &= ~PROT_WRITE;
2980 2969 }
2981 2970
2982 2971 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2983 2972 (!svd->pageprot && svd->prot == (prot & vpprot)));
2984 2973 ASSERT(amp == NULL ||
2985 2974 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2986 2975 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2987 2976 svd->rcookie);
2988 2977
2989 2978 if (!(hat_flag & HAT_LOAD_LOCK))
2990 2979 page_unlock(opp);
2991 2980
2992 2981 if (anon_lock) {
2993 2982 anon_array_exit(&cookie);
2994 2983 }
2995 2984 return (0);
2996 2985 }
2997 2986
2998 2987 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2999 2988
3000 2989 hat_setref(opp);
3001 2990
3002 2991 ASSERT(amp != NULL && anon_lock);
3003 2992
3004 2993 /*
3005 2994 * Steal the page only if it isn't a private page
3006 2995 * since stealing a private page is not worth the effort.
3007 2996 */
3008 2997 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
3009 2998 steal = 1;
3010 2999
3011 3000 /*
3012 3001 * Steal the original page if the following conditions are true:
3013 3002 *
3014 3003 * We are low on memory, the page is not private, page is not large,
3015 3004 * not shared, not modified, not `locked' or if we have it `locked'
3016 3005 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3017 3006 * that the page is not shared) and if it doesn't have any
3018 3007 * translations. page_struct_lock isn't needed to look at p_cowcnt
3019 3008 * and p_lckcnt because we first get exclusive lock on page.
3020 3009 */
3021 3010 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
3022 3011
3023 3012 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
3024 3013 page_tryupgrade(opp) && !hat_ismod(opp) &&
3025 3014 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
3026 3015 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
3027 3016 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
3028 3017 /*
3029 3018 * Check if this page has other translations
3030 3019 * after unloading our translation.
3031 3020 */
3032 3021 if (hat_page_is_mapped(opp)) {
3033 3022 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3034 3023 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
3035 3024 HAT_UNLOAD);
3036 3025 }
3037 3026
3038 3027 /*
3039 3028 * hat_unload() might sync back someone else's recent
3040 3029 * modification, so check again.
3041 3030 */
3042 3031 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
3043 3032 pageflags |= STEAL_PAGE;
3044 3033 }
3045 3034
3046 3035 /*
3047 3036 * If we have a vpage pointer, see if it indicates that we have
3048 3037 * ``locked'' the page we map -- if so, tell anon_private to
3049 3038 * transfer the locking resource to the new page.
3050 3039 *
3051 3040 * See Statement at the beginning of segvn_lockop regarding
3052 3041 * the way lockcnts/cowcnts are handled during COW.
3053 3042 *
3054 3043 */
3055 3044 if (vpage != NULL && VPP_ISPPLOCK(vpage))
3056 3045 pageflags |= LOCK_PAGE;
3057 3046
3058 3047 /*
3059 3048 * Allocate a private page and perform the copy.
3060 3049 * For MAP_NORESERVE reserve swap space now, unless this
3061 3050 * is a cow fault on an existing anon page in which case
3062 3051 * MAP_NORESERVE will have made advance reservations.
3063 3052 */
3064 3053 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3065 3054 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3066 3055 atomic_add_long(&svd->swresv, ptob(1));
3067 3056 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3068 3057 } else {
3069 3058 page_unlock(opp);
3070 3059 err = ENOMEM;
3071 3060 goto out;
3072 3061 }
3073 3062 }
3074 3063 oldap = ap;
3075 3064 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3076 3065 if (pp == NULL) {
3077 3066 err = ENOMEM; /* out of swap space */
3078 3067 goto out;
3079 3068 }
3080 3069
3081 3070 /*
3082 3071 * If we copied away from an anonymous page, then
3083 3072 * we are one step closer to freeing up an anon slot.
3084 3073 *
3085 3074 * NOTE: The original anon slot must be released while
3086 3075 * holding the "anon_map" lock. This is necessary to prevent
3087 3076 * other threads from obtaining a pointer to the anon slot
3088 3077 * which may be freed if its "refcnt" is 1.
3089 3078 */
3090 3079 if (oldap != NULL)
3091 3080 anon_decref(oldap);
3092 3081
3093 3082 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3094 3083
3095 3084 /*
3096 3085 * Handle pages that have been marked for migration
3097 3086 */
3098 3087 if (lgrp_optimizations())
3099 3088 page_migrate(seg, addr, &pp, 1);
3100 3089
3101 3090 ASSERT(pp->p_szc == 0);
3102 3091
3103 3092 ASSERT(!IS_VMODSORT(pp->p_vnode));
3104 3093 if (enable_mbit_wa) {
3105 3094 if (rw == S_WRITE)
3106 3095 hat_setmod(pp);
3107 3096 else if (!hat_ismod(pp))
3108 3097 prot &= ~PROT_WRITE;
3109 3098 }
3110 3099
3111 3100 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3112 3101 hat_memload(hat, addr, pp, prot, hat_flag);
3113 3102
3114 3103 if (!(hat_flag & HAT_LOAD_LOCK))
3115 3104 page_unlock(pp);
3116 3105
3117 3106 ASSERT(anon_lock);
3118 3107 anon_array_exit(&cookie);
3119 3108 return (0);
3120 3109 out:
3121 3110 if (anon_lock)
3122 3111 anon_array_exit(&cookie);
3123 3112
3124 3113 if (type == F_SOFTLOCK) {
3125 3114 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3126 3115 }
3127 3116 return (FC_MAKE_ERR(err));
3128 3117 }
3129 3118
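Aside: the copy-on-write decision buried in segvn_faultpage() above has only three cases: brkcow copies whenever the object page is write protected, text replication copies on first touch (no anon slot yet), and everything else just loads the existing page. A compact user-space restatement of that predicate (argument names are illustrative, not the kernel's):

#include <stdio.h>
#include <stdbool.h>
#include <sys/mman.h>	/* PROT_WRITE */

/*
 * Restatement of the cow = ... decision: brkcow forces a copy of any
 * write-protected object page; with text replication on, copy on first
 * touch (no anon slot yet); otherwise never copy here.
 */
static bool
must_break_cow(bool brkcow, bool tr_on, bool have_anon_slot, unsigned vpprot)
{
	if (brkcow)
		return ((vpprot & PROT_WRITE) == 0);
	if (tr_on)
		return (!have_anon_slot);
	return (false);
}

int
main(void)
{
	printf("%d %d %d\n",
	    must_break_cow(true, false, false, 0),		/* 1: write to RO page */
	    must_break_cow(false, true, false, PROT_WRITE),	/* 1: repl, first touch */
	    must_break_cow(false, false, true, 0));		/* 0: plain load */
	return (0);
}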
3130 3119 /*
3131 3120 * Relocate a bunch of smaller targ pages into one large repl page. All targ
3132 3121 * pages must be complete pages smaller than replacement pages.
3133 3122 * It's assumed that no page's szc can change since they are all PAGESIZE or
3134 3123 * complete large pages locked SHARED.
3135 3124 */
3136 3125 static void
3137 3126 segvn_relocate_pages(page_t **targ, page_t *replacement)
3138 3127 {
3139 3128 page_t *pp;
3140 3129 pgcnt_t repl_npgs, curnpgs;
3141 3130 pgcnt_t i;
3142 3131 uint_t repl_szc = replacement->p_szc;
3143 3132 page_t *first_repl = replacement;
3144 3133 page_t *repl;
3145 3134 spgcnt_t npgs;
3146 3135
3147 3136 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3148 3137
3149 3138 ASSERT(repl_szc != 0);
3150 3139 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3151 3140
3152 3141 i = 0;
3153 3142 while (repl_npgs) {
3154 3143 spgcnt_t nreloc;
3155 3144 int err;
3156 3145 ASSERT(replacement != NULL);
3157 3146 pp = targ[i];
3158 3147 ASSERT(pp->p_szc < repl_szc);
3159 3148 ASSERT(PAGE_EXCL(pp));
3160 3149 ASSERT(!PP_ISFREE(pp));
3161 3150 curnpgs = page_get_pagecnt(pp->p_szc);
3162 3151 if (curnpgs == 1) {
3163 3152 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3164 3153 repl = replacement;
3165 3154 page_sub(&replacement, repl);
3166 3155 ASSERT(PAGE_EXCL(repl));
3167 3156 ASSERT(!PP_ISFREE(repl));
3168 3157 ASSERT(repl->p_szc == repl_szc);
3169 3158 } else {
3170 3159 page_t *repl_savepp;
3171 3160 int j;
3172 3161 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3173 3162 repl_savepp = replacement;
3174 3163 for (j = 0; j < curnpgs; j++) {
3175 3164 repl = replacement;
3176 3165 page_sub(&replacement, repl);
3177 3166 ASSERT(PAGE_EXCL(repl));
3178 3167 ASSERT(!PP_ISFREE(repl));
3179 3168 ASSERT(repl->p_szc == repl_szc);
3180 3169 ASSERT(page_pptonum(targ[i + j]) ==
3181 3170 page_pptonum(targ[i]) + j);
3182 3171 }
3183 3172 repl = repl_savepp;
3184 3173 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3185 3174 }
3186 3175 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3187 3176 if (err || nreloc != curnpgs) {
3188 3177 panic("segvn_relocate_pages: "
3189 3178 "page_relocate failed err=%d curnpgs=%ld "
3190 3179 "nreloc=%ld", err, curnpgs, nreloc);
3191 3180 }
3192 3181 ASSERT(curnpgs <= repl_npgs);
3193 3182 repl_npgs -= curnpgs;
3194 3183 i += curnpgs;
3195 3184 }
3196 3185 ASSERT(replacement == NULL);
3197 3186
3198 3187 repl = first_repl;
3199 3188 repl_npgs = npgs;
3200 3189 for (i = 0; i < repl_npgs; i++) {
3201 3190 ASSERT(PAGE_EXCL(repl));
3202 3191 ASSERT(!PP_ISFREE(repl));
3203 3192 targ[i] = repl;
3204 3193 page_downgrade(targ[i]);
3205 3194 repl++;
3206 3195 }
3207 3196 }
3208 3197
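Aside: the main loop of segvn_relocate_pages() consumes targ[] in root-sized chunks until the constituent pages exactly tile one replacement page, asserting along the way that each root is aligned to its own size. A user-space sketch of that bookkeeping, with a made-up pagecnt[] table standing in for page_get_pagecnt():

#include <stdio.h>

/* made-up constituent-page counts per size code (stand-in for the real table) */
static const unsigned pagecnt[] = { 1, 8, 64, 512 };

/*
 * Check that a list of chunk size codes tiles exactly one page of size code
 * repl_szc: every chunk must be smaller than the replacement, fit in what is
 * left, and start at an offset aligned to its own size.
 */
static int
tiles_replacement(const unsigned *szcs, unsigned nchunks, unsigned repl_szc)
{
	unsigned left = pagecnt[repl_szc];
	unsigned off = 0, c;

	for (c = 0; c < nchunks; c++) {
		unsigned cur = pagecnt[szcs[c]];

		if (szcs[c] >= repl_szc || cur > left || (off % cur) != 0)
			return (0);
		off += cur;
		left -= cur;
	}
	return (left == 0);
}

int
main(void)
{
	unsigned ok[8] = { 1, 1, 1, 1, 1, 1, 1, 1 };	/* 8 x szc 1 == szc 2 */
	unsigned bad[3] = { 1, 0, 1 };			/* third root misaligned */

	printf("%d %d\n", tiles_replacement(ok, 8, 2),
	    tiles_replacement(bad, 3, 2));		/* prints: 1 0 */
	return (0);
}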
3209 3198 /*
3210 3199 * Check if all pages in ppa array are complete smaller than szc pages and
3211 3200 * their roots will still be aligned relative to their current size if the
3212 3201 * entire ppa array is relocated into one szc page. If these conditions are
3213 3202 * not met return 0.
3214 3203 *
3215 3204 * If all pages are properly aligned attempt to upgrade their locks
3216 3205 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3217 3206 * upgrdfail was set to 0 by caller.
3218 3207 *
3219 3208 * Return 1 if all pages are aligned and locked exclusively.
3220 3209 *
3221 3210 * If all pages in ppa array happen to be physically contiguous to make one
3222 3211 * szc page and all exclusive locks are successfully obtained promote the page
3223 3212 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3224 3213 */
3225 3214 static int
3226 3215 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3227 3216 {
3228 3217 page_t *pp;
3229 3218 pfn_t pfn;
3230 3219 pgcnt_t totnpgs = page_get_pagecnt(szc);
3231 3220 pfn_t first_pfn;
3232 3221 int contig = 1;
3233 3222 pgcnt_t i;
3234 3223 pgcnt_t j;
3235 3224 uint_t curszc;
3236 3225 pgcnt_t curnpgs;
3237 3226 int root = 0;
3238 3227
3239 3228 ASSERT(szc > 0);
3240 3229
3241 3230 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3242 3231
3243 3232 for (i = 0; i < totnpgs; i++) {
3244 3233 pp = ppa[i];
3245 3234 ASSERT(PAGE_SHARED(pp));
3246 3235 ASSERT(!PP_ISFREE(pp));
3247 3236 pfn = page_pptonum(pp);
3248 3237 if (i == 0) {
3249 3238 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3250 3239 contig = 0;
3251 3240 } else {
3252 3241 first_pfn = pfn;
3253 3242 }
3254 3243 } else if (contig && pfn != first_pfn + i) {
3255 3244 contig = 0;
3256 3245 }
3257 3246 if (pp->p_szc == 0) {
3258 3247 if (root) {
3259 3248 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3260 3249 return (0);
3261 3250 }
3262 3251 } else if (!root) {
3263 3252 if ((curszc = pp->p_szc) >= szc) {
3264 3253 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3265 3254 return (0);
3266 3255 }
3267 3256 if (curszc == 0) {
3268 3257 /*
3269 3258 * p_szc changed means we don't have all pages
3270 3259 * locked. return failure.
3271 3260 */
3272 3261 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3273 3262 return (0);
3274 3263 }
3275 3264 curnpgs = page_get_pagecnt(curszc);
3276 3265 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3277 3266 !IS_P2ALIGNED(i, curnpgs)) {
3278 3267 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3279 3268 return (0);
3280 3269 }
3281 3270 root = 1;
3282 3271 } else {
3283 3272 ASSERT(i > 0);
3284 3273 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3285 3274 if (pp->p_szc != curszc) {
3286 3275 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3287 3276 return (0);
3288 3277 }
3289 3278 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3290 3279 panic("segvn_full_szcpages: "
3291 3280 "large page not physically contiguous");
3292 3281 }
3293 3282 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3294 3283 root = 0;
3295 3284 }
3296 3285 }
3297 3286 }
3298 3287
3299 3288 for (i = 0; i < totnpgs; i++) {
3300 3289 ASSERT(ppa[i]->p_szc < szc);
3301 3290 if (!page_tryupgrade(ppa[i])) {
3302 3291 for (j = 0; j < i; j++) {
3303 3292 page_downgrade(ppa[j]);
3304 3293 }
3305 3294 *pszc = ppa[i]->p_szc;
3306 3295 *upgrdfail = 1;
3307 3296 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3308 3297 return (0);
3309 3298 }
3310 3299 }
3311 3300
3312 3301 /*
3313 3302 	 * When a page is put on the free cachelist its szc is set to 0. If the
3314 3303 	 * file system reclaimed pages from the cachelist, the targ pages will be
3315 3304 	 * physically contiguous with 0 p_szc. In this case just upgrade the szc
3316 3305 	 * of the targ pages without any relocations.
3317 3306 	 * To avoid any hat issues with previous small mappings,
3318 3307 	 * hat_pageunload() the target pages first.
3319 3308 */
3320 3309 if (contig) {
3321 3310 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3322 3311 for (i = 0; i < totnpgs; i++) {
3323 3312 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3324 3313 }
3325 3314 for (i = 0; i < totnpgs; i++) {
3326 3315 ppa[i]->p_szc = szc;
3327 3316 }
3328 3317 for (i = 0; i < totnpgs; i++) {
3329 3318 ASSERT(PAGE_EXCL(ppa[i]));
3330 3319 page_downgrade(ppa[i]);
3331 3320 }
3332 3321 if (pszc != NULL) {
3333 3322 *pszc = szc;
3334 3323 }
3335 3324 }
3336 3325 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3337 3326 return (1);
3338 3327 }
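The contiguity and root-alignment tests in segvn_full_szcpages() reduce to power-of-two arithmetic on page frame numbers. A small self-contained sketch of those checks, with locally defined stand-ins for the kernel's IS_P2ALIGNED/P2PHASE macros and made-up pfn values, is:

	#include <stdio.h>
	#include <stdint.h>

	/* local stand-ins for the kernel's P2 macros */
	#define	IS_P2ALIGNED(v, a)	(((uintptr_t)(v) & ((uintptr_t)(a) - 1)) == 0)
	#define	P2PHASE(v, a)		((uintptr_t)(v) & ((uintptr_t)(a) - 1))

	int
	main(void)
	{
		uintptr_t first_pfn = 0x2000;	/* hypothetical pfn of ppa[0] */
		uintptr_t totnpgs = 512;	/* constituents of the target szc page */
		uintptr_t curnpgs = 8;		/* constituents of an embedded smaller large page */
		uintptr_t i = 16;		/* index of that smaller page's root in ppa[] */

		/* the whole run must start on a totnpgs boundary to be promotable */
		printf("run root aligned:      %d\n", IS_P2ALIGNED(first_pfn, totnpgs));
		/* an embedded large page must be aligned both in pfn and in ppa index */
		printf("embedded page aligned: %d\n",
		    IS_P2ALIGNED(first_pfn + i, curnpgs) && IS_P2ALIGNED(i, curnpgs));
		/* its last constituent is recognized by its phase within the run */
		printf("last constituent:      %d\n",
		    P2PHASE(first_pfn + i + curnpgs - 1, curnpgs) == curnpgs - 1);
		return (0);
	}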
3339 3328
3340 3329 /*
3341 3330 * Create physically contiguous pages for [vp, off] - [vp, off +
3342 3331 * page_size(szc)) range and for private segment return them in ppa array.
3343 3332 * Pages are created either via IO or relocations.
3344 3333 *
3345 3334 * Return 1 on success and 0 on failure.
3346 3335 *
3347 3336 * If physically contiguous pages already exist for this range return 1 without
3348 3337 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3349 3338 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
3350 3339 */
3351 3340
3352 3341 static int
3353 3342 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3354 3343 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3355 3344 int *downsize)
3356 3345
3357 3346 {
3358 3347 page_t *pplist = *ppplist;
3359 3348 size_t pgsz = page_get_pagesize(szc);
3360 3349 pgcnt_t pages = btop(pgsz);
3361 3350 ulong_t start_off = off;
3362 3351 u_offset_t eoff = off + pgsz;
3363 3352 spgcnt_t nreloc;
3364 3353 u_offset_t io_off = off;
3365 3354 size_t io_len;
3366 3355 page_t *io_pplist = NULL;
3367 3356 page_t *done_pplist = NULL;
3368 3357 pgcnt_t pgidx = 0;
3369 3358 page_t *pp;
3370 3359 page_t *newpp;
3371 3360 page_t *targpp;
3372 3361 int io_err = 0;
3373 3362 int i;
3374 3363 pfn_t pfn;
3375 3364 ulong_t ppages;
3376 3365 page_t *targ_pplist = NULL;
3377 3366 page_t *repl_pplist = NULL;
3378 3367 page_t *tmp_pplist;
3379 3368 int nios = 0;
3380 3369 uint_t pszc;
3381 3370 struct vattr va;
3382 3371
3383 3372 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3384 3373
3385 3374 ASSERT(szc != 0);
3386 3375 ASSERT(pplist->p_szc == szc);
3387 3376
3388 3377 /*
3389 3378 * downsize will be set to 1 only if we fail to lock pages. this will
3390 3379 * allow subsequent faults to try to relocate the page again. If we
3391 3380 * fail due to misalignment don't downsize and let the caller map the
3392 3381 * whole region with small mappings to avoid more faults into the area
3393 3382 * where we can't get large pages anyway.
3394 3383 */
3395 3384 *downsize = 0;
3396 3385
3397 3386 while (off < eoff) {
3398 3387 newpp = pplist;
3399 3388 ASSERT(newpp != NULL);
3400 3389 ASSERT(PAGE_EXCL(newpp));
3401 3390 ASSERT(!PP_ISFREE(newpp));
3402 3391 /*
3403 3392 * we pass NULL for nrelocp to page_lookup_create()
3404 3393 * so that it doesn't relocate. We relocate here
3405 3394 * later only after we make sure we can lock all
3406 3395 * pages in the range we handle and they are all
3407 3396 * aligned.
3408 3397 */
3409 3398 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3410 3399 ASSERT(pp != NULL);
3411 3400 ASSERT(!PP_ISFREE(pp));
3412 3401 ASSERT(pp->p_vnode == vp);
3413 3402 ASSERT(pp->p_offset == off);
3414 3403 if (pp == newpp) {
3415 3404 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3416 3405 page_sub(&pplist, pp);
3417 3406 ASSERT(PAGE_EXCL(pp));
3418 3407 ASSERT(page_iolock_assert(pp));
3419 3408 page_list_concat(&io_pplist, &pp);
3420 3409 off += PAGESIZE;
3421 3410 continue;
3422 3411 }
3423 3412 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3424 3413 pfn = page_pptonum(pp);
3425 3414 pszc = pp->p_szc;
3426 3415 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3427 3416 IS_P2ALIGNED(pfn, pages)) {
3428 3417 ASSERT(repl_pplist == NULL);
3429 3418 ASSERT(done_pplist == NULL);
3430 3419 ASSERT(pplist == *ppplist);
3431 3420 page_unlock(pp);
3432 3421 page_free_replacement_page(pplist);
3433 3422 page_create_putback(pages);
3434 3423 *ppplist = NULL;
3435 3424 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3436 3425 return (1);
3437 3426 }
3438 3427 if (pszc >= szc) {
3439 3428 page_unlock(pp);
3440 3429 segvn_faultvnmpss_align_err1++;
3441 3430 goto out;
3442 3431 }
3443 3432 ppages = page_get_pagecnt(pszc);
3444 3433 if (!IS_P2ALIGNED(pfn, ppages)) {
3445 3434 ASSERT(pszc > 0);
3446 3435 /*
3447 3436 * sizing down to pszc won't help.
3448 3437 */
3449 3438 page_unlock(pp);
3450 3439 segvn_faultvnmpss_align_err2++;
3451 3440 goto out;
3452 3441 }
3453 3442 pfn = page_pptonum(newpp);
3454 3443 if (!IS_P2ALIGNED(pfn, ppages)) {
3455 3444 ASSERT(pszc > 0);
3456 3445 /*
3457 3446 * sizing down to pszc won't help.
3458 3447 */
3459 3448 page_unlock(pp);
3460 3449 segvn_faultvnmpss_align_err3++;
3461 3450 goto out;
3462 3451 }
3463 3452 if (!PAGE_EXCL(pp)) {
3464 3453 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3465 3454 page_unlock(pp);
3466 3455 *downsize = 1;
3467 3456 *ret_pszc = pp->p_szc;
3468 3457 goto out;
3469 3458 }
3470 3459 targpp = pp;
3471 3460 if (io_pplist != NULL) {
3472 3461 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3473 3462 io_len = off - io_off;
3474 3463 /*
3475 3464 * Some file systems like NFS don't check EOF
3476 3465 * conditions in VOP_PAGEIO(). Check it here
3477 3466 * now that pages are locked SE_EXCL. Any file
3478 3467 * truncation will wait until the pages are
3479 3468 * unlocked so no need to worry that file will
3480 3469 * be truncated after we check its size here.
3481 3470 * XXX fix NFS to remove this check.
3482 3471 */
3483 3472 va.va_mask = AT_SIZE;
3484 3473 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3485 3474 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3486 3475 page_unlock(targpp);
3487 3476 goto out;
3488 3477 }
3489 3478 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3490 3479 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3491 3480 *downsize = 1;
3492 3481 *ret_pszc = 0;
3493 3482 page_unlock(targpp);
3494 3483 goto out;
3495 3484 }
3496 3485 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3497 3486 B_READ, svd->cred, NULL);
3498 3487 if (io_err) {
3499 3488 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3500 3489 page_unlock(targpp);
3501 3490 if (io_err == EDEADLK) {
3502 3491 segvn_vmpss_pageio_deadlk_err++;
3503 3492 }
3504 3493 goto out;
3505 3494 }
3506 3495 nios++;
3507 3496 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3508 3497 while (io_pplist != NULL) {
3509 3498 pp = io_pplist;
3510 3499 page_sub(&io_pplist, pp);
3511 3500 ASSERT(page_iolock_assert(pp));
3512 3501 page_io_unlock(pp);
3513 3502 pgidx = (pp->p_offset - start_off) >>
3514 3503 PAGESHIFT;
3515 3504 ASSERT(pgidx < pages);
3516 3505 ppa[pgidx] = pp;
3517 3506 page_list_concat(&done_pplist, &pp);
3518 3507 }
3519 3508 }
3520 3509 pp = targpp;
3521 3510 ASSERT(PAGE_EXCL(pp));
3522 3511 ASSERT(pp->p_szc <= pszc);
3523 3512 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3524 3513 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3525 3514 page_unlock(pp);
3526 3515 *downsize = 1;
3527 3516 *ret_pszc = pp->p_szc;
3528 3517 goto out;
3529 3518 }
3530 3519 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3531 3520 /*
3532 3521 		 * page szc could have changed before the entire group was
3533 3522 		 * locked. Reread page szc.
3534 3523 */
3535 3524 pszc = pp->p_szc;
3536 3525 ppages = page_get_pagecnt(pszc);
3537 3526
3538 3527 /* link just the roots */
3539 3528 page_list_concat(&targ_pplist, &pp);
3540 3529 page_sub(&pplist, newpp);
3541 3530 page_list_concat(&repl_pplist, &newpp);
3542 3531 off += PAGESIZE;
3543 3532 while (--ppages != 0) {
3544 3533 newpp = pplist;
3545 3534 page_sub(&pplist, newpp);
3546 3535 off += PAGESIZE;
3547 3536 }
3548 3537 io_off = off;
3549 3538 }
3550 3539 if (io_pplist != NULL) {
3551 3540 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3552 3541 io_len = eoff - io_off;
3553 3542 va.va_mask = AT_SIZE;
3554 3543 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3555 3544 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3556 3545 goto out;
3557 3546 }
3558 3547 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3559 3548 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3560 3549 *downsize = 1;
3561 3550 *ret_pszc = 0;
3562 3551 goto out;
3563 3552 }
3564 3553 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3565 3554 B_READ, svd->cred, NULL);
3566 3555 if (io_err) {
3567 3556 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3568 3557 if (io_err == EDEADLK) {
3569 3558 segvn_vmpss_pageio_deadlk_err++;
3570 3559 }
3571 3560 goto out;
3572 3561 }
3573 3562 nios++;
3574 3563 while (io_pplist != NULL) {
3575 3564 pp = io_pplist;
3576 3565 page_sub(&io_pplist, pp);
3577 3566 ASSERT(page_iolock_assert(pp));
3578 3567 page_io_unlock(pp);
3579 3568 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3580 3569 ASSERT(pgidx < pages);
3581 3570 ppa[pgidx] = pp;
3582 3571 }
3583 3572 }
3584 3573 /*
3585 3574 * we're now bound to succeed or panic.
3586 3575 * remove pages from done_pplist. it's not needed anymore.
3587 3576 */
3588 3577 while (done_pplist != NULL) {
3589 3578 pp = done_pplist;
3590 3579 page_sub(&done_pplist, pp);
3591 3580 }
3592 3581 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3593 3582 ASSERT(pplist == NULL);
3594 3583 *ppplist = NULL;
3595 3584 while (targ_pplist != NULL) {
3596 3585 int ret;
3597 3586 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3598 3587 ASSERT(repl_pplist);
3599 3588 pp = targ_pplist;
3600 3589 page_sub(&targ_pplist, pp);
3601 3590 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3602 3591 newpp = repl_pplist;
3603 3592 page_sub(&repl_pplist, newpp);
3604 3593 #ifdef DEBUG
3605 3594 pfn = page_pptonum(pp);
3606 3595 pszc = pp->p_szc;
3607 3596 ppages = page_get_pagecnt(pszc);
3608 3597 ASSERT(IS_P2ALIGNED(pfn, ppages));
3609 3598 pfn = page_pptonum(newpp);
3610 3599 ASSERT(IS_P2ALIGNED(pfn, ppages));
3611 3600 ASSERT(P2PHASE(pfn, pages) == pgidx);
3612 3601 #endif
3613 3602 nreloc = 0;
3614 3603 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3615 3604 if (ret != 0 || nreloc == 0) {
3616 3605 panic("segvn_fill_vp_pages: "
3617 3606 "page_relocate failed");
3618 3607 }
3619 3608 pp = newpp;
3620 3609 while (nreloc-- != 0) {
3621 3610 ASSERT(PAGE_EXCL(pp));
3622 3611 ASSERT(pp->p_vnode == vp);
3623 3612 ASSERT(pgidx ==
3624 3613 ((pp->p_offset - start_off) >> PAGESHIFT));
3625 3614 ppa[pgidx++] = pp;
3626 3615 pp++;
3627 3616 }
3628 3617 }
3629 3618
3630 3619 if (svd->type == MAP_PRIVATE) {
3631 3620 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3632 3621 for (i = 0; i < pages; i++) {
3633 3622 ASSERT(ppa[i] != NULL);
3634 3623 ASSERT(PAGE_EXCL(ppa[i]));
3635 3624 ASSERT(ppa[i]->p_vnode == vp);
3636 3625 ASSERT(ppa[i]->p_offset ==
3637 3626 start_off + (i << PAGESHIFT));
3638 3627 page_downgrade(ppa[i]);
3639 3628 }
3640 3629 ppa[pages] = NULL;
3641 3630 } else {
3642 3631 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3643 3632 /*
3644 3633 * the caller will still call VOP_GETPAGE() for shared segments
3645 3634 * to check FS write permissions. For private segments we map
3646 3635 		 * the file read-only anyway, so no VOP_GETPAGE() is needed.
3647 3636 */
3648 3637 for (i = 0; i < pages; i++) {
3649 3638 ASSERT(ppa[i] != NULL);
3650 3639 ASSERT(PAGE_EXCL(ppa[i]));
3651 3640 ASSERT(ppa[i]->p_vnode == vp);
3652 3641 ASSERT(ppa[i]->p_offset ==
3653 3642 start_off + (i << PAGESHIFT));
3654 3643 page_unlock(ppa[i]);
3655 3644 }
3656 3645 ppa[0] = NULL;
3657 3646 }
3658 3647
3659 3648 return (1);
3660 3649 out:
3661 3650 /*
3662 3651 * Do the cleanup. Unlock target pages we didn't relocate. They are
3663 3652 * linked on targ_pplist by root pages. reassemble unused replacement
3664 3653 * and io pages back to pplist.
3665 3654 */
3666 3655 if (io_pplist != NULL) {
3667 3656 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3668 3657 pp = io_pplist;
3669 3658 do {
3670 3659 ASSERT(pp->p_vnode == vp);
3671 3660 ASSERT(pp->p_offset == io_off);
3672 3661 ASSERT(page_iolock_assert(pp));
3673 3662 page_io_unlock(pp);
3674 3663 page_hashout(pp, NULL);
3675 3664 io_off += PAGESIZE;
3676 3665 } while ((pp = pp->p_next) != io_pplist);
3677 3666 page_list_concat(&io_pplist, &pplist);
3678 3667 pplist = io_pplist;
3679 3668 }
3680 3669 tmp_pplist = NULL;
3681 3670 while (targ_pplist != NULL) {
3682 3671 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3683 3672 pp = targ_pplist;
3684 3673 ASSERT(PAGE_EXCL(pp));
3685 3674 page_sub(&targ_pplist, pp);
3686 3675
3687 3676 pszc = pp->p_szc;
3688 3677 ppages = page_get_pagecnt(pszc);
3689 3678 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3690 3679
3691 3680 if (pszc != 0) {
3692 3681 group_page_unlock(pp);
3693 3682 }
3694 3683 page_unlock(pp);
3695 3684
3696 3685 pp = repl_pplist;
3697 3686 ASSERT(pp != NULL);
3698 3687 ASSERT(PAGE_EXCL(pp));
3699 3688 ASSERT(pp->p_szc == szc);
3700 3689 page_sub(&repl_pplist, pp);
3701 3690
3702 3691 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3703 3692
3704 3693 /* relink replacement page */
3705 3694 page_list_concat(&tmp_pplist, &pp);
3706 3695 while (--ppages != 0) {
3707 3696 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3708 3697 pp++;
3709 3698 ASSERT(PAGE_EXCL(pp));
3710 3699 ASSERT(pp->p_szc == szc);
3711 3700 page_list_concat(&tmp_pplist, &pp);
3712 3701 }
3713 3702 }
3714 3703 if (tmp_pplist != NULL) {
3715 3704 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3716 3705 page_list_concat(&tmp_pplist, &pplist);
3717 3706 pplist = tmp_pplist;
3718 3707 }
3719 3708 /*
3720 3709 * at this point all pages are either on done_pplist or
3721 3710 * pplist. They can't be all on done_pplist otherwise
3722 3711 * we'd've been done.
3723 3712 */
3724 3713 ASSERT(pplist != NULL);
3725 3714 if (nios != 0) {
3726 3715 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3727 3716 pp = pplist;
3728 3717 do {
3729 3718 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3730 3719 ASSERT(pp->p_szc == szc);
3731 3720 ASSERT(PAGE_EXCL(pp));
3732 3721 ASSERT(pp->p_vnode != vp);
3733 3722 pp->p_szc = 0;
3734 3723 } while ((pp = pp->p_next) != pplist);
3735 3724
3736 3725 pp = done_pplist;
3737 3726 do {
3738 3727 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3739 3728 ASSERT(pp->p_szc == szc);
3740 3729 ASSERT(PAGE_EXCL(pp));
3741 3730 ASSERT(pp->p_vnode == vp);
3742 3731 pp->p_szc = 0;
3743 3732 } while ((pp = pp->p_next) != done_pplist);
3744 3733
3745 3734 while (pplist != NULL) {
3746 3735 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3747 3736 pp = pplist;
3748 3737 page_sub(&pplist, pp);
3749 3738 page_free(pp, 0);
3750 3739 }
3751 3740
3752 3741 while (done_pplist != NULL) {
3753 3742 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3754 3743 pp = done_pplist;
3755 3744 page_sub(&done_pplist, pp);
3756 3745 page_unlock(pp);
3757 3746 }
3758 3747 *ppplist = NULL;
3759 3748 return (0);
3760 3749 }
3761 3750 ASSERT(pplist == *ppplist);
3762 3751 if (io_err) {
3763 3752 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3764 3753 /*
3765 3754 * don't downsize on io error.
3766 3755 * see if vop_getpage succeeds.
3767 3756 * pplist may still be used in this case
3768 3757 * for relocations.
3769 3758 */
3770 3759 return (0);
3771 3760 }
3772 3761 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3773 3762 page_free_replacement_page(pplist);
3774 3763 page_create_putback(pages);
3775 3764 *ppplist = NULL;
3776 3765 return (0);
3777 3766 }
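The contract spelled out before segvn_fill_vp_pages() hinges on a simple sentinel: the caller pre-sets ppa[0] to NULL, so a successful return that never filled the array (contiguous pages already existed) can be told apart from one that did. A hedged userland sketch of that caller-side pattern, with a dummy stand-in for the real routine, is:

	#include <stdio.h>
	#include <stddef.h>

	/*
	 * Hypothetical stand-in for segvn_fill_vp_pages(): returns 1 for success
	 * but, like the real routine when contiguous pages already exist, may
	 * leave the array untouched.
	 */
	static int
	fill_pages(void **ppa)
	{
		(void) ppa;
		return (1);	/* success, array deliberately not filled */
	}

	int
	main(void)
	{
		void *ppa[8];

		ppa[0] = NULL;			/* sentinel: "array not filled yet" */
		if (fill_pages(ppa) && ppa[0] == NULL) {
			/* pages already contiguous; caller would now use VOP_GETPAGE() */
			printf("fall back to VOP_GETPAGE()\n");
		}
		return (0);
	}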
3778 3767
3779 3768 int segvn_anypgsz = 0;
3780 3769
3781 3770 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3782 3771 if ((type) == F_SOFTLOCK) { \
3783 3772 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3784 3773 -(pages)); \
3785 3774 }
3786 3775
3787 3776 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3788 3777 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3789 3778 if ((rw) == S_WRITE) { \
3790 3779 for (i = 0; i < (pages); i++) { \
3791 3780 ASSERT((ppa)[i]->p_vnode == \
3792 3781 (ppa)[0]->p_vnode); \
3793 3782 hat_setmod((ppa)[i]); \
3794 3783 } \
3795 3784 } else if ((rw) != S_OTHER && \
3796 3785 ((prot) & (vpprot) & PROT_WRITE)) { \
3797 3786 for (i = 0; i < (pages); i++) { \
3798 3787 ASSERT((ppa)[i]->p_vnode == \
3799 3788 (ppa)[0]->p_vnode); \
3800 3789 if (!hat_ismod((ppa)[i])) { \
3801 3790 prot &= ~PROT_WRITE; \
3802 3791 break; \
3803 3792 } \
3804 3793 } \
3805 3794 } \
3806 3795 }
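For vnodes that sort pages by modified bit (IS_VMODSORT), SEGVN_UPDATE_MODBITS marks every constituent page modified on a write fault; on a non-write fault it only leaves write permission in place when every page is already marked modified, otherwise it strips PROT_WRITE so the first store refaults and sets the mod bit. A minimal illustration of that non-write decision, with hypothetical per-page flags in place of hat_ismod(), is:

	#include <stdio.h>

	#define	PROT_WRITE	0x2

	int
	main(void)
	{
		/* hypothetical per-constituent-page "modified" state for one large page */
		int ismod[] = { 1, 1, 0, 1 };
		int pages = 4;
		int prot = PROT_WRITE;		/* segment would otherwise allow writes */
		int i;

		/* non-write fault: keep PROT_WRITE only if every page is already dirty */
		for (i = 0; i < pages; i++) {
			if (!ismod[i]) {
				prot &= ~PROT_WRITE;
				break;
			}
		}
		printf("map writable up front: %s\n",
		    (prot & PROT_WRITE) ? "yes" : "no");
		return (0);
	}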
3807 3796
3808 3797 #ifdef VM_STATS
3809 3798
3810 3799 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3811 3800 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3812 3801
3813 3802 #else /* VM_STATS */
3814 3803
3815 3804 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3816 3805
3817 3806 #endif
3818 3807
3819 3808 static faultcode_t
3820 3809 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3821 3810 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3822 3811 caddr_t eaddr, int brkcow)
3823 3812 {
3824 3813 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3825 3814 struct anon_map *amp = svd->amp;
3826 3815 uchar_t segtype = svd->type;
3827 3816 uint_t szc = seg->s_szc;
3828 3817 size_t pgsz = page_get_pagesize(szc);
3829 3818 size_t maxpgsz = pgsz;
3830 3819 pgcnt_t pages = btop(pgsz);
3831 3820 pgcnt_t maxpages = pages;
3832 3821 size_t ppasize = (pages + 1) * sizeof (page_t *);
3833 3822 caddr_t a = lpgaddr;
3834 3823 caddr_t maxlpgeaddr = lpgeaddr;
3835 3824 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3836 3825 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3837 3826 struct vpage *vpage = (svd->vpage != NULL) ?
3838 3827 &svd->vpage[seg_page(seg, a)] : NULL;
3839 3828 vnode_t *vp = svd->vp;
3840 3829 page_t **ppa;
3841 3830 uint_t pszc;
3842 3831 size_t ppgsz;
3843 3832 pgcnt_t ppages;
3844 3833 faultcode_t err = 0;
3845 3834 int ierr;
3846 3835 int vop_size_err = 0;
3847 3836 uint_t protchk, prot, vpprot;
3848 3837 ulong_t i;
3849 3838 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3850 3839 anon_sync_obj_t an_cookie;
3851 3840 enum seg_rw arw;
3852 3841 int alloc_failed = 0;
3853 3842 int adjszc_chk;
3854 3843 struct vattr va;
3855 3844 page_t *pplist;
3856 3845 pfn_t pfn;
3857 3846 int physcontig;
3858 3847 int upgrdfail;
3859 3848 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3860 3849 int tron = (svd->tr_state == SEGVN_TR_ON);
3861 3850
3862 3851 ASSERT(szc != 0);
3863 3852 ASSERT(vp != NULL);
3864 3853 ASSERT(brkcow == 0 || amp != NULL);
3865 3854 ASSERT(tron == 0 || amp != NULL);
3866 3855 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3867 3856 ASSERT(!(svd->flags & MAP_NORESERVE));
3868 3857 ASSERT(type != F_SOFTUNLOCK);
3869 3858 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3870 3859 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3871 3860 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3872 3861 ASSERT(seg->s_szc < NBBY * sizeof (int));
3873 3862 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3874 3863 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3875 3864
3876 3865 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3877 3866 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3878 3867
3879 3868 if (svd->flags & MAP_TEXT) {
3880 3869 hat_flag |= HAT_LOAD_TEXT;
3881 3870 }
3882 3871
3883 3872 if (svd->pageprot) {
3884 3873 switch (rw) {
3885 3874 case S_READ:
3886 3875 protchk = PROT_READ;
3887 3876 break;
3888 3877 case S_WRITE:
3889 3878 protchk = PROT_WRITE;
3890 3879 break;
3891 3880 case S_EXEC:
3892 3881 protchk = PROT_EXEC;
3893 3882 break;
3894 3883 case S_OTHER:
3895 3884 default:
3896 3885 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3897 3886 break;
3898 3887 }
3899 3888 } else {
3900 3889 prot = svd->prot;
3901 3890 /* caller has already done segment level protection check. */
3902 3891 }
3903 3892
3904 3893 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3905 3894 SEGVN_VMSTAT_FLTVNPAGES(2);
3906 3895 arw = S_READ;
3907 3896 } else {
3908 3897 arw = rw;
3909 3898 }
3910 3899
3911 3900 ppa = kmem_alloc(ppasize, KM_SLEEP);
3912 3901
3913 3902 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3914 3903
3915 3904 for (;;) {
3916 3905 adjszc_chk = 0;
3917 3906 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3918 3907 if (adjszc_chk) {
3919 3908 while (szc < seg->s_szc) {
3920 3909 uintptr_t e;
3921 3910 uint_t tszc;
3922 3911 tszc = segvn_anypgsz_vnode ? szc + 1 :
3923 3912 seg->s_szc;
3924 3913 ppgsz = page_get_pagesize(tszc);
3925 3914 if (!IS_P2ALIGNED(a, ppgsz) ||
3926 3915 ((alloc_failed >> tszc) & 0x1)) {
3927 3916 break;
3928 3917 }
3929 3918 SEGVN_VMSTAT_FLTVNPAGES(4);
3930 3919 szc = tszc;
3931 3920 pgsz = ppgsz;
3932 3921 pages = btop(pgsz);
3933 3922 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3934 3923 lpgeaddr = (caddr_t)e;
3935 3924 }
3936 3925 }
3937 3926
3938 3927 again:
3939 3928 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3940 3929 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3941 3930 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3942 3931 anon_array_enter(amp, aindx, &an_cookie);
3943 3932 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3944 3933 SEGVN_VMSTAT_FLTVNPAGES(5);
3945 3934 ASSERT(anon_pages(amp->ahp, aindx,
3946 3935 maxpages) == maxpages);
3947 3936 anon_array_exit(&an_cookie);
3948 3937 ANON_LOCK_EXIT(&->a_rwlock);
3949 3938 err = segvn_fault_anonpages(hat, seg,
3950 3939 a, a + maxpgsz, type, rw,
3951 3940 MAX(a, addr),
3952 3941 MIN(a + maxpgsz, eaddr), brkcow);
3953 3942 if (err != 0) {
3954 3943 SEGVN_VMSTAT_FLTVNPAGES(6);
3955 3944 goto out;
3956 3945 }
3957 3946 if (szc < seg->s_szc) {
3958 3947 szc = seg->s_szc;
3959 3948 pgsz = maxpgsz;
3960 3949 pages = maxpages;
3961 3950 lpgeaddr = maxlpgeaddr;
3962 3951 }
3963 3952 goto next;
3964 3953 } else {
3965 3954 ASSERT(anon_pages(amp->ahp, aindx,
3966 3955 maxpages) == 0);
3967 3956 SEGVN_VMSTAT_FLTVNPAGES(7);
3968 3957 anon_array_exit(&an_cookie);
3969 3958 ANON_LOCK_EXIT(&->a_rwlock);
3970 3959 }
3971 3960 }
3972 3961 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3973 3962 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3974 3963
3975 3964 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3976 3965 ASSERT(vpage != NULL);
3977 3966 prot = VPP_PROT(vpage);
3978 3967 ASSERT(sameprot(seg, a, maxpgsz));
3979 3968 if ((prot & protchk) == 0) {
3980 3969 SEGVN_VMSTAT_FLTVNPAGES(8);
3981 3970 err = FC_PROT;
3982 3971 goto out;
3983 3972 }
3984 3973 }
3985 3974 if (type == F_SOFTLOCK) {
3986 3975 atomic_add_long((ulong_t *)&svd->softlockcnt,
3987 3976 pages);
3988 3977 }
3989 3978
3990 3979 pplist = NULL;
3991 3980 physcontig = 0;
3992 3981 ppa[0] = NULL;
3993 3982 if (!brkcow && !tron && szc &&
3994 3983 !page_exists_physcontig(vp, off, szc,
3995 3984 segtype == MAP_PRIVATE ? ppa : NULL)) {
3996 3985 SEGVN_VMSTAT_FLTVNPAGES(9);
3997 3986 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
3998 3987 szc, 0, 0) && type != F_SOFTLOCK) {
3999 3988 SEGVN_VMSTAT_FLTVNPAGES(10);
4000 3989 pszc = 0;
4001 3990 ierr = -1;
4002 3991 alloc_failed |= (1 << szc);
4003 3992 break;
4004 3993 }
4005 3994 if (pplist != NULL &&
4006 3995 vp->v_mpssdata == SEGVN_PAGEIO) {
4007 3996 int downsize;
4008 3997 SEGVN_VMSTAT_FLTVNPAGES(11);
4009 3998 physcontig = segvn_fill_vp_pages(svd,
4010 3999 vp, off, szc, ppa, &pplist,
4011 4000 &pszc, &downsize);
4012 4001 ASSERT(!physcontig || pplist == NULL);
4013 4002 if (!physcontig && downsize &&
4014 4003 type != F_SOFTLOCK) {
4015 4004 ASSERT(pplist == NULL);
4016 4005 SEGVN_VMSTAT_FLTVNPAGES(12);
4017 4006 ierr = -1;
4018 4007 break;
4019 4008 }
4020 4009 ASSERT(!physcontig ||
4021 4010 segtype == MAP_PRIVATE ||
4022 4011 ppa[0] == NULL);
4023 4012 if (physcontig && ppa[0] == NULL) {
4024 4013 physcontig = 0;
4025 4014 }
4026 4015 }
4027 4016 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
4028 4017 SEGVN_VMSTAT_FLTVNPAGES(13);
4029 4018 ASSERT(segtype == MAP_PRIVATE);
4030 4019 physcontig = 1;
4031 4020 }
4032 4021
4033 4022 if (!physcontig) {
4034 4023 SEGVN_VMSTAT_FLTVNPAGES(14);
4035 4024 ppa[0] = NULL;
4036 4025 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
4037 4026 &vpprot, ppa, pgsz, seg, a, arw,
4038 4027 svd->cred, NULL);
4039 4028 #ifdef DEBUG
4040 4029 if (ierr == 0) {
4041 4030 for (i = 0; i < pages; i++) {
4042 4031 ASSERT(PAGE_LOCKED(ppa[i]));
4043 4032 ASSERT(!PP_ISFREE(ppa[i]));
4044 4033 ASSERT(ppa[i]->p_vnode == vp);
4045 4034 ASSERT(ppa[i]->p_offset ==
4046 4035 off + (i << PAGESHIFT));
4047 4036 }
4048 4037 }
4049 4038 #endif /* DEBUG */
4050 4039 if (segtype == MAP_PRIVATE) {
4051 4040 SEGVN_VMSTAT_FLTVNPAGES(15);
4052 4041 vpprot &= ~PROT_WRITE;
4053 4042 }
4054 4043 } else {
4055 4044 ASSERT(segtype == MAP_PRIVATE);
4056 4045 SEGVN_VMSTAT_FLTVNPAGES(16);
4057 4046 vpprot = PROT_ALL & ~PROT_WRITE;
4058 4047 ierr = 0;
4059 4048 }
4060 4049
4061 4050 if (ierr != 0) {
4062 4051 SEGVN_VMSTAT_FLTVNPAGES(17);
4063 4052 if (pplist != NULL) {
4064 4053 SEGVN_VMSTAT_FLTVNPAGES(18);
4065 4054 page_free_replacement_page(pplist);
4066 4055 page_create_putback(pages);
4067 4056 }
4068 4057 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4069 4058 if (a + pgsz <= eaddr) {
4070 4059 SEGVN_VMSTAT_FLTVNPAGES(19);
4071 4060 err = FC_MAKE_ERR(ierr);
4072 4061 goto out;
4073 4062 }
4074 4063 va.va_mask = AT_SIZE;
4075 4064 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4076 4065 SEGVN_VMSTAT_FLTVNPAGES(20);
4077 4066 err = FC_MAKE_ERR(EIO);
4078 4067 goto out;
4079 4068 }
4080 4069 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4081 4070 SEGVN_VMSTAT_FLTVNPAGES(21);
4082 4071 err = FC_MAKE_ERR(ierr);
4083 4072 goto out;
4084 4073 }
4085 4074 if (btopr(va.va_size) <
4086 4075 btopr(off + (eaddr - a))) {
4087 4076 SEGVN_VMSTAT_FLTVNPAGES(22);
4088 4077 err = FC_MAKE_ERR(ierr);
4089 4078 goto out;
4090 4079 }
4091 4080 if (brkcow || tron || type == F_SOFTLOCK) {
4092 4081 /* can't reduce map area */
4093 4082 SEGVN_VMSTAT_FLTVNPAGES(23);
4094 4083 vop_size_err = 1;
4095 4084 goto out;
4096 4085 }
4097 4086 SEGVN_VMSTAT_FLTVNPAGES(24);
4098 4087 ASSERT(szc != 0);
4099 4088 pszc = 0;
4100 4089 ierr = -1;
4101 4090 break;
4102 4091 }
4103 4092
4104 4093 if (amp != NULL) {
4105 4094 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4106 4095 anon_array_enter(amp, aindx, &an_cookie);
4107 4096 }
4108 4097 if (amp != NULL &&
4109 4098 anon_get_ptr(amp->ahp, aindx) != NULL) {
4110 4099 ulong_t taindx = P2ALIGN(aindx, maxpages);
4111 4100
4112 4101 SEGVN_VMSTAT_FLTVNPAGES(25);
4113 4102 ASSERT(anon_pages(amp->ahp, taindx,
4114 4103 maxpages) == maxpages);
4115 4104 for (i = 0; i < pages; i++) {
4116 4105 page_unlock(ppa[i]);
4117 4106 }
4118 4107 anon_array_exit(&an_cookie);
4119 4108 ANON_LOCK_EXIT(&->a_rwlock);
4120 4109 if (pplist != NULL) {
4121 4110 page_free_replacement_page(pplist);
4122 4111 page_create_putback(pages);
4123 4112 }
4124 4113 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4125 4114 if (szc < seg->s_szc) {
4126 4115 SEGVN_VMSTAT_FLTVNPAGES(26);
4127 4116 /*
4128 4117 * For private segments SOFTLOCK
4129 4118 * either always breaks cow (any rw
4130 4119 * type except S_READ_NOCOW) or
4131 4120 * address space is locked as writer
4132 4121 * (S_READ_NOCOW case) and anon slots
4133 4122 * can't show up on second check.
4134 4123 * Therefore if we are here for
4135 4124 * SOFTLOCK case it must be a cow
4136 4125 * break but cow break never reduces
4137 4126 * szc. text replication (tron) in
4138 4127 * this case works as cow break.
4139 4128 * Thus the assert below.
4140 4129 */
4141 4130 ASSERT(!brkcow && !tron &&
4142 4131 type != F_SOFTLOCK);
4143 4132 pszc = seg->s_szc;
4144 4133 ierr = -2;
4145 4134 break;
4146 4135 }
4147 4136 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4148 4137 goto again;
4149 4138 }
4150 4139 #ifdef DEBUG
4151 4140 if (amp != NULL) {
4152 4141 ulong_t taindx = P2ALIGN(aindx, maxpages);
4153 4142 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4154 4143 }
4155 4144 #endif /* DEBUG */
4156 4145
4157 4146 if (brkcow || tron) {
4158 4147 ASSERT(amp != NULL);
4159 4148 ASSERT(pplist == NULL);
4160 4149 ASSERT(szc == seg->s_szc);
4161 4150 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4162 4151 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4163 4152 SEGVN_VMSTAT_FLTVNPAGES(27);
4164 4153 ierr = anon_map_privatepages(amp, aindx, szc,
4165 4154 seg, a, prot, ppa, vpage, segvn_anypgsz,
4166 4155 tron ? PG_LOCAL : 0, svd->cred);
4167 4156 if (ierr != 0) {
4168 4157 SEGVN_VMSTAT_FLTVNPAGES(28);
4169 4158 anon_array_exit(&an_cookie);
4170 4159 ANON_LOCK_EXIT(&->a_rwlock);
4171 4160 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4172 4161 err = FC_MAKE_ERR(ierr);
4173 4162 goto out;
4174 4163 }
4175 4164
4176 4165 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4177 4166 /*
4178 4167 * p_szc can't be changed for locked
4179 4168 * swapfs pages.
4180 4169 */
4181 4170 ASSERT(svd->rcookie ==
4182 4171 HAT_INVALID_REGION_COOKIE);
4183 4172 hat_memload_array(hat, a, pgsz, ppa, prot,
4184 4173 hat_flag);
4185 4174
4186 4175 if (!(hat_flag & HAT_LOAD_LOCK)) {
4187 4176 SEGVN_VMSTAT_FLTVNPAGES(29);
4188 4177 for (i = 0; i < pages; i++) {
4189 4178 page_unlock(ppa[i]);
4190 4179 }
4191 4180 }
4192 4181 anon_array_exit(&an_cookie);
4193 4182 ANON_LOCK_EXIT(&->a_rwlock);
4194 4183 goto next;
4195 4184 }
4196 4185
4197 4186 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4198 4187 (!svd->pageprot && svd->prot == (prot & vpprot)));
4199 4188
4200 4189 pfn = page_pptonum(ppa[0]);
4201 4190 /*
4202 4191 * hat_page_demote() needs an SE_EXCL lock on one of
4203 4192 * constituent page_t's and it decreases root's p_szc
4204 4193 			 * last. This means that if root's p_szc is equal to szc and
4205 4194 			 * all its constituent pages are locked, any
4206 4195 			 * hat_page_demote() that could have changed p_szc to
4207 4196 			 * szc is already done, and no new hat_page_demote()
4208 4197 * can start for this large page.
4209 4198 */
4210 4199
4211 4200 /*
4212 4201 * we need to make sure same mapping size is used for
4213 4202 * the same address range if there's a possibility the
4214 4203 			 * address is already mapped because the hat layer panics
4215 4204 * when translation is loaded for the range already
4216 4205 * mapped with a different page size. We achieve it
4217 4206 * by always using largest page size possible subject
4218 4207 * to the constraints of page size, segment page size
4219 4208 * and page alignment. Since mappings are invalidated
4220 4209 			 * when those constraints change, making it
4221 4210 			 * impossible to reuse a previously used mapping size, no
4222 4211 * mapping size conflicts should happen.
4223 4212 */
4224 4213
4225 4214 chkszc:
4226 4215 if ((pszc = ppa[0]->p_szc) == szc &&
4227 4216 IS_P2ALIGNED(pfn, pages)) {
4228 4217
4229 4218 SEGVN_VMSTAT_FLTVNPAGES(30);
4230 4219 #ifdef DEBUG
4231 4220 for (i = 0; i < pages; i++) {
4232 4221 ASSERT(PAGE_LOCKED(ppa[i]));
4233 4222 ASSERT(!PP_ISFREE(ppa[i]));
4234 4223 ASSERT(page_pptonum(ppa[i]) ==
4235 4224 pfn + i);
4236 4225 ASSERT(ppa[i]->p_szc == szc);
4237 4226 ASSERT(ppa[i]->p_vnode == vp);
4238 4227 ASSERT(ppa[i]->p_offset ==
4239 4228 off + (i << PAGESHIFT));
4240 4229 }
4241 4230 #endif /* DEBUG */
4242 4231 /*
4243 4232 * All pages are of szc we need and they are
4244 4233 * all locked so they can't change szc. load
4245 4234 * translations.
4246 4235 *
4247 4236 * if page got promoted since last check
4248 4237 * we don't need pplist.
4249 4238 */
4250 4239 if (pplist != NULL) {
4251 4240 page_free_replacement_page(pplist);
4252 4241 page_create_putback(pages);
4253 4242 }
4254 4243 if (PP_ISMIGRATE(ppa[0])) {
4255 4244 page_migrate(seg, a, ppa, pages);
4256 4245 }
4257 4246 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4258 4247 prot, vpprot);
4259 4248 hat_memload_array_region(hat, a, pgsz,
4260 4249 ppa, prot & vpprot, hat_flag,
4261 4250 svd->rcookie);
4262 4251
4263 4252 if (!(hat_flag & HAT_LOAD_LOCK)) {
4264 4253 for (i = 0; i < pages; i++) {
4265 4254 page_unlock(ppa[i]);
4266 4255 }
4267 4256 }
4268 4257 if (amp != NULL) {
4269 4258 anon_array_exit(&an_cookie);
4270 4259 ANON_LOCK_EXIT(&->a_rwlock);
4271 4260 }
4272 4261 goto next;
4273 4262 }
4274 4263
4275 4264 /*
4276 4265 * See if upsize is possible.
4277 4266 */
4278 4267 if (pszc > szc && szc < seg->s_szc &&
4279 4268 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4280 4269 pgcnt_t aphase;
4281 4270 uint_t pszc1 = MIN(pszc, seg->s_szc);
4282 4271 ppgsz = page_get_pagesize(pszc1);
4283 4272 ppages = btop(ppgsz);
4284 4273 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4285 4274
4286 4275 ASSERT(type != F_SOFTLOCK);
4287 4276
4288 4277 SEGVN_VMSTAT_FLTVNPAGES(31);
4289 4278 if (aphase != P2PHASE(pfn, ppages)) {
4290 4279 segvn_faultvnmpss_align_err4++;
4291 4280 } else {
4292 4281 SEGVN_VMSTAT_FLTVNPAGES(32);
4293 4282 if (pplist != NULL) {
4294 4283 page_t *pl = pplist;
4295 4284 page_free_replacement_page(pl);
4296 4285 page_create_putback(pages);
4297 4286 }
4298 4287 for (i = 0; i < pages; i++) {
4299 4288 page_unlock(ppa[i]);
4300 4289 }
4301 4290 if (amp != NULL) {
4302 4291 anon_array_exit(&an_cookie);
4303 4292 ANON_LOCK_EXIT(&->a_rwlock);
4304 4293 }
4305 4294 pszc = pszc1;
4306 4295 ierr = -2;
4307 4296 break;
4308 4297 }
4309 4298 }
4310 4299
4311 4300 /*
4312 4301 * check if we should use smallest mapping size.
4313 4302 */
4314 4303 upgrdfail = 0;
4315 4304 if (szc == 0 ||
4316 4305 (pszc >= szc &&
4317 4306 !IS_P2ALIGNED(pfn, pages)) ||
4318 4307 (pszc < szc &&
4319 4308 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4320 4309 &pszc))) {
4321 4310
4322 4311 if (upgrdfail && type != F_SOFTLOCK) {
4323 4312 /*
4324 4313 * segvn_full_szcpages failed to lock
4325 4314 * all pages EXCL. Size down.
4326 4315 */
4327 4316 ASSERT(pszc < szc);
4328 4317
4329 4318 SEGVN_VMSTAT_FLTVNPAGES(33);
4330 4319
4331 4320 if (pplist != NULL) {
4332 4321 page_t *pl = pplist;
4333 4322 page_free_replacement_page(pl);
4334 4323 page_create_putback(pages);
4335 4324 }
4336 4325
4337 4326 for (i = 0; i < pages; i++) {
4338 4327 page_unlock(ppa[i]);
4339 4328 }
4340 4329 if (amp != NULL) {
4341 4330 anon_array_exit(&an_cookie);
4342 4331 ANON_LOCK_EXIT(&->a_rwlock);
4343 4332 }
4344 4333 ierr = -1;
4345 4334 break;
4346 4335 }
4347 4336 if (szc != 0 && !upgrdfail) {
4348 4337 segvn_faultvnmpss_align_err5++;
4349 4338 }
4350 4339 SEGVN_VMSTAT_FLTVNPAGES(34);
4351 4340 if (pplist != NULL) {
4352 4341 page_free_replacement_page(pplist);
4353 4342 page_create_putback(pages);
4354 4343 }
4355 4344 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4356 4345 prot, vpprot);
4357 4346 if (upgrdfail && segvn_anypgsz_vnode) {
4358 4347 /* SOFTLOCK case */
4359 4348 hat_memload_array_region(hat, a, pgsz,
4360 4349 ppa, prot & vpprot, hat_flag,
4361 4350 svd->rcookie);
4362 4351 } else {
4363 4352 for (i = 0; i < pages; i++) {
4364 4353 hat_memload_region(hat,
4365 4354 a + (i << PAGESHIFT),
4366 4355 ppa[i], prot & vpprot,
4367 4356 hat_flag, svd->rcookie);
4368 4357 }
4369 4358 }
4370 4359 if (!(hat_flag & HAT_LOAD_LOCK)) {
4371 4360 for (i = 0; i < pages; i++) {
4372 4361 page_unlock(ppa[i]);
4373 4362 }
4374 4363 }
4375 4364 if (amp != NULL) {
4376 4365 anon_array_exit(&an_cookie);
4377 4366 ANON_LOCK_EXIT(&->a_rwlock);
4378 4367 }
4379 4368 goto next;
4380 4369 }
4381 4370
4382 4371 if (pszc == szc) {
4383 4372 /*
4384 4373 * segvn_full_szcpages() upgraded pages szc.
4385 4374 */
4386 4375 ASSERT(pszc == ppa[0]->p_szc);
4387 4376 ASSERT(IS_P2ALIGNED(pfn, pages));
4388 4377 goto chkszc;
4389 4378 }
4390 4379
4391 4380 if (pszc > szc) {
4392 4381 kmutex_t *szcmtx;
4393 4382 SEGVN_VMSTAT_FLTVNPAGES(35);
4394 4383 /*
4395 4384 * p_szc of ppa[0] can change since we haven't
4396 4385 * locked all constituent pages. Call
4397 4386 * page_lock_szc() to prevent szc changes.
4398 4387 * This should be a rare case that happens when
4399 4388 * multiple segments use a different page size
4400 4389 * to map the same file offsets.
4401 4390 */
4402 4391 szcmtx = page_szc_lock(ppa[0]);
4403 4392 pszc = ppa[0]->p_szc;
4404 4393 ASSERT(szcmtx != NULL || pszc == 0);
4405 4394 ASSERT(ppa[0]->p_szc <= pszc);
4406 4395 if (pszc <= szc) {
4407 4396 SEGVN_VMSTAT_FLTVNPAGES(36);
4408 4397 if (szcmtx != NULL) {
4409 4398 mutex_exit(szcmtx);
4410 4399 }
4411 4400 goto chkszc;
4412 4401 }
4413 4402 if (pplist != NULL) {
4414 4403 /*
4415 4404 * page got promoted since last check.
4416 4405 				 * we don't need the preallocated large
4417 4406 * page.
4418 4407 */
4419 4408 SEGVN_VMSTAT_FLTVNPAGES(37);
4420 4409 page_free_replacement_page(pplist);
4421 4410 page_create_putback(pages);
4422 4411 }
4423 4412 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4424 4413 prot, vpprot);
4425 4414 hat_memload_array_region(hat, a, pgsz, ppa,
4426 4415 prot & vpprot, hat_flag, svd->rcookie);
4427 4416 mutex_exit(szcmtx);
4428 4417 if (!(hat_flag & HAT_LOAD_LOCK)) {
4429 4418 for (i = 0; i < pages; i++) {
4430 4419 page_unlock(ppa[i]);
4431 4420 }
4432 4421 }
4433 4422 if (amp != NULL) {
4434 4423 anon_array_exit(&an_cookie);
4435 4424 ANON_LOCK_EXIT(&->a_rwlock);
4436 4425 }
4437 4426 goto next;
4438 4427 }
4439 4428
4440 4429 /*
4441 4430 * if page got demoted since last check
4442 4431 * we could have not allocated larger page.
4443 4432 * allocate now.
4444 4433 */
4445 4434 if (pplist == NULL &&
4446 4435 page_alloc_pages(vp, seg, a, &pplist, NULL,
4447 4436 szc, 0, 0) && type != F_SOFTLOCK) {
4448 4437 SEGVN_VMSTAT_FLTVNPAGES(38);
4449 4438 for (i = 0; i < pages; i++) {
4450 4439 page_unlock(ppa[i]);
4451 4440 }
4452 4441 if (amp != NULL) {
4453 4442 anon_array_exit(&an_cookie);
4454 4443 ANON_LOCK_EXIT(&->a_rwlock);
4455 4444 }
4456 4445 ierr = -1;
4457 4446 alloc_failed |= (1 << szc);
4458 4447 break;
4459 4448 }
4460 4449
4461 4450 SEGVN_VMSTAT_FLTVNPAGES(39);
4462 4451
4463 4452 if (pplist != NULL) {
4464 4453 segvn_relocate_pages(ppa, pplist);
4465 4454 #ifdef DEBUG
4466 4455 } else {
4467 4456 ASSERT(type == F_SOFTLOCK);
4468 4457 SEGVN_VMSTAT_FLTVNPAGES(40);
4469 4458 #endif /* DEBUG */
4470 4459 }
4471 4460
4472 4461 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4473 4462
4474 4463 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4475 4464 ASSERT(type == F_SOFTLOCK);
4476 4465 for (i = 0; i < pages; i++) {
4477 4466 ASSERT(ppa[i]->p_szc < szc);
4478 4467 hat_memload_region(hat,
4479 4468 a + (i << PAGESHIFT),
4480 4469 ppa[i], prot & vpprot, hat_flag,
4481 4470 svd->rcookie);
4482 4471 }
4483 4472 } else {
4484 4473 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4485 4474 hat_memload_array_region(hat, a, pgsz, ppa,
4486 4475 prot & vpprot, hat_flag, svd->rcookie);
4487 4476 }
4488 4477 if (!(hat_flag & HAT_LOAD_LOCK)) {
4489 4478 for (i = 0; i < pages; i++) {
4490 4479 ASSERT(PAGE_SHARED(ppa[i]));
4491 4480 page_unlock(ppa[i]);
4492 4481 }
4493 4482 }
4494 4483 if (amp != NULL) {
4495 4484 anon_array_exit(&an_cookie);
4496 4485 ANON_LOCK_EXIT(&->a_rwlock);
4497 4486 }
4498 4487
4499 4488 next:
4500 4489 if (vpage != NULL) {
4501 4490 vpage += pages;
4502 4491 }
4503 4492 adjszc_chk = 1;
4504 4493 }
4505 4494 if (a == lpgeaddr)
4506 4495 break;
4507 4496 ASSERT(a < lpgeaddr);
4508 4497
4509 4498 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4510 4499
4511 4500 /*
4512 4501 		 * ierr == -1 means we failed to map with a large page
4513 4502 		 * (either due to allocation/relocation failures or
4514 4503 		 * misalignment with other mappings to this file).
4515 4504 		 *
4516 4505 		 * ierr == -2 means some other thread allocated a large page
4517 4506 		 * after we gave up trying to map with a large page. Retry
4518 4507 		 * with a larger mapping.
4519 4508 */
4520 4509 ASSERT(ierr == -1 || ierr == -2);
4521 4510 ASSERT(ierr == -2 || szc != 0);
4522 4511 ASSERT(ierr == -1 || szc < seg->s_szc);
4523 4512 if (ierr == -2) {
4524 4513 SEGVN_VMSTAT_FLTVNPAGES(41);
4525 4514 ASSERT(pszc > szc && pszc <= seg->s_szc);
4526 4515 szc = pszc;
4527 4516 } else if (segvn_anypgsz_vnode) {
4528 4517 SEGVN_VMSTAT_FLTVNPAGES(42);
4529 4518 szc--;
4530 4519 } else {
4531 4520 SEGVN_VMSTAT_FLTVNPAGES(43);
4532 4521 ASSERT(pszc < szc);
4533 4522 /*
4534 4523 * other process created pszc large page.
4535 4524 * but we still have to drop to 0 szc.
4536 4525 */
4537 4526 szc = 0;
4538 4527 }
4539 4528
4540 4529 pgsz = page_get_pagesize(szc);
4541 4530 pages = btop(pgsz);
4542 4531 if (ierr == -2) {
4543 4532 /*
4544 4533 * Size up case. Note lpgaddr may only be needed for
4545 4534 * softlock case so we don't adjust it here.
4546 4535 */
4547 4536 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4548 4537 ASSERT(a >= lpgaddr);
4549 4538 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4550 4539 off = svd->offset + (uintptr_t)(a - seg->s_base);
4551 4540 aindx = svd->anon_index + seg_page(seg, a);
4552 4541 vpage = (svd->vpage != NULL) ?
4553 4542 &svd->vpage[seg_page(seg, a)] : NULL;
4554 4543 } else {
4555 4544 /*
4556 4545 * Size down case. Note lpgaddr may only be needed for
4557 4546 * softlock case so we don't adjust it here.
4558 4547 */
4559 4548 ASSERT(IS_P2ALIGNED(a, pgsz));
4560 4549 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4561 4550 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4562 4551 ASSERT(a < lpgeaddr);
4563 4552 if (a < addr) {
4564 4553 SEGVN_VMSTAT_FLTVNPAGES(44);
4565 4554 /*
4566 4555 * The beginning of the large page region can
4567 4556 * be pulled to the right to make a smaller
4568 4557 * region. We haven't yet faulted a single
4569 4558 * page.
4570 4559 */
4571 4560 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4572 4561 ASSERT(a >= lpgaddr);
4573 4562 off = svd->offset +
4574 4563 (uintptr_t)(a - seg->s_base);
4575 4564 aindx = svd->anon_index + seg_page(seg, a);
4576 4565 vpage = (svd->vpage != NULL) ?
4577 4566 &svd->vpage[seg_page(seg, a)] : NULL;
4578 4567 }
4579 4568 }
4580 4569 }
4581 4570 out:
4582 4571 kmem_free(ppa, ppasize);
4583 4572 if (!err && !vop_size_err) {
4584 4573 SEGVN_VMSTAT_FLTVNPAGES(45);
4585 4574 return (0);
4586 4575 }
4587 4576 if (type == F_SOFTLOCK && a > lpgaddr) {
4588 4577 SEGVN_VMSTAT_FLTVNPAGES(46);
4589 4578 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4590 4579 }
4591 4580 if (!vop_size_err) {
4592 4581 SEGVN_VMSTAT_FLTVNPAGES(47);
4593 4582 return (err);
4594 4583 }
4595 4584 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4596 4585 /*
4597 4586 * Large page end is mapped beyond the end of file and it's a cow
4598 4587 * fault (can be a text replication induced cow) or softlock so we can't
4599 4588 * reduce the map area. For now just demote the segment. This should
4600 4589 * really only happen if the end of the file changed after the mapping
4601 4590 * was established since when large page segments are created we make
4602 4591 * sure they don't extend beyond the end of the file.
4603 4592 */
4604 4593 SEGVN_VMSTAT_FLTVNPAGES(48);
4605 4594
4606 4595 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4607 4596 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4608 4597 err = 0;
4609 4598 if (seg->s_szc != 0) {
4610 4599 segvn_fltvnpages_clrszc_cnt++;
4611 4600 ASSERT(svd->softlockcnt == 0);
4612 4601 err = segvn_clrszc(seg);
4613 4602 if (err != 0) {
4614 4603 segvn_fltvnpages_clrszc_err++;
4615 4604 }
4616 4605 }
4617 4606 ASSERT(err || seg->s_szc == 0);
4618 4607 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4619 4608 /* segvn_fault will do its job as if szc had been zero to begin with */
4620 4609 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4621 4610 }
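When the outer loop in segvn_fault_vnodepages() retries with a different page size (ierr == -1 sizes down, ierr == -2 sizes up), it re-derives the large-page region boundaries with P2ALIGN/P2ROUNDUP at the new size. A self-contained sketch of that recomputation, using a hypothetical two-entry size ladder instead of page_get_pagesize(szc), is:

	#include <stdio.h>
	#include <stdint.h>

	#define	P2ALIGN(x, a)	((x) & ~((a) - 1))
	#define	P2ROUNDUP(x, a)	P2ALIGN((x) + (a) - 1, (a))

	/* hypothetical size ladder; the kernel asks page_get_pagesize(szc) instead */
	static const uintptr_t pgsizes[] = { 0x1000, 0x200000 };

	int
	main(void)
	{
		uintptr_t addr = 0x10234000, eaddr = 0x10437000;	/* made-up fault range */
		int szc = 1;		/* started with the segment's large page size */
		int ierr = -1;		/* pretend the large-page attempt just failed */

		if (ierr == -1 && szc > 0)
			szc--;		/* size down; ierr == -2 would size up instead */

		uintptr_t pgsz = pgsizes[szc];
		uintptr_t a = P2ALIGN(addr, pgsz);
		uintptr_t lpgeaddr = P2ROUNDUP(eaddr, pgsz);
		printf("retry [%#lx, %#lx) with pgsz %#lx\n",
		    (unsigned long)a, (unsigned long)lpgeaddr, (unsigned long)pgsz);
		return (0);
	}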
4622 4611
4623 4612 /*
4624 4613 * This routine will attempt to fault in one large page.
4625 4614 * it will use smaller pages if that fails.
4626 4615 * It should only be called for pure anonymous segments.
4627 4616 */
4628 4617 static faultcode_t
4629 4618 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4630 4619 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4631 4620 caddr_t eaddr, int brkcow)
4632 4621 {
4633 4622 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4634 4623 struct anon_map *amp = svd->amp;
4635 4624 uchar_t segtype = svd->type;
4636 4625 uint_t szc = seg->s_szc;
4637 4626 size_t pgsz = page_get_pagesize(szc);
4638 4627 size_t maxpgsz = pgsz;
4639 4628 pgcnt_t pages = btop(pgsz);
4640 4629 uint_t ppaszc = szc;
4641 4630 caddr_t a = lpgaddr;
4642 4631 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4643 4632 struct vpage *vpage = (svd->vpage != NULL) ?
4644 4633 &svd->vpage[seg_page(seg, a)] : NULL;
4645 4634 page_t **ppa;
4646 4635 uint_t ppa_szc;
4647 4636 faultcode_t err;
4648 4637 int ierr;
4649 4638 uint_t protchk, prot, vpprot;
4650 4639 ulong_t i;
4651 4640 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4652 4641 anon_sync_obj_t cookie;
4653 4642 int adjszc_chk;
4654 4643 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4655 4644
4656 4645 ASSERT(szc != 0);
4657 4646 ASSERT(amp != NULL);
4658 4647 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4659 4648 ASSERT(!(svd->flags & MAP_NORESERVE));
4660 4649 ASSERT(type != F_SOFTUNLOCK);
4661 4650 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4662 4651 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4663 4652 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4664 4653
4665 4654 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4666 4655
4667 4656 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4668 4657 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4669 4658
4670 4659 if (svd->flags & MAP_TEXT) {
4671 4660 hat_flag |= HAT_LOAD_TEXT;
4672 4661 }
4673 4662
4674 4663 if (svd->pageprot) {
4675 4664 switch (rw) {
4676 4665 case S_READ:
4677 4666 protchk = PROT_READ;
4678 4667 break;
4679 4668 case S_WRITE:
4680 4669 protchk = PROT_WRITE;
4681 4670 break;
4682 4671 case S_EXEC:
4683 4672 protchk = PROT_EXEC;
4684 4673 break;
4685 4674 case S_OTHER:
4686 4675 default:
4687 4676 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4688 4677 break;
4689 4678 }
4690 4679 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4691 4680 } else {
4692 4681 prot = svd->prot;
4693 4682 /* caller has already done segment level protection check. */
4694 4683 }
4695 4684
4696 4685 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4697 4686 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4698 4687 for (;;) {
4699 4688 adjszc_chk = 0;
4700 4689 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4701 4690 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4702 4691 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4703 4692 ASSERT(vpage != NULL);
4704 4693 prot = VPP_PROT(vpage);
4705 4694 ASSERT(sameprot(seg, a, maxpgsz));
4706 4695 if ((prot & protchk) == 0) {
4707 4696 err = FC_PROT;
4708 4697 goto error;
4709 4698 }
4710 4699 }
4711 4700 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4712 4701 pgsz < maxpgsz) {
4713 4702 ASSERT(a > lpgaddr);
4714 4703 szc = seg->s_szc;
4715 4704 pgsz = maxpgsz;
4716 4705 pages = btop(pgsz);
4717 4706 ASSERT(IS_P2ALIGNED(aindx, pages));
4718 4707 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4719 4708 pgsz);
4720 4709 }
4721 4710 if (type == F_SOFTLOCK) {
4722 4711 atomic_add_long((ulong_t *)&svd->softlockcnt,
4723 4712 pages);
4724 4713 }
4725 4714 anon_array_enter(amp, aindx, &cookie);
4726 4715 ppa_szc = (uint_t)-1;
4727 4716 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4728 4717 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4729 4718 segvn_anypgsz, pgflags, svd->cred);
4730 4719 if (ierr != 0) {
4731 4720 anon_array_exit(&cookie);
4732 4721 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4733 4722 if (type == F_SOFTLOCK) {
4734 4723 atomic_add_long(
4735 4724 (ulong_t *)&svd->softlockcnt,
4736 4725 -pages);
4737 4726 }
4738 4727 if (ierr > 0) {
4739 4728 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4740 4729 err = FC_MAKE_ERR(ierr);
4741 4730 goto error;
4742 4731 }
4743 4732 break;
4744 4733 }
4745 4734
4746 4735 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4747 4736
4748 4737 ASSERT(segtype == MAP_SHARED ||
4749 4738 ppa[0]->p_szc <= szc);
4750 4739 ASSERT(segtype == MAP_PRIVATE ||
4751 4740 ppa[0]->p_szc >= szc);
4752 4741
4753 4742 /*
4754 4743 * Handle pages that have been marked for migration
4755 4744 */
4756 4745 if (lgrp_optimizations())
4757 4746 page_migrate(seg, a, ppa, pages);
4758 4747
4759 4748 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4760 4749
4761 4750 if (segtype == MAP_SHARED) {
4762 4751 vpprot |= PROT_WRITE;
4763 4752 }
4764 4753
4765 4754 hat_memload_array(hat, a, pgsz, ppa,
4766 4755 prot & vpprot, hat_flag);
4767 4756
4768 4757 if (hat_flag & HAT_LOAD_LOCK) {
4769 4758 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4770 4759 } else {
4771 4760 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4772 4761 for (i = 0; i < pages; i++)
4773 4762 page_unlock(ppa[i]);
4774 4763 }
4775 4764 if (vpage != NULL)
4776 4765 vpage += pages;
4777 4766
4778 4767 anon_array_exit(&cookie);
4779 4768 adjszc_chk = 1;
4780 4769 }
4781 4770 if (a == lpgeaddr)
4782 4771 break;
4783 4772 ASSERT(a < lpgeaddr);
4784 4773 /*
4785 4774 * ierr == -1 means we failed to allocate a large page.
4786 4775 * so do a size down operation.
4787 4776 *
4788 4777 * ierr == -2 means some other process that privately shares
4789 4778 * pages with this process has allocated a larger page and we
4790 4779 * need to retry with larger pages. So do a size up
4791 4780 * operation. This relies on the fact that large pages are
4792 4781 * never partially shared i.e. if we share any constituent
4793 4782 * page of a large page with another process we must share the
4794 4783 * entire large page. Note this cannot happen for SOFTLOCK
4795 4784 * case, unless current address (a) is at the beginning of the
4796 4785 * next page size boundary because the other process couldn't
4797 4786 * have relocated locked pages.
4798 4787 */
4799 4788 ASSERT(ierr == -1 || ierr == -2);
4800 4789
4801 4790 if (segvn_anypgsz) {
4802 4791 ASSERT(ierr == -2 || szc != 0);
4803 4792 ASSERT(ierr == -1 || szc < seg->s_szc);
4804 4793 szc = (ierr == -1) ? szc - 1 : szc + 1;
4805 4794 } else {
4806 4795 /*
4807 4796 * For non COW faults and segvn_anypgsz == 0
4808 4797 * we need to be careful not to loop forever
4809 4798 * if existing page is found with szc other
4810 4799 * than 0 or seg->s_szc. This could be due
4811 4800 * to page relocations on behalf of DR or
4812 4801 * more likely large page creation. For this
4813 4802 * case simply re-size to existing page's szc
4814 4803 * if returned by anon_map_getpages().
4815 4804 */
4816 4805 if (ppa_szc == (uint_t)-1) {
4817 4806 szc = (ierr == -1) ? 0 : seg->s_szc;
4818 4807 } else {
4819 4808 ASSERT(ppa_szc <= seg->s_szc);
4820 4809 ASSERT(ierr == -2 || ppa_szc < szc);
4821 4810 ASSERT(ierr == -1 || ppa_szc > szc);
4822 4811 szc = ppa_szc;
4823 4812 }
4824 4813 }
4825 4814
4826 4815 pgsz = page_get_pagesize(szc);
4827 4816 pages = btop(pgsz);
4828 4817 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4829 4818 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4830 4819 if (type == F_SOFTLOCK) {
4831 4820 /*
4832 4821 * For softlocks we cannot reduce the fault area
4833 4822 * (calculated based on the largest page size for this
4834 4823 			 * segment) for size down, and for size ups a is already
4835 4824 			 * aligned to the next page size, as asserted above.
4836 4825 			 * Therefore just continue in case of softlock.
4837 4826 */
4838 4827 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4839 4828 continue; /* keep lint happy */
4840 4829 } else if (ierr == -2) {
4841 4830
4842 4831 /*
4843 4832 * Size up case. Note lpgaddr may only be needed for
4844 4833 * softlock case so we don't adjust it here.
4845 4834 */
4846 4835 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4847 4836 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4848 4837 ASSERT(a >= lpgaddr);
4849 4838 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4850 4839 aindx = svd->anon_index + seg_page(seg, a);
4851 4840 vpage = (svd->vpage != NULL) ?
4852 4841 &svd->vpage[seg_page(seg, a)] : NULL;
4853 4842 } else {
4854 4843 /*
4855 4844 * Size down case. Note lpgaddr may only be needed for
4856 4845 * softlock case so we don't adjust it here.
4857 4846 */
4858 4847 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4859 4848 ASSERT(IS_P2ALIGNED(a, pgsz));
4860 4849 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4861 4850 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4862 4851 ASSERT(a < lpgeaddr);
4863 4852 if (a < addr) {
4864 4853 /*
4865 4854 * The beginning of the large page region can
4866 4855 * be pulled to the right to make a smaller
4867 4856 * region. We haven't yet faulted a single
4868 4857 * page.
4869 4858 */
4870 4859 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4871 4860 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4872 4861 ASSERT(a >= lpgaddr);
4873 4862 aindx = svd->anon_index + seg_page(seg, a);
4874 4863 vpage = (svd->vpage != NULL) ?
4875 4864 &svd->vpage[seg_page(seg, a)] : NULL;
4876 4865 }
4877 4866 }
4878 4867 }
4879 4868 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4880 4869 ANON_LOCK_EXIT(&->a_rwlock);
4881 4870 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4882 4871 return (0);
4883 4872 error:
4884 4873 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4885 4874 ANON_LOCK_EXIT(&->a_rwlock);
4886 4875 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4887 4876 if (type == F_SOFTLOCK && a > lpgaddr) {
4888 4877 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4889 4878 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4890 4879 }
4891 4880 return (err);
4892 4881 }
4893 4882
4894 4883 int fltadvice = 1; /* set to free behind pages for sequential access */
4895 4884
4896 4885 /*
4897 4886 * This routine is called via a machine specific fault handling routine.
4898 4887 * It is also called by software routines wishing to lock or unlock
4899 4888 * a range of addresses.
4900 4889 *
4901 4890 * Here is the basic algorithm:
4902 4891 * If unlocking
4903 4892 * Call segvn_softunlock
4904 4893 * Return
4905 4894 * endif
4906 4895 * Checking and set up work
4907 4896 * If we will need some non-anonymous pages
4908 4897 * Call VOP_GETPAGE over the range of non-anonymous pages
4909 4898 * endif
4910 4899 * Loop over all addresses requested
4911 4900 * Call segvn_faultpage passing in page list
4912 4901 * to load up translations and handle anonymous pages
4913 4902 * endloop
4914 4903 * Load up translation to any additional pages in page list not
4915 4904 * already handled that fit into this segment
4916 4905 */
4917 4906 static faultcode_t
4918 4907 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4919 4908 enum fault_type type, enum seg_rw rw)
4920 4909 {
4921 4910 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4922 4911 page_t **plp, **ppp, *pp;
4923 4912 u_offset_t off;
4924 4913 caddr_t a;
4925 4914 struct vpage *vpage;
4926 4915 uint_t vpprot, prot;
4927 4916 int err;
4928 - page_t *pl[PVN_GETPAGE_NUM + 1];
4917 + page_t *pl[FAULT_TMP_PAGES_NUM + 1];
4929 4918 size_t plsz, pl_alloc_sz;
4930 4919 size_t page;
4931 4920 ulong_t anon_index;
4932 4921 struct anon_map *amp;
4933 4922 int dogetpage = 0;
4934 4923 caddr_t lpgaddr, lpgeaddr;
4935 4924 size_t pgsz;
4936 4925 anon_sync_obj_t cookie;
4937 4926 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4938 4927
4939 4928 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4940 4929 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4941 4930
4942 4931 /*
4943 4932 * First handle the easy stuff
4944 4933 */
4945 4934 if (type == F_SOFTUNLOCK) {
4946 4935 if (rw == S_READ_NOCOW) {
4947 4936 rw = S_READ;
4948 4937 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4949 4938 }
4950 4939 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4951 4940 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4952 4941 page_get_pagesize(seg->s_szc);
4953 4942 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4954 4943 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4955 4944 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4956 4945 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4957 4946 return (0);
4958 4947 }
4959 4948
4960 4949 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4961 4950 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4962 4951 if (brkcow == 0) {
4963 4952 if (svd->tr_state == SEGVN_TR_INIT) {
4964 4953 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4965 4954 if (svd->tr_state == SEGVN_TR_INIT) {
4966 4955 ASSERT(svd->vp != NULL && svd->amp == NULL);
4967 4956 ASSERT(svd->flags & MAP_TEXT);
4968 4957 ASSERT(svd->type == MAP_PRIVATE);
4969 4958 segvn_textrepl(seg);
4970 4959 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4971 4960 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4972 4961 svd->amp != NULL);
4973 4962 }
4974 4963 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4975 4964 }
4976 4965 } else if (svd->tr_state != SEGVN_TR_OFF) {
4977 4966 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4978 4967
4979 4968 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
4980 4969 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
4981 4970 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4982 4971 return (FC_PROT);
4983 4972 }
4984 4973
4985 4974 if (svd->tr_state == SEGVN_TR_ON) {
4986 4975 ASSERT(svd->vp != NULL && svd->amp != NULL);
4987 4976 segvn_textunrepl(seg, 0);
4988 4977 ASSERT(svd->amp == NULL &&
4989 4978 svd->tr_state == SEGVN_TR_OFF);
4990 4979 } else if (svd->tr_state != SEGVN_TR_OFF) {
4991 4980 svd->tr_state = SEGVN_TR_OFF;
4992 4981 }
4993 4982 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
4994 4983 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4995 4984 }
4996 4985
4997 4986 top:
4998 4987 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4999 4988
5000 4989 /*
5001 4990 * If we have the same protections for the entire segment,
5002 4991 	 * ensure that the access being attempted is legitimate.
5003 4992 */
5004 4993
5005 4994 if (svd->pageprot == 0) {
5006 4995 uint_t protchk;
5007 4996
5008 4997 switch (rw) {
5009 4998 case S_READ:
5010 4999 case S_READ_NOCOW:
5011 5000 protchk = PROT_READ;
5012 5001 break;
5013 5002 case S_WRITE:
5014 5003 protchk = PROT_WRITE;
5015 5004 break;
5016 5005 case S_EXEC:
5017 5006 protchk = PROT_EXEC;
5018 5007 break;
5019 5008 case S_OTHER:
5020 5009 default:
5021 5010 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
5022 5011 break;
5023 5012 }
5024 5013
5025 5014 if ((svd->prot & protchk) == 0) {
5026 5015 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5027 5016 return (FC_PROT); /* illegal access type */
5028 5017 }
5029 5018 }
5030 5019
5031 5020 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5032 5021 /* this must be SOFTLOCK S_READ fault */
5033 5022 ASSERT(svd->amp == NULL);
5034 5023 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5035 5024 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5036 5025 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5037 5026 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5038 5027 /*
5039 5028 * this must be the first ever non S_READ_NOCOW
5040 5029 * softlock for this segment.
5041 5030 */
5042 5031 ASSERT(svd->softlockcnt == 0);
5043 5032 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5044 5033 HAT_REGION_TEXT);
5045 5034 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5046 5035 }
5047 5036 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5048 5037 goto top;
5049 5038 }
5050 5039
5051 5040 /*
5052 5041 * We can't allow the long term use of softlocks for vmpss segments,
5053 5042 * because in some file truncation cases we should be able to demote
5054 5043 * the segment, which requires that there are no softlocks. The
5055 5044 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5056 5045 * segment is S_READ_NOCOW, where the caller holds the address space
5057 5046 * locked as writer and calls softunlock before dropping the as lock.
5058 5047 * S_READ_NOCOW is used by /proc to read memory from another user.
5059 5048 *
5060 5049 * Another deadlock between SOFTLOCK and file truncation can happen
5061 5050 * because segvn_fault_vnodepages() calls the FS one pagesize at
5062 5051 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5063 5052 * can cause a deadlock because the first set of page_t's remain
5064 5053 * locked SE_SHARED. To avoid this, we demote segments on a first
5065 5054 * SOFTLOCK if they have a length greater than the segment's
5066 5055 * page size.
5067 5056 *
5068 5057 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5069 5058 * the access type is S_READ_NOCOW and the fault length is less than
5070 5059 * or equal to the segment's page size. While this is quite restrictive,
5071 5060 * it should be the most common case of SOFTLOCK against a vmpss
5072 5061 * segment.
5073 5062 *
5074 5063 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5075 5064 * caller makes sure no COW will be caused by another thread for a
5076 5065 * softlocked page.
5077 5066 */
5078 5067 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5079 5068 int demote = 0;
5080 5069
5081 5070 if (rw != S_READ_NOCOW) {
5082 5071 demote = 1;
5083 5072 }
5084 5073 if (!demote && len > PAGESIZE) {
5085 5074 pgsz = page_get_pagesize(seg->s_szc);
5086 5075 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5087 5076 lpgeaddr);
5088 5077 if (lpgeaddr - lpgaddr > pgsz) {
5089 5078 demote = 1;
5090 5079 }
5091 5080 }
5092 5081
5093 5082 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5094 5083
5095 5084 if (demote) {
5096 5085 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5097 5086 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5098 5087 if (seg->s_szc != 0) {
5099 5088 segvn_vmpss_clrszc_cnt++;
5100 5089 ASSERT(svd->softlockcnt == 0);
5101 5090 err = segvn_clrszc(seg);
5102 5091 if (err) {
5103 5092 segvn_vmpss_clrszc_err++;
5104 5093 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5105 5094 return (FC_MAKE_ERR(err));
5106 5095 }
5107 5096 }
5108 5097 ASSERT(seg->s_szc == 0);
5109 5098 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5110 5099 goto top;
5111 5100 }
5112 5101 }
5113 5102
5114 5103 /*
5115 5104 * Check to see if we need to allocate an anon_map structure.
5116 5105 */
5117 5106 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5118 5107 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5119 5108 /*
5120 5109 * Drop the "read" lock on the segment and acquire
5121 5110 * the "write" version since we have to allocate the
5122 5111 * anon_map.
5123 5112 */
5124 5113 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5125 5114 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5126 5115
5127 5116 if (svd->amp == NULL) {
5128 5117 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5129 5118 svd->amp->a_szc = seg->s_szc;
5130 5119 }
5131 5120 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5132 5121
5133 5122 /*
5134 5123 * Start all over again since segment protections
5135 5124 * may have changed after we dropped the "read" lock.
5136 5125 */
5137 5126 goto top;
5138 5127 }
5139 5128
5140 5129 /*
5141 5130 * S_READ_NOCOW vs S_READ distinction was
5142 5131 * only needed for the code above. After
5143 5132 * that we treat it as S_READ.
5144 5133 */
5145 5134 if (rw == S_READ_NOCOW) {
5146 5135 ASSERT(type == F_SOFTLOCK);
5147 5136 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5148 5137 rw = S_READ;
5149 5138 }
5150 5139
5151 5140 amp = svd->amp;
5152 5141
5153 5142 /*
5154 5143 * MADV_SEQUENTIAL work is ignored for large page segments.
5155 5144 */
5156 5145 if (seg->s_szc != 0) {
5157 5146 pgsz = page_get_pagesize(seg->s_szc);
5158 5147 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5159 5148 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5160 5149 if (svd->vp == NULL) {
5161 5150 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5162 5151 lpgeaddr, type, rw, addr, addr + len, brkcow);
5163 5152 } else {
5164 5153 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5165 5154 lpgeaddr, type, rw, addr, addr + len, brkcow);
5166 5155 if (err == IE_RETRY) {
5167 5156 ASSERT(seg->s_szc == 0);
5168 5157 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5169 5158 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5170 5159 goto top;
5171 5160 }
5172 5161 }
5173 5162 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5174 5163 return (err);
5175 5164 }
5176 5165
5177 5166 page = seg_page(seg, addr);
5178 5167 if (amp != NULL) {
5179 5168 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5180 5169 anon_index = svd->anon_index + page;
5181 5170
5182 5171 if (type == F_PROT && rw == S_READ &&
5183 5172 svd->tr_state == SEGVN_TR_OFF &&
5184 5173 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5185 5174 size_t index = anon_index;
5186 5175 struct anon *ap;
5187 5176
5188 5177 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5189 5178 /*
5190 5179 * The fast path could apply to S_WRITE also, except
5191 5180 * that the protection fault could be caused by lazy
5192 5181 * tlb flush when ro->rw. In this case, the pte is
5193 5182 * RW already. But RO in the other cpu's tlb causes
5194 5183 * the fault. Since hat_chgprot won't do anything if
5195 5184 * pte doesn't change, we may end up faulting
5196 5185 * indefinitely until the RO tlb entry gets replaced.
5197 5186 */
5198 5187 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5199 5188 anon_array_enter(amp, index, &cookie);
5200 5189 ap = anon_get_ptr(amp->ahp, index);
5201 5190 anon_array_exit(&cookie);
5202 5191 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5203 5192 					ANON_LOCK_EXIT(&amp->a_rwlock);
5204 5193 goto slow;
5205 5194 }
5206 5195 }
5207 5196 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5208 5197 			ANON_LOCK_EXIT(&amp->a_rwlock);
5209 5198 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5210 5199 return (0);
5211 5200 }
5212 5201 }
5213 5202 slow:
5214 5203
5215 5204 if (svd->vpage == NULL)
5216 5205 vpage = NULL;
5217 5206 else
5218 5207 vpage = &svd->vpage[page];
5219 5208
5220 5209 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5221 5210
5222 5211 /*
5223 5212 * If MADV_SEQUENTIAL has been set for the particular page we
5224 5213 * are faulting on, free behind all pages in the segment and put
5225 5214 * them on the free list.
5226 5215 */
5227 5216
5228 5217 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5229 5218 struct vpage *vpp;
5230 5219 ulong_t fanon_index;
5231 5220 size_t fpage;
5232 5221 u_offset_t pgoff, fpgoff;
5233 5222 struct vnode *fvp;
5234 5223 struct anon *fap = NULL;
5235 5224
5236 5225 if (svd->advice == MADV_SEQUENTIAL ||
5237 5226 (svd->pageadvice &&
5238 5227 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5239 5228 pgoff = off - PAGESIZE;
5240 5229 fpage = page - 1;
5241 5230 if (vpage != NULL)
5242 5231 vpp = &svd->vpage[fpage];
5243 5232 if (amp != NULL)
5244 5233 fanon_index = svd->anon_index + fpage;
5245 5234
5246 5235 while (pgoff > svd->offset) {
5247 5236 if (svd->advice != MADV_SEQUENTIAL &&
5248 5237 (!svd->pageadvice || (vpage &&
5249 5238 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5250 5239 break;
5251 5240
5252 5241 /*
5253 5242 * If this is an anon page, we must find the
5254 5243 * correct <vp, offset> for it
5255 5244 */
5256 5245 fap = NULL;
5257 5246 if (amp != NULL) {
5258 5247 					ANON_LOCK_ENTER(&amp->a_rwlock,
5259 5248 RW_READER);
5260 5249 anon_array_enter(amp, fanon_index,
5261 5250 &cookie);
5262 5251 fap = anon_get_ptr(amp->ahp,
5263 5252 fanon_index);
5264 5253 if (fap != NULL) {
5265 5254 swap_xlate(fap, &fvp, &fpgoff);
5266 5255 } else {
5267 5256 fpgoff = pgoff;
5268 5257 fvp = svd->vp;
5269 5258 }
5270 5259 anon_array_exit(&cookie);
5271 5260 					ANON_LOCK_EXIT(&amp->a_rwlock);
5272 5261 } else {
5273 5262 fpgoff = pgoff;
5274 5263 fvp = svd->vp;
5275 5264 }
5276 5265 if (fvp == NULL)
5277 5266 break; /* XXX */
5278 5267 /*
5279 5268 * Skip pages that are free or have an
5280 5269 * "exclusive" lock.
5281 5270 */
5282 5271 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5283 5272 if (pp == NULL)
5284 5273 break;
5285 5274 /*
5286 5275 * We don't need the page_struct_lock to test
5287 5276 * as this is only advisory; even if we
5288 5277 * acquire it someone might race in and lock
5289 5278 * the page after we unlock and before the
5290 5279 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5291 5280 */
5292 5281 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5293 5282 /*
5294 5283 * Hold the vnode before releasing
5295 5284 * the page lock to prevent it from
5296 5285 * being freed and re-used by some
5297 5286 * other thread.
5298 5287 */
5299 5288 VN_HOLD(fvp);
5300 5289 page_unlock(pp);
5301 5290 /*
5302 5291 * We should build a page list
5303 5292 * to kluster putpages XXX
5304 5293 */
5305 5294 (void) VOP_PUTPAGE(fvp,
5306 5295 (offset_t)fpgoff, PAGESIZE,
5307 5296 (B_DONTNEED|B_FREE|B_ASYNC),
5308 5297 svd->cred, NULL);
5309 5298 VN_RELE(fvp);
5310 5299 } else {
5311 5300 /*
5312 5301 * XXX - Should the loop terminate if
5313 5302 * the page is `locked'?
5314 5303 */
5315 5304 page_unlock(pp);
5316 5305 }
5317 5306 --vpp;
5318 5307 --fanon_index;
5319 5308 pgoff -= PAGESIZE;
5320 5309 }
5321 5310 }
5322 5311 }
5323 5312
5324 5313 plp = pl;
5325 5314 *plp = NULL;
5326 5315 pl_alloc_sz = 0;
5327 5316
5328 5317 /*
5329 5318 * See if we need to call VOP_GETPAGE for
5330 5319 * *any* of the range being faulted on.
5331 5320 * We can skip all of this work if there
5332 5321 * was no original vnode.
5333 5322 */
5334 5323 if (svd->vp != NULL) {
5335 5324 u_offset_t vp_off;
5336 5325 size_t vp_len;
5337 5326 struct anon *ap;
5338 5327 vnode_t *vp;
5339 5328
5340 5329 vp_off = off;
5341 5330 vp_len = len;
5342 5331
5343 5332 if (amp == NULL)
5344 5333 dogetpage = 1;
5345 5334 else {
5346 5335 /*
5347 5336 * Only acquire reader lock to prevent amp->ahp
5348 5337 * from being changed. It's ok to miss pages,
5349 5338 * hence we don't do anon_array_enter
5350 5339 */
5351 5340 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5352 5341 ap = anon_get_ptr(amp->ahp, anon_index);
5353 5342
5354 5343 if (len <= PAGESIZE)
5355 5344 /* inline non_anon() */
5356 5345 dogetpage = (ap == NULL);
5357 5346 else
5358 5347 dogetpage = non_anon(amp->ahp, anon_index,
5359 5348 &vp_off, &vp_len);
5360 5349 			ANON_LOCK_EXIT(&amp->a_rwlock);
5361 5350 }
5362 5351
5363 5352 if (dogetpage) {
5364 5353 enum seg_rw arw;
5365 5354 struct as *as = seg->s_as;
5366 5355
5367 - if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5356 + if (len > FAULT_TMP_PAGES_SZ) {
5368 5357 /*
5369 5358 * Page list won't fit in local array,
5370 5359 * allocate one of the needed size.
5371 5360 */
5372 5361 pl_alloc_sz =
5373 5362 (btop(len) + 1) * sizeof (page_t *);
5374 5363 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5375 5364 plp[0] = NULL;
5376 5365 plsz = len;
5377 5366 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5378 5367 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5379 5368 (((size_t)(addr + PAGESIZE) <
5380 5369 (size_t)(seg->s_base + seg->s_size)) &&
5381 5370 hat_probe(as->a_hat, addr + PAGESIZE))) {
5382 5371 /*
5383 5372 * Ask VOP_GETPAGE to return the exact number
5384 5373 * of pages if
5385 5374 * (a) this is a COW fault, or
5386 5375 * (b) this is a software fault, or
5387 5376 * (c) next page is already mapped.
5388 5377 */
5389 5378 plsz = len;
5390 5379 } else {
5391 5380 /*
5392 5381 * Ask VOP_GETPAGE to return adjacent pages
5393 5382 * within the segment.
5394 5383 */
5395 - plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5384 + plsz = MIN((size_t)FAULT_TMP_PAGES_SZ, (size_t)
5396 5385 ((seg->s_base + seg->s_size) - addr));
5397 5386 ASSERT((addr + plsz) <=
5398 5387 (seg->s_base + seg->s_size));
5399 5388 }
5400 5389
5401 5390 /*
5402 5391 * Need to get some non-anonymous pages.
5403 5392 * We need to make only one call to GETPAGE to do
5404 5393 * this to prevent certain deadlocking conditions
5405 5394 * when we are doing locking. In this case
5406 5395 * non_anon() should have picked up the smallest
5407 5396 * range which includes all the non-anonymous
5408 5397 * pages in the requested range. We have to
5409 5398 * be careful regarding which rw flag to pass in
5410 5399 * because on a private mapping, the underlying
5411 5400 * object is never allowed to be written.
5412 5401 */
5413 5402 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5414 5403 arw = S_READ;
5415 5404 } else {
5416 5405 arw = rw;
5417 5406 }
5418 5407 vp = svd->vp;
5419 5408 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5420 5409 "segvn_getpage:seg %p addr %p vp %p",
5421 5410 seg, addr, vp);
5422 5411 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5423 5412 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5424 5413 svd->cred, NULL);
5425 5414 if (err) {
5426 5415 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5427 5416 segvn_pagelist_rele(plp);
5428 5417 if (pl_alloc_sz)
5429 5418 kmem_free(plp, pl_alloc_sz);
5430 5419 return (FC_MAKE_ERR(err));
5431 5420 }
5432 5421 if (svd->type == MAP_PRIVATE)
5433 5422 vpprot &= ~PROT_WRITE;
5434 5423 }
5435 5424 }
5436 5425
5437 5426 /*
5438 5427 * N.B. at this time the plp array has all the needed non-anon
5439 5428 * pages in addition to (possibly) having some adjacent pages.
5440 5429 */
5441 5430
5442 5431 /*
5443 5432 * Always acquire the anon_array_lock to prevent
5444 5433 * 2 threads from allocating separate anon slots for
5445 5434 * the same "addr".
5446 5435 *
5447 5436 * If this is a copy-on-write fault and we don't already
5448 5437 * have the anon_array_lock, acquire it to prevent the
5449 5438 * fault routine from handling multiple copy-on-write faults
5450 5439 * on the same "addr" in the same address space.
5451 5440 *
5452 5441 * Only one thread should deal with the fault since after
5453 5442 * it is handled, the other threads can acquire a translation
5454 5443 * to the newly created private page. This prevents two or
5455 5444 * more threads from creating different private pages for the
5456 5445 * same fault.
5457 5446 *
5458 5447 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5459 5448 * to prevent deadlock between this thread and another thread
5460 5449 * which has soft-locked this page and wants to acquire serial_lock.
5461 5450 * ( bug 4026339 )
5462 5451 *
5463 5452 * The fix for bug 4026339 becomes unnecessary when using the
5464 5453 * locking scheme with per amp rwlock and a global set of hash
5465 5454 * lock, anon_array_lock. If we steal a vnode page when low
5466 5455 	 * on memory and upgrade the page lock through page_rename,
5467 5456 * then the page is PAGE_HANDLED, nothing needs to be done
5468 5457 * for this page after returning from segvn_faultpage.
5469 5458 *
5470 5459 * But really, the page lock should be downgraded after
5471 5460 * the stolen page is page_rename'd.
5472 5461 */
5473 5462
5474 5463 if (amp != NULL)
5475 5464 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5476 5465
5477 5466 /*
5478 5467 * Ok, now loop over the address range and handle faults
5479 5468 */
5480 5469 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5481 5470 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5482 5471 type, rw, brkcow);
5483 5472 if (err) {
5484 5473 if (amp != NULL)
5485 5474 				ANON_LOCK_EXIT(&amp->a_rwlock);
5486 5475 if (type == F_SOFTLOCK && a > addr) {
5487 5476 segvn_softunlock(seg, addr, (a - addr),
5488 5477 S_OTHER);
5489 5478 }
5490 5479 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5491 5480 segvn_pagelist_rele(plp);
5492 5481 if (pl_alloc_sz)
5493 5482 kmem_free(plp, pl_alloc_sz);
5494 5483 return (err);
5495 5484 }
5496 5485 if (vpage) {
5497 5486 vpage++;
5498 5487 } else if (svd->vpage) {
5499 5488 page = seg_page(seg, addr);
5500 5489 vpage = &svd->vpage[++page];
5501 5490 }
5502 5491 }
5503 5492
5504 5493 /* Didn't get pages from the underlying fs so we're done */
5505 5494 if (!dogetpage)
5506 5495 goto done;
5507 5496
5508 5497 /*
5509 5498 * Now handle any other pages in the list returned.
5510 5499 * If the page can be used, load up the translations now.
5511 5500 * Note that the for loop will only be entered if "plp"
5512 5501 * is pointing to a non-NULL page pointer which means that
5513 5502 * VOP_GETPAGE() was called and vpprot has been initialized.
5514 5503 */
5515 5504 if (svd->pageprot == 0)
5516 5505 prot = svd->prot & vpprot;
5517 5506
5518 5507
5519 5508 /*
5520 5509 * Large Files: diff should be unsigned value because we started
5521 5510 * supporting > 2GB segment sizes from 2.5.1 and when a
5522 5511 * large file of size > 2GB gets mapped to address space
5523 5512 * the diff value can be > 2GB.
5524 5513 */
5525 5514
5526 5515 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5527 5516 size_t diff;
5528 5517 struct anon *ap;
5529 5518 int anon_index;
5530 5519 anon_sync_obj_t cookie;
5531 5520 int hat_flag = HAT_LOAD_ADV;
5532 5521
5533 5522 if (svd->flags & MAP_TEXT) {
5534 5523 hat_flag |= HAT_LOAD_TEXT;
5535 5524 }
5536 5525
5537 5526 if (pp == PAGE_HANDLED)
5538 5527 continue;
5539 5528
5540 5529 if (svd->tr_state != SEGVN_TR_ON &&
5541 5530 pp->p_offset >= svd->offset &&
5542 5531 pp->p_offset < svd->offset + seg->s_size) {
5543 5532
5544 5533 diff = pp->p_offset - svd->offset;
5545 5534
5546 5535 /*
5547 5536 * Large Files: Following is the assertion
5548 5537 * validating the above cast.
5549 5538 */
5550 5539 ASSERT(svd->vp == pp->p_vnode);
5551 5540
5552 5541 page = btop(diff);
5553 5542 if (svd->pageprot)
5554 5543 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5555 5544
5556 5545 /*
5557 5546 * Prevent other threads in the address space from
5558 5547 * creating private pages (i.e., allocating anon slots)
5559 5548 * while we are in the process of loading translations
5560 5549 * to additional pages returned by the underlying
5561 5550 * object.
5562 5551 */
5563 5552 if (amp != NULL) {
5564 5553 anon_index = svd->anon_index + page;
5565 5554 anon_array_enter(amp, anon_index, &cookie);
5566 5555 ap = anon_get_ptr(amp->ahp, anon_index);
5567 5556 }
5568 5557 if ((amp == NULL) || (ap == NULL)) {
5569 5558 if (IS_VMODSORT(pp->p_vnode) ||
5570 5559 enable_mbit_wa) {
5571 5560 if (rw == S_WRITE)
5572 5561 hat_setmod(pp);
5573 5562 else if (rw != S_OTHER &&
5574 5563 !hat_ismod(pp))
5575 5564 prot &= ~PROT_WRITE;
5576 5565 }
5577 5566 /*
5578 5567 * Skip mapping read ahead pages marked
5579 5568 * for migration, so they will get migrated
5580 5569 * properly on fault
5581 5570 */
5582 5571 ASSERT(amp == NULL ||
5583 5572 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5584 5573 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5585 5574 hat_memload_region(hat,
5586 5575 seg->s_base + diff,
5587 5576 pp, prot, hat_flag,
5588 5577 svd->rcookie);
5589 5578 }
5590 5579 }
5591 5580 if (amp != NULL)
5592 5581 anon_array_exit(&cookie);
5593 5582 }
5594 5583 page_unlock(pp);
5595 5584 }
5596 5585 done:
5597 5586 if (amp != NULL)
5598 5587 		ANON_LOCK_EXIT(&amp->a_rwlock);
5599 5588 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5600 5589 if (pl_alloc_sz)
5601 5590 kmem_free(plp, pl_alloc_sz);
5602 5591 return (0);
5603 5592 }
5604 5593
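Within segvn_fault above, the temporary page list pl is now declared with FAULT_TMP_PAGES_NUM entries, and the amount of klustering requested from VOP_GETPAGE is capped at FAULT_TMP_PAGES_SZ. The definitions of these constants fall outside this hunk; as a minimal sketch, consistent with the always-eight-pages simplification this webrev describes (names taken from the diff, values assumed), they presumably expand to something like:

	/* Sketch only: values assumed; the real definitions are not shown in this hunk. */
	#define	FAULT_TMP_PAGES_NUM	0x8				/* always use 8 pages */
	#define	FAULT_TMP_PAGES_SZ	ptob(FAULT_TMP_PAGES_NUM)	/* those 8 pages, in bytes */

Under that assumption the new test "len > FAULT_TMP_PAGES_SZ" matches the old "len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)": pl holds FAULT_TMP_PAGES_NUM + 1 pointers, the extra slot being the NULL terminator, so subtracting one entry before applying ptob() yields the same byte count.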
5605 5594 /*
5606 5595 * This routine is used to start I/O on pages asynchronously. XXX it will
5607 5596 * only create PAGESIZE pages. At fault time they will be relocated into
5608 5597 * larger pages.
5609 5598 */
5610 5599 static faultcode_t
5611 5600 segvn_faulta(struct seg *seg, caddr_t addr)
5612 5601 {
5613 5602 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5614 5603 int err;
5615 5604 struct anon_map *amp;
5616 5605 vnode_t *vp;
5617 5606
5618 5607 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5619 5608
5620 5609 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5621 5610 if ((amp = svd->amp) != NULL) {
5622 5611 struct anon *ap;
5623 5612
5624 5613 /*
5625 5614 * Reader lock to prevent amp->ahp from being changed.
5626 5615 * This is advisory, it's ok to miss a page, so
5627 5616 * we don't do anon_array_enter lock.
5628 5617 */
5629 5618 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5630 5619 if ((ap = anon_get_ptr(amp->ahp,
5631 5620 svd->anon_index + seg_page(seg, addr))) != NULL) {
5632 5621
5633 5622 err = anon_getpage(&ap, NULL, NULL,
5634 5623 0, seg, addr, S_READ, svd->cred);
5635 5624
5636 5625 			ANON_LOCK_EXIT(&amp->a_rwlock);
5637 5626 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5638 5627 if (err)
5639 5628 return (FC_MAKE_ERR(err));
5640 5629 return (0);
5641 5630 }
5642 5631 		ANON_LOCK_EXIT(&amp->a_rwlock);
5643 5632 }
5644 5633
5645 5634 if (svd->vp == NULL) {
5646 5635 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5647 5636 return (0); /* zfod page - do nothing now */
5648 5637 }
5649 5638
5650 5639 vp = svd->vp;
5651 5640 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5652 5641 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5653 5642 err = VOP_GETPAGE(vp,
5654 5643 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5655 5644 PAGESIZE, NULL, NULL, 0, seg, addr,
5656 5645 S_OTHER, svd->cred, NULL);
5657 5646
5658 5647 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5659 5648 if (err)
5660 5649 return (FC_MAKE_ERR(err));
5661 5650 return (0);
5662 5651 }
5663 5652
5664 5653 static int
5665 5654 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5666 5655 {
5667 5656 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5668 5657 struct vpage *cvp, *svp, *evp;
5669 5658 struct vnode *vp;
5670 5659 size_t pgsz;
5671 5660 pgcnt_t pgcnt;
5672 5661 anon_sync_obj_t cookie;
5673 5662 int unload_done = 0;
5674 5663
5675 5664 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5676 5665
5677 5666 if ((svd->maxprot & prot) != prot)
5678 5667 return (EACCES); /* violated maxprot */
5679 5668
5680 5669 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5681 5670
5682 5671 /* return if prot is the same */
5683 5672 if (!svd->pageprot && svd->prot == prot) {
5684 5673 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5685 5674 return (0);
5686 5675 }
5687 5676
5688 5677 /*
5689 5678 * Since we change protections we first have to flush the cache.
5690 5679 * This makes sure all the pagelock calls have to recheck
5691 5680 * protections.
5692 5681 */
5693 5682 if (svd->softlockcnt > 0) {
5694 5683 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5695 5684
5696 5685 /*
5697 5686 * If this is shared segment non 0 softlockcnt
5698 5687 * means locked pages are still in use.
5699 5688 */
5700 5689 if (svd->type == MAP_SHARED) {
5701 5690 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5702 5691 return (EAGAIN);
5703 5692 }
5704 5693
5705 5694 /*
5706 5695 * Since we do have the segvn writers lock nobody can fill
5707 5696 * the cache with entries belonging to this seg during
5708 5697 * the purge. The flush either succeeds or we still have
5709 5698 * pending I/Os.
5710 5699 */
5711 5700 segvn_purge(seg);
5712 5701 if (svd->softlockcnt > 0) {
5713 5702 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5714 5703 return (EAGAIN);
5715 5704 }
5716 5705 }
5717 5706
5718 5707 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5719 5708 ASSERT(svd->amp == NULL);
5720 5709 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5721 5710 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5722 5711 HAT_REGION_TEXT);
5723 5712 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5724 5713 unload_done = 1;
5725 5714 } else if (svd->tr_state == SEGVN_TR_INIT) {
5726 5715 svd->tr_state = SEGVN_TR_OFF;
5727 5716 } else if (svd->tr_state == SEGVN_TR_ON) {
5728 5717 ASSERT(svd->amp != NULL);
5729 5718 segvn_textunrepl(seg, 0);
5730 5719 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5731 5720 unload_done = 1;
5732 5721 }
5733 5722
5734 5723 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5735 5724 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5736 5725 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5737 5726 segvn_inval_trcache(svd->vp);
5738 5727 }
5739 5728 if (seg->s_szc != 0) {
5740 5729 int err;
5741 5730 pgsz = page_get_pagesize(seg->s_szc);
5742 5731 pgcnt = pgsz >> PAGESHIFT;
5743 5732 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5744 5733 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5745 5734 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5746 5735 ASSERT(seg->s_base != addr || seg->s_size != len);
5747 5736 /*
5748 5737 * If we are holding the as lock as a reader then
5749 5738 * we need to return IE_RETRY and let the as
5750 5739 * layer drop and re-acquire the lock as a writer.
5751 5740 */
5752 5741 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5753 5742 return (IE_RETRY);
5754 5743 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5755 5744 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5756 5745 err = segvn_demote_range(seg, addr, len,
5757 5746 SDR_END, 0);
5758 5747 } else {
5759 5748 uint_t szcvec = map_pgszcvec(seg->s_base,
5760 5749 pgsz, (uintptr_t)seg->s_base,
5761 5750 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5762 5751 err = segvn_demote_range(seg, addr, len,
5763 5752 SDR_END, szcvec);
5764 5753 }
5765 5754 if (err == 0)
5766 5755 return (IE_RETRY);
5767 5756 if (err == ENOMEM)
5768 5757 return (IE_NOMEM);
5769 5758 return (err);
5770 5759 }
5771 5760 }
5772 5761
5773 5762
5774 5763 /*
5775 5764 * If it's a private mapping and we're making it writable then we
5776 5765 * may have to reserve the additional swap space now. If we are
5777 5766 * making writable only a part of the segment then we use its vpage
5778 5767 * array to keep a record of the pages for which we have reserved
5779 5768 * swap. In this case we set the pageswap field in the segment's
5780 5769 * segvn structure to record this.
5781 5770 *
5782 5771 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5783 5772 * removing write permission on the entire segment and we haven't
5784 5773 * modified any pages, we can release the swap space.
5785 5774 */
5786 5775 if (svd->type == MAP_PRIVATE) {
5787 5776 if (prot & PROT_WRITE) {
5788 5777 if (!(svd->flags & MAP_NORESERVE) &&
5789 5778 !(svd->swresv && svd->pageswap == 0)) {
5790 5779 size_t sz = 0;
5791 5780
5792 5781 /*
5793 5782 * Start by determining how much swap
5794 5783 * space is required.
5795 5784 */
5796 5785 if (addr == seg->s_base &&
5797 5786 len == seg->s_size &&
5798 5787 svd->pageswap == 0) {
5799 5788 /* The whole segment */
5800 5789 sz = seg->s_size;
5801 5790 } else {
5802 5791 /*
5803 5792 * Make sure that the vpage array
5804 5793 * exists, and make a note of the
5805 5794 * range of elements corresponding
5806 5795 * to len.
5807 5796 */
5808 5797 segvn_vpage(seg);
5809 5798 if (svd->vpage == NULL) {
5810 5799 SEGVN_LOCK_EXIT(seg->s_as,
5811 5800 &svd->lock);
5812 5801 return (ENOMEM);
5813 5802 }
5814 5803 svp = &svd->vpage[seg_page(seg, addr)];
5815 5804 evp = &svd->vpage[seg_page(seg,
5816 5805 addr + len)];
5817 5806
5818 5807 if (svd->pageswap == 0) {
5819 5808 /*
5820 5809 * This is the first time we've
5821 5810 * asked for a part of this
5822 5811 * segment, so we need to
5823 5812 * reserve everything we've
5824 5813 * been asked for.
5825 5814 */
5826 5815 sz = len;
5827 5816 } else {
5828 5817 /*
5829 5818 * We have to count the number
5830 5819 * of pages required.
5831 5820 */
5832 5821 for (cvp = svp; cvp < evp;
5833 5822 cvp++) {
5834 5823 if (!VPP_ISSWAPRES(cvp))
5835 5824 sz++;
5836 5825 }
5837 5826 sz <<= PAGESHIFT;
5838 5827 }
5839 5828 }
5840 5829
5841 5830 /* Try to reserve the necessary swap. */
5842 5831 if (anon_resv_zone(sz,
5843 5832 seg->s_as->a_proc->p_zone) == 0) {
5844 5833 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5845 5834 return (IE_NOMEM);
5846 5835 }
5847 5836
5848 5837 /*
5849 5838 * Make a note of how much swap space
5850 5839 * we've reserved.
5851 5840 */
5852 5841 if (svd->pageswap == 0 && sz == seg->s_size) {
5853 5842 svd->swresv = sz;
5854 5843 } else {
5855 5844 ASSERT(svd->vpage != NULL);
5856 5845 svd->swresv += sz;
5857 5846 svd->pageswap = 1;
5858 5847 for (cvp = svp; cvp < evp; cvp++) {
5859 5848 if (!VPP_ISSWAPRES(cvp))
5860 5849 VPP_SETSWAPRES(cvp);
5861 5850 }
5862 5851 }
5863 5852 }
5864 5853 } else {
5865 5854 /*
5866 5855 * Swap space is released only if this segment
5867 5856 * does not map anonymous memory, since read faults
5868 5857 * on such segments still need an anon slot to read
5869 5858 * in the data.
5870 5859 */
5871 5860 if (svd->swresv != 0 && svd->vp != NULL &&
5872 5861 svd->amp == NULL && addr == seg->s_base &&
5873 5862 len == seg->s_size && svd->pageprot == 0) {
5874 5863 ASSERT(svd->pageswap == 0);
5875 5864 anon_unresv_zone(svd->swresv,
5876 5865 seg->s_as->a_proc->p_zone);
5877 5866 svd->swresv = 0;
5878 5867 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5879 5868 "anon proc:%p %lu %u", seg, 0, 0);
5880 5869 }
5881 5870 }
5882 5871 }
5883 5872
5884 5873 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5885 5874 if (svd->prot == prot) {
5886 5875 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5887 5876 return (0); /* all done */
5888 5877 }
5889 5878 svd->prot = (uchar_t)prot;
5890 5879 } else if (svd->type == MAP_PRIVATE) {
5891 5880 struct anon *ap = NULL;
5892 5881 page_t *pp;
5893 5882 u_offset_t offset, off;
5894 5883 struct anon_map *amp;
5895 5884 ulong_t anon_idx = 0;
5896 5885
5897 5886 /*
5898 5887 * A vpage structure exists or else the change does not
5899 5888 * involve the entire segment. Establish a vpage structure
5900 5889 * if none is there. Then, for each page in the range,
5901 5890 * adjust its individual permissions. Note that write-
5902 5891 * enabling a MAP_PRIVATE page can affect the claims for
5903 5892 * locked down memory. Overcommitting memory terminates
5904 5893 * the operation.
5905 5894 */
5906 5895 segvn_vpage(seg);
5907 5896 if (svd->vpage == NULL) {
5908 5897 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5909 5898 return (ENOMEM);
5910 5899 }
5911 5900 svd->pageprot = 1;
5912 5901 if ((amp = svd->amp) != NULL) {
5913 5902 anon_idx = svd->anon_index + seg_page(seg, addr);
5914 5903 ASSERT(seg->s_szc == 0 ||
5915 5904 IS_P2ALIGNED(anon_idx, pgcnt));
5916 5905 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5917 5906 }
5918 5907
5919 5908 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5920 5909 evp = &svd->vpage[seg_page(seg, addr + len)];
5921 5910
5922 5911 /*
5923 5912 * See Statement at the beginning of segvn_lockop regarding
5924 5913 * the way cowcnts and lckcnts are handled.
5925 5914 */
5926 5915 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5927 5916
5928 5917 if (seg->s_szc != 0) {
5929 5918 if (amp != NULL) {
5930 5919 anon_array_enter(amp, anon_idx,
5931 5920 &cookie);
5932 5921 }
5933 5922 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5934 5923 !segvn_claim_pages(seg, svp, offset,
5935 5924 anon_idx, prot)) {
5936 5925 if (amp != NULL) {
5937 5926 anon_array_exit(&cookie);
5938 5927 }
5939 5928 break;
5940 5929 }
5941 5930 if (amp != NULL) {
5942 5931 anon_array_exit(&cookie);
5943 5932 }
5944 5933 anon_idx++;
5945 5934 } else {
5946 5935 if (amp != NULL) {
5947 5936 anon_array_enter(amp, anon_idx,
5948 5937 &cookie);
5949 5938 ap = anon_get_ptr(amp->ahp, anon_idx++);
5950 5939 }
5951 5940
5952 5941 if (VPP_ISPPLOCK(svp) &&
5953 5942 VPP_PROT(svp) != prot) {
5954 5943
5955 5944 if (amp == NULL || ap == NULL) {
5956 5945 vp = svd->vp;
5957 5946 off = offset;
5958 5947 } else
5959 5948 swap_xlate(ap, &vp, &off);
5960 5949 if (amp != NULL)
5961 5950 anon_array_exit(&cookie);
5962 5951
5963 5952 if ((pp = page_lookup(vp, off,
5964 5953 SE_SHARED)) == NULL) {
5965 5954 panic("segvn_setprot: no page");
5966 5955 /*NOTREACHED*/
5967 5956 }
5968 5957 ASSERT(seg->s_szc == 0);
5969 5958 if ((VPP_PROT(svp) ^ prot) &
5970 5959 PROT_WRITE) {
5971 5960 if (prot & PROT_WRITE) {
5972 5961 if (!page_addclaim(
5973 5962 pp)) {
5974 5963 page_unlock(pp);
5975 5964 break;
5976 5965 }
5977 5966 } else {
5978 5967 if (!page_subclaim(
5979 5968 pp)) {
5980 5969 page_unlock(pp);
5981 5970 break;
5982 5971 }
5983 5972 }
5984 5973 }
5985 5974 page_unlock(pp);
5986 5975 } else if (amp != NULL)
5987 5976 anon_array_exit(&cookie);
5988 5977 }
5989 5978 VPP_SETPROT(svp, prot);
5990 5979 offset += PAGESIZE;
5991 5980 }
5992 5981 if (amp != NULL)
5993 5982 			ANON_LOCK_EXIT(&amp->a_rwlock);
5994 5983
5995 5984 /*
5996 5985 * Did we terminate prematurely? If so, simply unload
5997 5986 * the translations to the things we've updated so far.
5998 5987 */
5999 5988 if (svp != evp) {
6000 5989 if (unload_done) {
6001 5990 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6002 5991 return (IE_NOMEM);
6003 5992 }
6004 5993 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6005 5994 PAGESIZE;
6006 5995 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
6007 5996 if (len != 0)
6008 5997 hat_unload(seg->s_as->a_hat, addr,
6009 5998 len, HAT_UNLOAD);
6010 5999 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6011 6000 return (IE_NOMEM);
6012 6001 }
6013 6002 } else {
6014 6003 segvn_vpage(seg);
6015 6004 if (svd->vpage == NULL) {
6016 6005 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6017 6006 return (ENOMEM);
6018 6007 }
6019 6008 svd->pageprot = 1;
6020 6009 evp = &svd->vpage[seg_page(seg, addr + len)];
6021 6010 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6022 6011 VPP_SETPROT(svp, prot);
6023 6012 }
6024 6013 }
6025 6014
6026 6015 if (unload_done) {
6027 6016 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6028 6017 return (0);
6029 6018 }
6030 6019
6031 6020 if (((prot & PROT_WRITE) != 0 &&
6032 6021 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
6033 6022 (prot & ~PROT_USER) == PROT_NONE) {
6034 6023 /*
6035 6024 * Either private or shared data with write access (in
6036 6025 * which case we need to throw out all former translations
6037 6026 * so that we get the right translations set up on fault
6038 6027 * and we don't allow write access to any copy-on-write pages
6039 6028 * that might be around or to prevent write access to pages
6040 6029 * representing holes in a file), or we don't have permission
6041 6030 * to access the memory at all (in which case we have to
6042 6031 * unload any current translations that might exist).
6043 6032 */
6044 6033 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
6045 6034 } else {
6046 6035 /*
6047 6036 * A shared mapping or a private mapping in which write
6048 6037 * protection is going to be denied - just change all the
6049 6038 * protections over the range of addresses in question.
6050 6039 * segvn does not support any other attributes other
6051 6040 * than prot so we can use hat_chgattr.
6052 6041 */
6053 6042 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6054 6043 }
6055 6044
6056 6045 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6057 6046
6058 6047 return (0);
6059 6048 }
6060 6049
6061 6050 /*
6062 6051 * segvn_setpagesize is called via segop_setpagesize from as_setpagesize,
6063 6052 * to determine if the seg is capable of mapping the requested szc.
6064 6053 */
6065 6054 static int
6066 6055 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6067 6056 {
6068 6057 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6069 6058 struct segvn_data *nsvd;
6070 6059 struct anon_map *amp = svd->amp;
6071 6060 struct seg *nseg;
6072 6061 caddr_t eaddr = addr + len, a;
6073 6062 size_t pgsz = page_get_pagesize(szc);
6074 6063 pgcnt_t pgcnt = page_get_pagecnt(szc);
6075 6064 int err;
6076 6065 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6077 6066
6078 6067 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6079 6068 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6080 6069
6081 6070 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6082 6071 return (0);
6083 6072 }
6084 6073
6085 6074 /*
6086 6075 * addr should always be pgsz aligned but eaddr may be misaligned if
6087 6076 * it's at the end of the segment.
6088 6077 *
6089 6078 * XXX we should assert this condition since as_setpagesize() logic
6090 6079 * guarantees it.
6091 6080 */
6092 6081 if (!IS_P2ALIGNED(addr, pgsz) ||
6093 6082 (!IS_P2ALIGNED(eaddr, pgsz) &&
6094 6083 eaddr != seg->s_base + seg->s_size)) {
6095 6084
6096 6085 segvn_setpgsz_align_err++;
6097 6086 return (EINVAL);
6098 6087 }
6099 6088
6100 6089 if (amp != NULL && svd->type == MAP_SHARED) {
6101 6090 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6102 6091 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6103 6092
6104 6093 segvn_setpgsz_anon_align_err++;
6105 6094 return (EINVAL);
6106 6095 }
6107 6096 }
6108 6097
6109 6098 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6110 6099 szc > segvn_maxpgszc) {
6111 6100 return (EINVAL);
6112 6101 }
6113 6102
6114 6103 /* paranoid check */
6115 6104 if (svd->vp != NULL &&
6116 6105 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6117 6106 return (EINVAL);
6118 6107 }
6119 6108
6120 6109 if (seg->s_szc == 0 && svd->vp != NULL &&
6121 6110 map_addr_vacalign_check(addr, off)) {
6122 6111 return (EINVAL);
6123 6112 }
6124 6113
6125 6114 /*
6126 6115 * Check that protections are the same within new page
6127 6116 * size boundaries.
6128 6117 */
6129 6118 if (svd->pageprot) {
6130 6119 for (a = addr; a < eaddr; a += pgsz) {
6131 6120 if ((a + pgsz) > eaddr) {
6132 6121 if (!sameprot(seg, a, eaddr - a)) {
6133 6122 return (EINVAL);
6134 6123 }
6135 6124 } else {
6136 6125 if (!sameprot(seg, a, pgsz)) {
6137 6126 return (EINVAL);
6138 6127 }
6139 6128 }
6140 6129 }
6141 6130 }
6142 6131
6143 6132 /*
6144 6133 * Since we are changing page size we first have to flush
6145 6134 * the cache. This makes sure all the pagelock calls have
6146 6135 * to recheck protections.
6147 6136 */
6148 6137 if (svd->softlockcnt > 0) {
6149 6138 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6150 6139
6151 6140 /*
6152 6141 * If this is shared segment non 0 softlockcnt
6153 6142 * means locked pages are still in use.
6154 6143 */
6155 6144 if (svd->type == MAP_SHARED) {
6156 6145 return (EAGAIN);
6157 6146 }
6158 6147
6159 6148 /*
6160 6149 * Since we do have the segvn writers lock nobody can fill
6161 6150 * the cache with entries belonging to this seg during
6162 6151 * the purge. The flush either succeeds or we still have
6163 6152 * pending I/Os.
6164 6153 */
6165 6154 segvn_purge(seg);
6166 6155 if (svd->softlockcnt > 0) {
6167 6156 return (EAGAIN);
6168 6157 }
6169 6158 }
6170 6159
6171 6160 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6172 6161 ASSERT(svd->amp == NULL);
6173 6162 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6174 6163 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6175 6164 HAT_REGION_TEXT);
6176 6165 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6177 6166 } else if (svd->tr_state == SEGVN_TR_INIT) {
6178 6167 svd->tr_state = SEGVN_TR_OFF;
6179 6168 } else if (svd->tr_state == SEGVN_TR_ON) {
6180 6169 ASSERT(svd->amp != NULL);
6181 6170 segvn_textunrepl(seg, 1);
6182 6171 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6183 6172 amp = NULL;
6184 6173 }
6185 6174
6186 6175 /*
6187 6176 * Operation for sub range of existing segment.
6188 6177 */
6189 6178 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6190 6179 if (szc < seg->s_szc) {
6191 6180 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6192 6181 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6193 6182 if (err == 0) {
6194 6183 return (IE_RETRY);
6195 6184 }
6196 6185 if (err == ENOMEM) {
6197 6186 return (IE_NOMEM);
6198 6187 }
6199 6188 return (err);
6200 6189 }
6201 6190 if (addr != seg->s_base) {
6202 6191 nseg = segvn_split_seg(seg, addr);
6203 6192 if (eaddr != (nseg->s_base + nseg->s_size)) {
6204 6193 /* eaddr is szc aligned */
6205 6194 (void) segvn_split_seg(nseg, eaddr);
6206 6195 }
6207 6196 return (IE_RETRY);
6208 6197 }
6209 6198 if (eaddr != (seg->s_base + seg->s_size)) {
6210 6199 /* eaddr is szc aligned */
6211 6200 (void) segvn_split_seg(seg, eaddr);
6212 6201 }
6213 6202 return (IE_RETRY);
6214 6203 }
6215 6204
6216 6205 /*
6217 6206 * Break any low level sharing and reset seg->s_szc to 0.
6218 6207 */
6219 6208 if ((err = segvn_clrszc(seg)) != 0) {
6220 6209 if (err == ENOMEM) {
6221 6210 err = IE_NOMEM;
6222 6211 }
6223 6212 return (err);
6224 6213 }
6225 6214 ASSERT(seg->s_szc == 0);
6226 6215
6227 6216 /*
6228 6217 * If the end of the current segment is not pgsz aligned
6229 6218 * then attempt to concatenate with the next segment.
6230 6219 */
6231 6220 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6232 6221 nseg = AS_SEGNEXT(seg->s_as, seg);
6233 6222 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6234 6223 return (ENOMEM);
6235 6224 }
6236 6225 if (nseg->s_ops != &segvn_ops) {
6237 6226 return (EINVAL);
6238 6227 }
6239 6228 nsvd = (struct segvn_data *)nseg->s_data;
6240 6229 if (nsvd->softlockcnt > 0) {
6241 6230 /*
6242 6231 * If this is shared segment non 0 softlockcnt
6243 6232 * means locked pages are still in use.
6244 6233 */
6245 6234 if (nsvd->type == MAP_SHARED) {
6246 6235 return (EAGAIN);
6247 6236 }
6248 6237 segvn_purge(nseg);
6249 6238 if (nsvd->softlockcnt > 0) {
6250 6239 return (EAGAIN);
6251 6240 }
6252 6241 }
6253 6242 err = segvn_clrszc(nseg);
6254 6243 if (err == ENOMEM) {
6255 6244 err = IE_NOMEM;
6256 6245 }
6257 6246 if (err != 0) {
6258 6247 return (err);
6259 6248 }
6260 6249 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6261 6250 err = segvn_concat(seg, nseg, 1);
6262 6251 if (err == -1) {
6263 6252 return (EINVAL);
6264 6253 }
6265 6254 if (err == -2) {
6266 6255 return (IE_NOMEM);
6267 6256 }
6268 6257 return (IE_RETRY);
6269 6258 }
6270 6259
6271 6260 /*
6272 6261 * May need to re-align anon array to
6273 6262 * new szc.
6274 6263 */
6275 6264 if (amp != NULL) {
6276 6265 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6277 6266 struct anon_hdr *nahp;
6278 6267
6279 6268 ASSERT(svd->type == MAP_PRIVATE);
6280 6269
6281 6270 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6282 6271 ASSERT(amp->refcnt == 1);
6283 6272 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6284 6273 if (nahp == NULL) {
6285 6274 				ANON_LOCK_EXIT(&amp->a_rwlock);
6286 6275 return (IE_NOMEM);
6287 6276 }
6288 6277 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6289 6278 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6290 6279 anon_release(nahp, btop(amp->size));
6291 6280 				ANON_LOCK_EXIT(&amp->a_rwlock);
6292 6281 return (IE_NOMEM);
6293 6282 }
6294 6283 anon_release(amp->ahp, btop(amp->size));
6295 6284 amp->ahp = nahp;
6296 6285 svd->anon_index = 0;
6297 6286 			ANON_LOCK_EXIT(&amp->a_rwlock);
6298 6287 }
6299 6288 }
6300 6289 if (svd->vp != NULL && szc != 0) {
6301 6290 struct vattr va;
6302 6291 u_offset_t eoffpage = svd->offset;
6303 6292 va.va_mask = AT_SIZE;
6304 6293 eoffpage += seg->s_size;
6305 6294 eoffpage = btopr(eoffpage);
6306 6295 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6307 6296 segvn_setpgsz_getattr_err++;
6308 6297 return (EINVAL);
6309 6298 }
6310 6299 if (btopr(va.va_size) < eoffpage) {
6311 6300 segvn_setpgsz_eof_err++;
6312 6301 return (EINVAL);
6313 6302 }
6314 6303 if (amp != NULL) {
6315 6304 /*
6316 6305 * anon_fill_cow_holes() may call VOP_GETPAGE().
6317 6306 * don't take anon map lock here to avoid holding it
6318 6307 * across VOP_GETPAGE() calls that may call back into
6319 6308 			 * segvn for klustering checks. We don't really need
6320 6309 * anon map lock here since it's a private segment and
6321 6310 * we hold as level lock as writers.
6322 6311 */
6323 6312 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6324 6313 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6325 6314 seg->s_size, szc, svd->prot, svd->vpage,
6326 6315 svd->cred)) != 0) {
6327 6316 return (EINVAL);
6328 6317 }
6329 6318 }
6330 6319 segvn_setvnode_mpss(svd->vp);
6331 6320 }
6332 6321
6333 6322 if (amp != NULL) {
6334 6323 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6335 6324 if (svd->type == MAP_PRIVATE) {
6336 6325 amp->a_szc = szc;
6337 6326 } else if (szc > amp->a_szc) {
6338 6327 amp->a_szc = szc;
6339 6328 }
6340 6329 		ANON_LOCK_EXIT(&amp->a_rwlock);
6341 6330 }
6342 6331
6343 6332 seg->s_szc = szc;
6344 6333
6345 6334 return (0);
6346 6335 }
6347 6336
6348 6337 static int
6349 6338 segvn_clrszc(struct seg *seg)
6350 6339 {
6351 6340 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6352 6341 struct anon_map *amp = svd->amp;
6353 6342 size_t pgsz;
6354 6343 pgcnt_t pages;
6355 6344 int err = 0;
6356 6345 caddr_t a = seg->s_base;
6357 6346 caddr_t ea = a + seg->s_size;
6358 6347 ulong_t an_idx = svd->anon_index;
6359 6348 vnode_t *vp = svd->vp;
6360 6349 struct vpage *vpage = svd->vpage;
6361 6350 page_t *anon_pl[1 + 1], *pp;
6362 6351 struct anon *ap, *oldap;
6363 6352 uint_t prot = svd->prot, vpprot;
6364 6353 int pageflag = 0;
6365 6354
6366 6355 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6367 6356 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6368 6357 ASSERT(svd->softlockcnt == 0);
6369 6358
6370 6359 if (vp == NULL && amp == NULL) {
6371 6360 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6372 6361 seg->s_szc = 0;
6373 6362 return (0);
6374 6363 }
6375 6364
6376 6365 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6377 6366 ASSERT(svd->amp == NULL);
6378 6367 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6379 6368 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6380 6369 HAT_REGION_TEXT);
6381 6370 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6382 6371 } else if (svd->tr_state == SEGVN_TR_ON) {
6383 6372 ASSERT(svd->amp != NULL);
6384 6373 segvn_textunrepl(seg, 1);
6385 6374 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6386 6375 amp = NULL;
6387 6376 } else {
6388 6377 if (svd->tr_state != SEGVN_TR_OFF) {
6389 6378 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6390 6379 svd->tr_state = SEGVN_TR_OFF;
6391 6380 }
6392 6381
6393 6382 /*
6394 6383 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6395 6384 * unload argument is 0 when we are freeing the segment
6396 6385 * and unload was already done.
6397 6386 */
6398 6387 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6399 6388 HAT_UNLOAD_UNMAP);
6400 6389 }
6401 6390
6402 6391 if (amp == NULL || svd->type == MAP_SHARED) {
6403 6392 seg->s_szc = 0;
6404 6393 return (0);
6405 6394 }
6406 6395
6407 6396 pgsz = page_get_pagesize(seg->s_szc);
6408 6397 pages = btop(pgsz);
6409 6398
6410 6399 /*
6411 6400 * XXX anon rwlock is not really needed because this is a
6412 6401 * private segment and we are writers.
6413 6402 */
6414 6403 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6415 6404
6416 6405 for (; a < ea; a += pgsz, an_idx += pages) {
6417 6406 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6418 6407 ASSERT(vpage != NULL || svd->pageprot == 0);
6419 6408 if (vpage != NULL) {
6420 6409 ASSERT(sameprot(seg, a, pgsz));
6421 6410 prot = VPP_PROT(vpage);
6422 6411 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6423 6412 }
6424 6413 if (seg->s_szc != 0) {
6425 6414 ASSERT(vp == NULL || anon_pages(amp->ahp,
6426 6415 an_idx, pages) == pages);
6427 6416 if ((err = anon_map_demotepages(amp, an_idx,
6428 6417 seg, a, prot, vpage, svd->cred)) != 0) {
6429 6418 goto out;
6430 6419 }
6431 6420 } else {
6432 6421 if (oldap->an_refcnt == 1) {
6433 6422 continue;
6434 6423 }
6435 6424 if ((err = anon_getpage(&oldap, &vpprot,
6436 6425 anon_pl, PAGESIZE, seg, a, S_READ,
6437 6426 svd->cred))) {
6438 6427 goto out;
6439 6428 }
6440 6429 if ((pp = anon_private(&ap, seg, a, prot,
6441 6430 anon_pl[0], pageflag, svd->cred)) == NULL) {
6442 6431 err = ENOMEM;
6443 6432 goto out;
6444 6433 }
6445 6434 anon_decref(oldap);
6446 6435 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6447 6436 ANON_SLEEP);
6448 6437 page_unlock(pp);
6449 6438 }
6450 6439 }
6451 6440 vpage = (vpage == NULL) ? NULL : vpage + pages;
6452 6441 }
6453 6442
6454 6443 amp->a_szc = 0;
6455 6444 seg->s_szc = 0;
6456 6445 out:
6457 6446 	ANON_LOCK_EXIT(&amp->a_rwlock);
6458 6447 return (err);
6459 6448 }
6460 6449
6461 6450 static int
6462 6451 segvn_claim_pages(
6463 6452 struct seg *seg,
6464 6453 struct vpage *svp,
6465 6454 u_offset_t off,
6466 6455 ulong_t anon_idx,
6467 6456 uint_t prot)
6468 6457 {
6469 6458 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6470 6459 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6471 6460 page_t **ppa;
6472 6461 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6473 6462 struct anon_map *amp = svd->amp;
6474 6463 struct vpage *evp = svp + pgcnt;
6475 6464 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6476 6465 + seg->s_base;
6477 6466 struct anon *ap;
6478 6467 struct vnode *vp = svd->vp;
6479 6468 page_t *pp;
6480 6469 pgcnt_t pg_idx, i;
6481 6470 int err = 0;
6482 6471 anoff_t aoff;
6483 6472 int anon = (amp != NULL) ? 1 : 0;
6484 6473
6485 6474 ASSERT(svd->type == MAP_PRIVATE);
6486 6475 ASSERT(svd->vpage != NULL);
6487 6476 ASSERT(seg->s_szc != 0);
6488 6477 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6489 6478 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6490 6479 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6491 6480
6492 6481 if (VPP_PROT(svp) == prot)
6493 6482 return (1);
6494 6483 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6495 6484 return (1);
6496 6485
6497 6486 ppa = kmem_alloc(ppasize, KM_SLEEP);
6498 6487 if (anon && vp != NULL) {
6499 6488 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6500 6489 anon = 0;
6501 6490 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6502 6491 }
6503 6492 ASSERT(!anon ||
6504 6493 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6505 6494 }
6506 6495
6507 6496 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6508 6497 if (!VPP_ISPPLOCK(svp))
6509 6498 continue;
6510 6499 if (anon) {
6511 6500 ap = anon_get_ptr(amp->ahp, anon_idx);
6512 6501 if (ap == NULL) {
6513 6502 panic("segvn_claim_pages: no anon slot");
6514 6503 }
6515 6504 swap_xlate(ap, &vp, &aoff);
6516 6505 off = (u_offset_t)aoff;
6517 6506 }
6518 6507 ASSERT(vp != NULL);
6519 6508 if ((pp = page_lookup(vp,
6520 6509 (u_offset_t)off, SE_SHARED)) == NULL) {
6521 6510 panic("segvn_claim_pages: no page");
6522 6511 }
6523 6512 ppa[pg_idx++] = pp;
6524 6513 off += PAGESIZE;
6525 6514 }
6526 6515
6527 6516 if (ppa[0] == NULL) {
6528 6517 kmem_free(ppa, ppasize);
6529 6518 return (1);
6530 6519 }
6531 6520
6532 6521 ASSERT(pg_idx <= pgcnt);
6533 6522 ppa[pg_idx] = NULL;
6534 6523
6535 6524
6536 6525 /* Find each large page within ppa, and adjust its claim */
6537 6526
6538 6527 /* Does ppa cover a single large page? */
6539 6528 if (ppa[0]->p_szc == seg->s_szc) {
6540 6529 if (prot & PROT_WRITE)
6541 6530 err = page_addclaim_pages(ppa);
6542 6531 else
6543 6532 err = page_subclaim_pages(ppa);
6544 6533 } else {
6545 6534 for (i = 0; ppa[i]; i += pgcnt) {
6546 6535 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6547 6536 if (prot & PROT_WRITE)
6548 6537 err = page_addclaim_pages(&ppa[i]);
6549 6538 else
6550 6539 err = page_subclaim_pages(&ppa[i]);
6551 6540 if (err == 0)
6552 6541 break;
6553 6542 }
6554 6543 }
6555 6544
6556 6545 for (i = 0; i < pg_idx; i++) {
6557 6546 ASSERT(ppa[i] != NULL);
6558 6547 page_unlock(ppa[i]);
6559 6548 }
6560 6549
6561 6550 kmem_free(ppa, ppasize);
6562 6551 return (err);
6563 6552 }
6564 6553
6565 6554 /*
6566 6555 * Returns right (upper address) segment if split occurred.
6567 6556 * If the address is equal to the beginning or end of its segment it returns
6568 6557 * the current segment.
6569 6558 */
6570 6559 static struct seg *
6571 6560 segvn_split_seg(struct seg *seg, caddr_t addr)
6572 6561 {
6573 6562 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6574 6563 struct seg *nseg;
6575 6564 size_t nsize;
6576 6565 struct segvn_data *nsvd;
6577 6566
6578 6567 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6579 6568 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6580 6569
6581 6570 ASSERT(addr >= seg->s_base);
6582 6571 ASSERT(addr <= seg->s_base + seg->s_size);
6583 6572 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6584 6573
6585 6574 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6586 6575 return (seg);
6587 6576
6588 6577 nsize = seg->s_base + seg->s_size - addr;
6589 6578 seg->s_size = addr - seg->s_base;
6590 6579 nseg = seg_alloc(seg->s_as, addr, nsize);
6591 6580 ASSERT(nseg != NULL);
6592 6581 nseg->s_ops = seg->s_ops;
6593 6582 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6594 6583 nseg->s_data = (void *)nsvd;
6595 6584 nseg->s_szc = seg->s_szc;
6596 6585 *nsvd = *svd;
6597 6586 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6598 6587 nsvd->seg = nseg;
6599 6588 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6600 6589
6601 6590 if (nsvd->vp != NULL) {
6602 6591 VN_HOLD(nsvd->vp);
6603 6592 nsvd->offset = svd->offset +
6604 6593 (uintptr_t)(nseg->s_base - seg->s_base);
6605 6594 if (nsvd->type == MAP_SHARED)
6606 6595 lgrp_shm_policy_init(NULL, nsvd->vp);
6607 6596 } else {
6608 6597 /*
6609 6598 * The offset for an anonymous segment has no significance in
6610 6599 * terms of an offset into a file. If we were to use the above
6611 6600 * calculation instead, the structures read out of
6612 6601 * /proc/<pid>/xmap would be more difficult to decipher since
6613 6602 * it would be unclear whether two seemingly contiguous
6614 6603 * prxmap_t structures represented different segments or a
6615 6604 * single segment that had been split up into multiple prxmap_t
6616 6605 * structures (e.g. if some part of the segment had not yet
6617 6606 * been faulted in).
6618 6607 */
6619 6608 nsvd->offset = 0;
6620 6609 }
6621 6610
6622 6611 ASSERT(svd->softlockcnt == 0);
6623 6612 ASSERT(svd->softlockcnt_sbase == 0);
6624 6613 ASSERT(svd->softlockcnt_send == 0);
6625 6614 crhold(svd->cred);
6626 6615
6627 6616 if (svd->vpage != NULL) {
6628 6617 size_t bytes = vpgtob(seg_pages(seg));
6629 6618 size_t nbytes = vpgtob(seg_pages(nseg));
6630 6619 struct vpage *ovpage = svd->vpage;
6631 6620
6632 6621 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6633 6622 bcopy(ovpage, svd->vpage, bytes);
6634 6623 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6635 6624 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6636 6625 kmem_free(ovpage, bytes + nbytes);
6637 6626 }
6638 6627 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6639 6628 struct anon_map *oamp = svd->amp, *namp;
6640 6629 struct anon_hdr *nahp;
6641 6630
6642 6631 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6643 6632 ASSERT(oamp->refcnt == 1);
6644 6633 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6645 6634 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6646 6635 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6647 6636
6648 6637 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6649 6638 namp->a_szc = nseg->s_szc;
6650 6639 (void) anon_copy_ptr(oamp->ahp,
6651 6640 svd->anon_index + btop(seg->s_size),
6652 6641 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6653 6642 anon_release(oamp->ahp, btop(oamp->size));
6654 6643 oamp->ahp = nahp;
6655 6644 oamp->size = seg->s_size;
6656 6645 svd->anon_index = 0;
6657 6646 nsvd->amp = namp;
6658 6647 nsvd->anon_index = 0;
6659 6648 ANON_LOCK_EXIT(&oamp->a_rwlock);
6660 6649 } else if (svd->amp != NULL) {
6661 6650 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6662 6651 ASSERT(svd->amp == nsvd->amp);
6663 6652 ASSERT(seg->s_szc <= svd->amp->a_szc);
6664 6653 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6665 6654 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6666 6655 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6667 6656 svd->amp->refcnt++;
6668 6657 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6669 6658 }
6670 6659
6671 6660 /*
6672 6661 * Split the amount of swap reserved.
6673 6662 */
6674 6663 if (svd->swresv) {
6675 6664 /*
6676 6665 * For MAP_NORESERVE, only allocate swap reserve for pages
6677 6666 * being used. Other segments get enough to cover whole
6678 6667 * segment.
6679 6668 */
6680 6669 if (svd->flags & MAP_NORESERVE) {
6681 6670 size_t oswresv;
6682 6671
6683 6672 ASSERT(svd->amp);
6684 6673 oswresv = svd->swresv;
6685 6674 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6686 6675 svd->anon_index, btop(seg->s_size)));
6687 6676 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6688 6677 nsvd->anon_index, btop(nseg->s_size)));
6689 6678 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6690 6679 } else {
6691 6680 if (svd->pageswap) {
6692 6681 svd->swresv = segvn_count_swap_by_vpages(seg);
6693 6682 ASSERT(nsvd->swresv >= svd->swresv);
6694 6683 nsvd->swresv -= svd->swresv;
6695 6684 } else {
6696 6685 ASSERT(svd->swresv == seg->s_size +
6697 6686 nseg->s_size);
6698 6687 svd->swresv = seg->s_size;
6699 6688 nsvd->swresv = nseg->s_size;
6700 6689 }
6701 6690 }
6702 6691 }
6703 6692
6704 6693 return (nseg);
6705 6694 }
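A small editorial sketch (made-up addresses, user-level C) of the arithmetic segvn_split_seg() performs: the original segment keeps [s_base, addr), the new right-hand segment gets [addr, s_base + s_size), and a vnode-backed right segment starts at the old offset advanced by the split distance; fully reserved swap splits the same way, into seg->s_size and nseg->s_size.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uintptr_t s_base = 0x10000000, addr = 0x10040000;	/* split point */
	size_t s_size = 0x100000, offset = 0x2000;

	size_t nsize = s_base + s_size - addr;		/* right segment size */
	size_t osize = addr - s_base;			/* left segment keeps this */
	size_t noffset = offset + (addr - s_base);	/* right segment's vnode offset */

	printf("left:  base 0x%lx size 0x%zx off 0x%zx\n",
	    (unsigned long)s_base, osize, offset);
	printf("right: base 0x%lx size 0x%zx off 0x%zx\n",
	    (unsigned long)addr, nsize, noffset);
	return (0);
}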
6706 6695
6707 6696 /*
6708 6697 * called on memory operations (unmap, setprot, setpagesize) for a subset
6709 6698 * of a large page segment to either demote the memory range (SDR_RANGE)
6710 6699 * or the ends (SDR_END) by addr/len.
6711 6700 *
6712 6701 * returns 0 on success. returns errno, including ENOMEM, on failure.
6713 6702 */
6714 6703 static int
6715 6704 segvn_demote_range(
6716 6705 struct seg *seg,
6717 6706 caddr_t addr,
6718 6707 size_t len,
6719 6708 int flag,
6720 6709 uint_t szcvec)
6721 6710 {
6722 6711 caddr_t eaddr = addr + len;
6723 6712 caddr_t lpgaddr, lpgeaddr;
6724 6713 struct seg *nseg;
6725 6714 struct seg *badseg1 = NULL;
6726 6715 struct seg *badseg2 = NULL;
6727 6716 size_t pgsz;
6728 6717 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6729 6718 int err;
6730 6719 uint_t szc = seg->s_szc;
6731 6720 uint_t tszcvec;
6732 6721
6733 6722 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6734 6723 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6735 6724 ASSERT(szc != 0);
6736 6725 pgsz = page_get_pagesize(szc);
6737 6726 ASSERT(seg->s_base != addr || seg->s_size != len);
6738 6727 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6739 6728 ASSERT(svd->softlockcnt == 0);
6740 6729 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6741 6730 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6742 6731
6743 6732 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6744 6733 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6745 6734 if (flag == SDR_RANGE) {
6746 6735 /* demote entire range */
6747 6736 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6748 6737 (void) segvn_split_seg(nseg, lpgeaddr);
6749 6738 ASSERT(badseg1->s_base == lpgaddr);
6750 6739 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6751 6740 } else if (addr != lpgaddr) {
6752 6741 ASSERT(flag == SDR_END);
6753 6742 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6754 6743 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6755 6744 eaddr < lpgaddr + 2 * pgsz) {
6756 6745 (void) segvn_split_seg(nseg, lpgeaddr);
6757 6746 ASSERT(badseg1->s_base == lpgaddr);
6758 6747 ASSERT(badseg1->s_size == 2 * pgsz);
6759 6748 } else {
6760 6749 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6761 6750 ASSERT(badseg1->s_base == lpgaddr);
6762 6751 ASSERT(badseg1->s_size == pgsz);
6763 6752 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6764 6753 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6765 6754 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6766 6755 badseg2 = nseg;
6767 6756 (void) segvn_split_seg(nseg, lpgeaddr);
6768 6757 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6769 6758 ASSERT(badseg2->s_size == pgsz);
6770 6759 }
6771 6760 }
6772 6761 } else {
6773 6762 ASSERT(flag == SDR_END);
6774 6763 ASSERT(eaddr < lpgeaddr);
6775 6764 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6776 6765 (void) segvn_split_seg(nseg, lpgeaddr);
6777 6766 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6778 6767 ASSERT(badseg1->s_size == pgsz);
6779 6768 }
6780 6769
6781 6770 ASSERT(badseg1 != NULL);
6782 6771 ASSERT(badseg1->s_szc == szc);
6783 6772 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6784 6773 badseg1->s_size == 2 * pgsz);
6785 6774 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6786 6775 ASSERT(badseg1->s_size == pgsz ||
6787 6776 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6788 6777 if (err = segvn_clrszc(badseg1)) {
6789 6778 return (err);
6790 6779 }
6791 6780 ASSERT(badseg1->s_szc == 0);
6792 6781
6793 6782 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6794 6783 uint_t tszc = highbit(tszcvec) - 1;
6795 6784 caddr_t ta = MAX(addr, badseg1->s_base);
6796 6785 caddr_t te;
6797 6786 size_t tpgsz = page_get_pagesize(tszc);
6798 6787
6799 6788 ASSERT(svd->type == MAP_SHARED);
6800 6789 ASSERT(flag == SDR_END);
6801 6790 ASSERT(tszc < szc && tszc > 0);
6802 6791
6803 6792 if (eaddr > badseg1->s_base + badseg1->s_size) {
6804 6793 te = badseg1->s_base + badseg1->s_size;
6805 6794 } else {
6806 6795 te = eaddr;
6807 6796 }
6808 6797
6809 6798 ASSERT(ta <= te);
6810 6799 badseg1->s_szc = tszc;
6811 6800 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6812 6801 if (badseg2 != NULL) {
6813 6802 err = segvn_demote_range(badseg1, ta, te - ta,
6814 6803 SDR_END, tszcvec);
6815 6804 if (err != 0) {
6816 6805 return (err);
6817 6806 }
6818 6807 } else {
6819 6808 return (segvn_demote_range(badseg1, ta,
6820 6809 te - ta, SDR_END, tszcvec));
6821 6810 }
6822 6811 }
6823 6812 }
6824 6813
6825 6814 if (badseg2 == NULL)
6826 6815 return (0);
6827 6816 ASSERT(badseg2->s_szc == szc);
6828 6817 ASSERT(badseg2->s_size == pgsz);
6829 6818 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6830 6819 if (err = segvn_clrszc(badseg2)) {
6831 6820 return (err);
6832 6821 }
6833 6822 ASSERT(badseg2->s_szc == 0);
6834 6823
6835 6824 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6836 6825 uint_t tszc = highbit(tszcvec) - 1;
6837 6826 size_t tpgsz = page_get_pagesize(tszc);
6838 6827
6839 6828 ASSERT(svd->type == MAP_SHARED);
6840 6829 ASSERT(flag == SDR_END);
6841 6830 ASSERT(tszc < szc && tszc > 0);
6842 6831 ASSERT(badseg2->s_base > addr);
6843 6832 ASSERT(eaddr > badseg2->s_base);
6844 6833 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6845 6834
6846 6835 badseg2->s_szc = tszc;
6847 6836 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6848 6837 return (segvn_demote_range(badseg2, badseg2->s_base,
6849 6838 eaddr - badseg2->s_base, SDR_END, tszcvec));
6850 6839 }
6851 6840 }
6852 6841
6853 6842 return (0);
6854 6843 }
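An editorial sketch of the CALC_LPG_REGION rounding used by segvn_demote_range() for SDR_END (clipping to the segment bounds is omitted here; P2ALIGN/P2ROUNDUP are written out as in sysmacros.h): only the large pages that straddle the ends of [addr, addr + len) are demoted.

#include <stdio.h>
#include <stdint.h>

#define	P2ALIGN(x, a)	((x) & -(a))
#define	P2ROUNDUP(x, a)	(-(-(x) & -(a)))

int
main(void)
{
	uintptr_t addr = 0x10003000, len = 0x209000;	/* not large-page aligned */
	uintptr_t pgsz = 0x200000;			/* example large page size */
	uintptr_t lpgaddr = P2ALIGN(addr, pgsz);
	uintptr_t lpgeaddr = P2ROUNDUP(addr + len, pgsz);

	printf("head large page to demote: 0x%lx\n", (unsigned long)lpgaddr);
	printf("tail large page to demote: 0x%lx\n",
	    (unsigned long)(lpgeaddr - pgsz));
	return (0);
}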
6855 6844
6856 6845 static int
6857 6846 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6858 6847 {
6859 6848 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6860 6849 struct vpage *vp, *evp;
6861 6850
6862 6851 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6863 6852
6864 6853 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6865 6854 /*
6866 6855 * If segment protection can be used, simply check against them.
6867 6856 */
6868 6857 if (svd->pageprot == 0) {
6869 6858 int err;
6870 6859
6871 6860 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6872 6861 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6873 6862 return (err);
6874 6863 }
6875 6864
6876 6865 /*
6877 6866 * Have to check down to the vpage level.
6878 6867 */
6879 6868 evp = &svd->vpage[seg_page(seg, addr + len)];
6880 6869 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6881 6870 if ((VPP_PROT(vp) & prot) != prot) {
6882 6871 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6883 6872 return (EACCES);
6884 6873 }
6885 6874 }
6886 6875 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6887 6876 return (0);
6888 6877 }
6889 6878
6890 6879 static int
6891 6880 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6892 6881 {
6893 6882 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6894 6883 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6895 6884
6896 6885 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6897 6886
6898 6887 if (pgno != 0) {
6899 6888 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6900 6889 if (svd->pageprot == 0) {
6901 6890 do {
6902 6891 protv[--pgno] = svd->prot;
6903 6892 } while (pgno != 0);
6904 6893 } else {
6905 6894 size_t pgoff = seg_page(seg, addr);
6906 6895
6907 6896 do {
6908 6897 pgno--;
6909 6898 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6910 6899 } while (pgno != 0);
6911 6900 }
6912 6901 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6913 6902 }
6914 6903 return (0);
6915 6904 }
6916 6905
6917 6906 static u_offset_t
6918 6907 segvn_getoffset(struct seg *seg, caddr_t addr)
6919 6908 {
6920 6909 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6921 6910
6922 6911 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6923 6912
6924 6913 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6925 6914 }
6926 6915
6927 6916 /*ARGSUSED*/
6928 6917 static int
6929 6918 segvn_gettype(struct seg *seg, caddr_t addr)
6930 6919 {
6931 6920 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6932 6921
6933 6922 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6934 6923
6935 6924 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6936 6925 MAP_INITDATA)));
6937 6926 }
6938 6927
6939 6928 /*ARGSUSED*/
6940 6929 static int
6941 6930 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6942 6931 {
6943 6932 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6944 6933
6945 6934 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6946 6935
6947 6936 *vpp = svd->vp;
6948 6937 return (0);
6949 6938 }
6950 6939
6951 6940 /*
6952 6941 * Check to see if it makes sense to do kluster/read ahead to
6953 6942 * addr + delta relative to the mapping at addr. We assume here
6954 6943 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6955 6944 *
6956 6945 * For segvn, we currently "approve" of the action if we are
6957 6946 * still in the segment and it maps from the same vp/off,
6958 6947 * or if the advice stored in segvn_data or vpages allows it.
6959 6948 * Currently, klustering is not allowed only if MADV_RANDOM is set.
6960 6949 */
6961 6950 static int
6962 6951 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6963 6952 {
6964 6953 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6965 6954 struct anon *oap, *ap;
6966 6955 ssize_t pd;
6967 6956 size_t page;
6968 6957 struct vnode *vp1, *vp2;
6969 6958 u_offset_t off1, off2;
6970 6959 struct anon_map *amp;
6971 6960
6972 6961 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6973 6962 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6974 6963 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
6975 6964
6976 6965 if (addr + delta < seg->s_base ||
6977 6966 addr + delta >= (seg->s_base + seg->s_size))
6978 6967 return (-1); /* exceeded segment bounds */
6979 6968
6980 6969 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
6981 6970 page = seg_page(seg, addr);
6982 6971
6983 6972 /*
6984 6973 * Check to see if either of the pages addr or addr + delta
6985 6974 * have advice set that prevents klustering (if MADV_RANDOM advice
6986 6975 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
6987 6976 * is negative).
6988 6977 */
6989 6978 if (svd->advice == MADV_RANDOM ||
6990 6979 svd->advice == MADV_SEQUENTIAL && delta < 0)
6991 6980 return (-1);
6992 6981 else if (svd->pageadvice && svd->vpage) {
6993 6982 struct vpage *bvpp, *evpp;
6994 6983
6995 6984 bvpp = &svd->vpage[page];
6996 6985 evpp = &svd->vpage[page + pd];
6997 6986 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
6998 6987 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
6999 6988 return (-1);
7000 6989 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
7001 6990 VPP_ADVICE(evpp) == MADV_RANDOM)
7002 6991 return (-1);
7003 6992 }
7004 6993
7005 6994 if (svd->type == MAP_SHARED)
7006 6995 return (0); /* shared mapping - all ok */
7007 6996
7008 6997 if ((amp = svd->amp) == NULL)
7009 6998 return (0); /* off original vnode */
7010 6999
7011 7000 page += svd->anon_index;
7012 7001
7013 7002 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7014 7003
7015 7004 oap = anon_get_ptr(amp->ahp, page);
7016 7005 ap = anon_get_ptr(amp->ahp, page + pd);
7017 7006
7018 7007 ANON_LOCK_EXIT(&amp->a_rwlock);
7019 7008
7020 7009 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
7021 7010 return (-1); /* one with and one without an anon */
7022 7011 }
7023 7012
7024 7013 if (oap == NULL) { /* implies that ap == NULL */
7025 7014 return (0); /* off original vnode */
7026 7015 }
7027 7016
7028 7017 /*
7029 7018 * Now we know we have two anon pointers - check to
7030 7019 * see if they happen to be properly allocated.
7031 7020 */
7032 7021
7033 7022 /*
7034 7023 * XXX We cheat here and don't lock the anon slots. We can't because
7035 7024 * we may have been called from the anon layer which might already
7036 7025 * have locked them. We are holding a refcnt on the slots so they
7037 7026 * can't disappear. The worst that will happen is we'll get the wrong
7038 7027 * names (vp, off) for the slots and make a poor klustering decision.
7039 7028 */
7040 7029 swap_xlate(ap, &vp1, &off1);
7041 7030 swap_xlate(oap, &vp2, &off2);
7042 7031
7043 7032
7044 7033 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
7045 7034 return (-1);
7046 7035 return (0);
7047 7036 }
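A hedged user-level illustration of the advice check above: after madvise(MADV_RANDOM) on a mapping, segvn_kluster() answers -1 for every neighbouring page, so faults are satisfied one page at a time with no read-ahead klustering. The file name below is arbitrary.

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/etc/hosts", O_RDONLY);		/* any readable file */
	size_t len = sysconf(_SC_PAGESIZE);
	char *p;
	volatile char c;

	if (fd < 0)
		return (1);
	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	(void) madvise(p, len, MADV_RANDOM);	/* svd->advice = MADV_RANDOM */
	c = p[0];				/* fault: no kluster read-ahead */
	(void) c;
	(void) munmap(p, len);
	(void) close(fd);
	return (0);
}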
7048 7037
7049 7038 /*
7050 7039 * Synchronize primary storage cache with real object in virtual memory.
7051 7040 *
7052 7041 * XXX - Anonymous pages should not be sync'ed out at all.
7053 7042 */
7054 7043 static int
7055 7044 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7056 7045 {
7057 7046 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7058 7047 struct vpage *vpp;
7059 7048 page_t *pp;
7060 7049 u_offset_t offset;
7061 7050 struct vnode *vp;
7062 7051 u_offset_t off;
7063 7052 caddr_t eaddr;
7064 7053 int bflags;
7065 7054 int err = 0;
7066 7055 int segtype;
7067 7056 int pageprot;
7068 7057 int prot;
7069 7058 ulong_t anon_index;
7070 7059 struct anon_map *amp;
7071 7060 struct anon *ap;
7072 7061 anon_sync_obj_t cookie;
7073 7062
7074 7063 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7075 7064
7076 7065 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7077 7066
7078 7067 if (svd->softlockcnt > 0) {
7079 7068 /*
7080 7069 * If this is a shared segment, a non-zero softlockcnt
7081 7070 * means locked pages are still in use.
7082 7071 */
7083 7072 if (svd->type == MAP_SHARED) {
7084 7073 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7085 7074 return (EAGAIN);
7086 7075 }
7087 7076
7088 7077 /*
7089 7078 * flush all pages from seg cache
7090 7079 * otherwise we may deadlock in swap_putpage
7091 7080 * for B_INVAL page (4175402).
7092 7081 *
7093 7082 * Even if we grab segvn WRITER's lock
7094 7083 * here, there might be another thread which could've
7095 7084 * successfully performed lookup/insert just before
7096 7085 * we acquired the lock here. So, grabbing either
7097 7086 * lock here is of not much use. Until we devise
7098 7087 * a strategy at upper layers to solve the
7099 7088 * synchronization issues completely, we expect
7100 7089 * applications to handle this appropriately.
7101 7090 */
7102 7091 segvn_purge(seg);
7103 7092 if (svd->softlockcnt > 0) {
7104 7093 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7105 7094 return (EAGAIN);
7106 7095 }
7107 7096 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7108 7097 svd->amp->a_softlockcnt > 0) {
7109 7098 /*
7110 7099 * Try to purge this amp's entries from pcache. It will
7111 7100 * succeed only if other segments that share the amp have no
7112 7101 * outstanding softlock's.
7113 7102 */
7114 7103 segvn_purge(seg);
7115 7104 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7116 7105 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7117 7106 return (EAGAIN);
7118 7107 }
7119 7108 }
7120 7109
7121 7110 vpp = svd->vpage;
7122 7111 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7123 7112 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7124 7113 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7125 7114
7126 7115 if (attr) {
7127 7116 pageprot = attr & ~(SHARED|PRIVATE);
7128 7117 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7129 7118
7130 7119 /*
7131 7120 * We are done if the segment types don't match
7132 7121 * or if we have segment level protections and
7133 7122 * they don't match.
7134 7123 */
7135 7124 if (svd->type != segtype) {
7136 7125 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7137 7126 return (0);
7138 7127 }
7139 7128 if (vpp == NULL) {
7140 7129 if (svd->prot != pageprot) {
7141 7130 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7142 7131 return (0);
7143 7132 }
7144 7133 prot = svd->prot;
7145 7134 } else
7146 7135 vpp = &svd->vpage[seg_page(seg, addr)];
7147 7136
7148 7137 } else if (svd->vp && svd->amp == NULL &&
7149 7138 (flags & MS_INVALIDATE) == 0) {
7150 7139
7151 7140 /*
7152 7141 * No attributes, no anonymous pages and MS_INVALIDATE flag
7153 7142 * is not on, just use one big request.
7154 7143 */
7155 7144 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7156 7145 bflags, svd->cred, NULL);
7157 7146 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7158 7147 return (err);
7159 7148 }
7160 7149
7161 7150 if ((amp = svd->amp) != NULL)
7162 7151 anon_index = svd->anon_index + seg_page(seg, addr);
7163 7152
7164 7153 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7165 7154 ap = NULL;
7166 7155 if (amp != NULL) {
7167 7156 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7168 7157 anon_array_enter(amp, anon_index, &cookie);
7169 7158 ap = anon_get_ptr(amp->ahp, anon_index++);
7170 7159 if (ap != NULL) {
7171 7160 swap_xlate(ap, &vp, &off);
7172 7161 } else {
7173 7162 vp = svd->vp;
7174 7163 off = offset;
7175 7164 }
7176 7165 anon_array_exit(&cookie);
7177 7166 ANON_LOCK_EXIT(&amp->a_rwlock);
7178 7167 } else {
7179 7168 vp = svd->vp;
7180 7169 off = offset;
7181 7170 }
7182 7171 offset += PAGESIZE;
7183 7172
7184 7173 if (vp == NULL) /* untouched zfod page */
7185 7174 continue;
7186 7175
7187 7176 if (attr) {
7188 7177 if (vpp) {
7189 7178 prot = VPP_PROT(vpp);
7190 7179 vpp++;
7191 7180 }
7192 7181 if (prot != pageprot) {
7193 7182 continue;
7194 7183 }
7195 7184 }
7196 7185
7197 7186 /*
7198 7187 * See if any of these pages are locked -- if so, then we
7199 7188 * will have to truncate an invalidate request at the first
7200 7189 * locked one. We don't need the page_struct_lock to test
7201 7190 * as this is only advisory; even if we acquire it someone
7202 7191 * might race in and lock the page after we unlock and before
7203 7192 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7204 7193 */
7205 7194 if (flags & MS_INVALIDATE) {
7206 7195 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7207 7196 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7208 7197 page_unlock(pp);
7209 7198 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7210 7199 return (EBUSY);
7211 7200 }
7212 7201 if (ap != NULL && pp->p_szc != 0 &&
7213 7202 page_tryupgrade(pp)) {
7214 7203 if (pp->p_lckcnt == 0 &&
7215 7204 pp->p_cowcnt == 0) {
7216 7205 /*
7217 7206 * swapfs VN_DISPOSE() won't
7218 7207 * invalidate large pages.
7219 7208 * Attempt to demote.
7220 7209 * XXX can't help it if it
7221 7210 * fails. But for swapfs
7222 7211 * pages it is no big deal.
7223 7212 */
7224 7213 (void) page_try_demote_pages(
7225 7214 pp);
7226 7215 }
7227 7216 }
7228 7217 page_unlock(pp);
7229 7218 }
7230 7219 } else if (svd->type == MAP_SHARED && amp != NULL) {
7231 7220 /*
7232 7221 * Avoid writing out to disk ISM's large pages
7233 7222 * because segspt_free_pages() relies on NULL an_pvp
7234 7223 * of anon slots of such pages.
7235 7224 */
7236 7225
7237 7226 ASSERT(svd->vp == NULL);
7238 7227 /*
7239 7228 * swapfs uses page_lookup_nowait if not freeing or
7240 7229 * invalidating and skips a page if
7241 7230 * page_lookup_nowait returns NULL.
7242 7231 */
7243 7232 pp = page_lookup_nowait(vp, off, SE_SHARED);
7244 7233 if (pp == NULL) {
7245 7234 continue;
7246 7235 }
7247 7236 if (pp->p_szc != 0) {
7248 7237 page_unlock(pp);
7249 7238 continue;
7250 7239 }
7251 7240
7252 7241 /*
7253 7242 * Note ISM pages are created large so (vp, off)'s
7254 7243 * page cannot suddenly become large after we unlock
7255 7244 * pp.
7256 7245 */
7257 7246 page_unlock(pp);
7258 7247 }
7259 7248 /*
7260 7249 * XXX - Should ultimately try to kluster
7261 7250 * calls to VOP_PUTPAGE() for performance.
7262 7251 */
7263 7252 VN_HOLD(vp);
7264 7253 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7265 7254 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7266 7255 svd->cred, NULL);
7267 7256
7268 7257 VN_RELE(vp);
7269 7258 if (err)
7270 7259 break;
7271 7260 }
7272 7261 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7273 7262 return (err);
7274 7263 }
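An editorial user-level sketch of how msync(3C) reaches this routine (the path and file name are arbitrary): MS_ASYNC becomes B_ASYNC and MS_INVALIDATE becomes B_INVAL in the bflags computed above, and MS_INVALIDATE fails with EBUSY if a page in the range is locked.

#include <sys/mman.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = sysconf(_SC_PAGESIZE);
	int fd = open("/tmp/segvn_sync_demo", O_RDWR | O_CREAT, 0600);
	char *p;

	if (fd < 0 || ftruncate(fd, len) != 0)
		return (1);
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	(void) strcpy(p, "dirty the page");
	(void) msync(p, len, MS_ASYNC);			/* VOP_PUTPAGE(..., B_ASYNC) */
	(void) msync(p, len, MS_SYNC | MS_INVALIDATE);	/* B_INVAL; EBUSY if locked */
	(void) munmap(p, len);
	(void) close(fd);
	return (0);
}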
7275 7264
7276 7265 /*
7277 7266 * Determine if we have data corresponding to pages in the
7278 7267 * primary storage virtual memory cache (i.e., "in core").
7279 7268 */
7280 7269 static size_t
7281 7270 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7282 7271 {
7283 7272 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7284 7273 struct vnode *vp, *avp;
7285 7274 u_offset_t offset, aoffset;
7286 7275 size_t p, ep;
7287 7276 int ret;
7288 7277 struct vpage *vpp;
7289 7278 page_t *pp;
7290 7279 uint_t start;
7291 7280 struct anon_map *amp; /* XXX - for locknest */
7292 7281 struct anon *ap;
7293 7282 uint_t attr;
7294 7283 anon_sync_obj_t cookie;
7295 7284
7296 7285 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7297 7286
7298 7287 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7299 7288 if (svd->amp == NULL && svd->vp == NULL) {
7300 7289 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7301 7290 bzero(vec, btopr(len));
7302 7291 return (len); /* no anonymous pages created yet */
7303 7292 }
7304 7293
7305 7294 p = seg_page(seg, addr);
7306 7295 ep = seg_page(seg, addr + len);
7307 7296 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7308 7297
7309 7298 amp = svd->amp;
7310 7299 for (; p < ep; p++, addr += PAGESIZE) {
7311 7300 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7312 7301 ret = start;
7313 7302 ap = NULL;
7314 7303 avp = NULL;
7315 7304 /* Grab the vnode/offset for the anon slot */
7316 7305 if (amp != NULL) {
7317 7306 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7318 7307 anon_array_enter(amp, svd->anon_index + p, &cookie);
7319 7308 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7320 7309 if (ap != NULL) {
7321 7310 swap_xlate(ap, &avp, &aoffset);
7322 7311 }
7323 7312 anon_array_exit(&cookie);
7324 7313 ANON_LOCK_EXIT(&amp->a_rwlock);
7325 7314 }
7326 7315 if ((avp != NULL) && page_exists(avp, aoffset)) {
7327 7316 /* A page exists for the anon slot */
7328 7317 ret |= SEG_PAGE_INCORE;
7329 7318
7330 7319 /*
7331 7320 * If page is mapped and writable
7332 7321 */
7333 7322 attr = (uint_t)0;
7334 7323 if ((hat_getattr(seg->s_as->a_hat, addr,
7335 7324 &attr) != -1) && (attr & PROT_WRITE)) {
7336 7325 ret |= SEG_PAGE_ANON;
7337 7326 }
7338 7327 /*
7339 7328 * Don't get page_struct lock for lckcnt and cowcnt,
7340 7329 * since this is purely advisory.
7341 7330 */
7342 7331 if ((pp = page_lookup_nowait(avp, aoffset,
7343 7332 SE_SHARED)) != NULL) {
7344 7333 if (pp->p_lckcnt)
7345 7334 ret |= SEG_PAGE_SOFTLOCK;
7346 7335 if (pp->p_cowcnt)
7347 7336 ret |= SEG_PAGE_HASCOW;
7348 7337 page_unlock(pp);
7349 7338 }
7350 7339 }
7351 7340
7352 7341 /* Gather vnode statistics */
7353 7342 vp = svd->vp;
7354 7343 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7355 7344
7356 7345 if (vp != NULL) {
7357 7346 /*
7358 7347 * Try to obtain a "shared" lock on the page
7359 7348 * without blocking. If this fails, determine
7360 7349 * if the page is in memory.
7361 7350 */
7362 7351 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7363 7352 if ((pp == NULL) && (page_exists(vp, offset))) {
7364 7353 /* Page is incore, and is named */
7365 7354 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7366 7355 }
7367 7356 /*
7368 7357 * Don't get page_struct lock for lckcnt and cowcnt,
7369 7358 * since this is purely advisory.
7370 7359 */
7371 7360 if (pp != NULL) {
7372 7361 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7373 7362 if (pp->p_lckcnt)
7374 7363 ret |= SEG_PAGE_SOFTLOCK;
7375 7364 if (pp->p_cowcnt)
7376 7365 ret |= SEG_PAGE_HASCOW;
7377 7366 page_unlock(pp);
7378 7367 }
7379 7368 }
7380 7369
7381 7370 /* Gather virtual page information */
7382 7371 if (vpp) {
7383 7372 if (VPP_ISPPLOCK(vpp))
7384 7373 ret |= SEG_PAGE_LOCKED;
7385 7374 vpp++;
7386 7375 }
7387 7376
7388 7377 *vec++ = (char)ret;
7389 7378 }
7390 7379 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7391 7380 return (len);
7392 7381 }
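For a user-level view of the same residency information, mincore(2) returns a per-page vector whose low bit corresponds to the in-core state assembled above as SEG_PAGE_INCORE (the exact in-kernel path taken by mincore may differ; this sketch is editorial and only shows the user-visible query).

#include <sys/mman.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	size_t pgsz = sysconf(_SC_PAGESIZE);
	size_t len = 4 * pgsz;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	char vec[4];
	size_t i;

	if (p == MAP_FAILED)
		return (1);
	p[0] = 1;				/* fault in only the first page */
	if (mincore(p, len, vec) == 0) {
		for (i = 0; i < len / pgsz; i++)
			printf("page %lu: %s\n", (unsigned long)i,
			    (vec[i] & 1) ? "incore" : "not incore");
	}
	(void) munmap(p, len);
	return (0);
}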
7393 7382
7394 7383 /*
7395 7384 * Statement for p_cowcnts/p_lckcnts.
7396 7385 *
7397 7386 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7398 7387 * irrespective of the following factors or anything else:
7399 7388 *
7400 7389 * (1) anon slots are populated or not
7401 7390 * (2) cow is broken or not
7402 7391 * (3) refcnt on ap is 1 or greater than 1
7403 7392 *
7404 7393 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7405 7394 * and munlock.
7406 7395 *
7407 7396 *
7408 7397 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7409 7398 *
7410 7399 * if vpage has PROT_WRITE
7411 7400 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7412 7401 * else
7413 7402 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7414 7403 *
7415 7404 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7416 7405 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7417 7406 *
7418 7407 * We may also break COW if softlocking on read access in the physio case.
7419 7408 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7420 7409 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7421 7410 * vpage doesn't have PROT_WRITE.
7422 7411 *
7423 7412 *
7424 7413 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7425 7414 *
7426 7415 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7427 7416 * increment p_lckcnt by calling page_subclaim() which takes care of
7428 7417 * availrmem accounting and p_lckcnt overflow.
7429 7418 *
7430 7419 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7431 7420 * increment p_cowcnt by calling page_addclaim() which takes care of
7432 7421 * availrmem availability and p_cowcnt overflow.
7433 7422 */
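An editorial user-level sketch of the copy-on-write transfer described above (the file name is arbitrary): locking a writable MAP_PRIVATE file mapping takes p_cowcnt on the file's page, and the first store breaks COW and transfers that count to the new anonymous page.

#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	size_t len = sysconf(_SC_PAGESIZE);
	int fd = open("/etc/passwd", O_RDONLY);	/* any readable file */
	char *p;

	if (fd < 0)
		return (1);
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return (1);
	(void) mlock(p, len);	/* PROT_WRITE + MAP_PRIVATE: p_cowcnt on file page */
	p[0] = 'x';		/* COW fault: cowcnt moves to the anon copy */
	(void) munlock(p, len);
	(void) munmap(p, len);
	(void) close(fd);
	return (0);
}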
7434 7423
7435 7424 /*
7436 7425 * Lock down (or unlock) pages mapped by this segment.
7437 7426 *
7438 7427 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7439 7428 * At fault time they will be relocated into larger pages.
7440 7429 */
7441 7430 static int
7442 7431 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7443 7432 int attr, int op, ulong_t *lockmap, size_t pos)
7444 7433 {
7445 7434 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7446 7435 struct vpage *vpp;
7447 7436 struct vpage *evp;
7448 7437 page_t *pp;
7449 7438 u_offset_t offset;
7450 7439 u_offset_t off;
7451 7440 int segtype;
7452 7441 int pageprot;
7453 7442 int claim;
7454 7443 struct vnode *vp;
7455 7444 ulong_t anon_index;
7456 7445 struct anon_map *amp;
7457 7446 struct anon *ap;
7458 7447 struct vattr va;
7459 7448 anon_sync_obj_t cookie;
7460 7449 struct kshmid *sp = NULL;
7461 7450 struct proc *p = curproc;
7462 7451 kproject_t *proj = NULL;
7463 7452 int chargeproc = 1;
7464 7453 size_t locked_bytes = 0;
7465 7454 size_t unlocked_bytes = 0;
7466 7455 int err = 0;
7467 7456
7468 7457 /*
7469 7458 * Hold write lock on address space because may split or concatenate
7470 7459 * segments
7471 7460 */
7472 7461 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7473 7462
7474 7463 /*
7475 7464 * If this is a shm, use shm's project and zone, else use
7476 7465 * project and zone of calling process
7477 7466 */
7478 7467
7479 7468 /* Determine if this segment backs a sysV shm */
7480 7469 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7481 7470 ASSERT(svd->type == MAP_SHARED);
7482 7471 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7483 7472 sp = svd->amp->a_sp;
7484 7473 proj = sp->shm_perm.ipc_proj;
7485 7474 chargeproc = 0;
7486 7475 }
7487 7476
7488 7477 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7489 7478 if (attr) {
7490 7479 pageprot = attr & ~(SHARED|PRIVATE);
7491 7480 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7492 7481
7493 7482 /*
7494 7483 * We are done if the segment types don't match
7495 7484 * or if we have segment level protections and
7496 7485 * they don't match.
7497 7486 */
7498 7487 if (svd->type != segtype) {
7499 7488 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7500 7489 return (0);
7501 7490 }
7502 7491 if (svd->pageprot == 0 && svd->prot != pageprot) {
7503 7492 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7504 7493 return (0);
7505 7494 }
7506 7495 }
7507 7496
7508 7497 if (op == MC_LOCK) {
7509 7498 if (svd->tr_state == SEGVN_TR_INIT) {
7510 7499 svd->tr_state = SEGVN_TR_OFF;
7511 7500 } else if (svd->tr_state == SEGVN_TR_ON) {
7512 7501 ASSERT(svd->amp != NULL);
7513 7502 segvn_textunrepl(seg, 0);
7514 7503 ASSERT(svd->amp == NULL &&
7515 7504 svd->tr_state == SEGVN_TR_OFF);
7516 7505 }
7517 7506 }
7518 7507
7519 7508 /*
7520 7509 * If we're locking, then we must create a vpage structure if
7521 7510 * none exists. If we're unlocking, then check to see if there
7522 7511 * is a vpage -- if not, then we could not have locked anything.
7523 7512 */
7524 7513
7525 7514 if ((vpp = svd->vpage) == NULL) {
7526 7515 if (op == MC_LOCK) {
7527 7516 segvn_vpage(seg);
7528 7517 if (svd->vpage == NULL) {
7529 7518 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7530 7519 return (ENOMEM);
7531 7520 }
7532 7521 } else {
7533 7522 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7534 7523 return (0);
7535 7524 }
7536 7525 }
7537 7526
7538 7527 /*
7539 7528 * The anonymous data vector (i.e., previously
7540 7529 * unreferenced mapping to swap space) can be allocated
7541 7530 * by lazily testing for its existence.
7542 7531 */
7543 7532 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7544 7533 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7545 7534 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7546 7535 svd->amp->a_szc = seg->s_szc;
7547 7536 }
7548 7537
7549 7538 if ((amp = svd->amp) != NULL) {
7550 7539 anon_index = svd->anon_index + seg_page(seg, addr);
7551 7540 }
7552 7541
7553 7542 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7554 7543 evp = &svd->vpage[seg_page(seg, addr + len)];
7555 7544
7556 7545 if (sp != NULL)
7557 7546 mutex_enter(&sp->shm_mlock);
7558 7547
7559 7548 /* determine number of unlocked bytes in range for lock operation */
7560 7549 if (op == MC_LOCK) {
7561 7550
7562 7551 if (sp == NULL) {
7563 7552 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7564 7553 vpp++) {
7565 7554 if (!VPP_ISPPLOCK(vpp))
7566 7555 unlocked_bytes += PAGESIZE;
7567 7556 }
7568 7557 } else {
7569 7558 ulong_t i_idx, i_edx;
7570 7559 anon_sync_obj_t i_cookie;
7571 7560 struct anon *i_ap;
7572 7561 struct vnode *i_vp;
7573 7562 u_offset_t i_off;
7574 7563
7575 7564 /* Only count sysV pages once for locked memory */
7576 7565 i_edx = svd->anon_index + seg_page(seg, addr + len);
7577 7566 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7578 7567 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7579 7568 anon_array_enter(amp, i_idx, &i_cookie);
7580 7569 i_ap = anon_get_ptr(amp->ahp, i_idx);
7581 7570 if (i_ap == NULL) {
7582 7571 unlocked_bytes += PAGESIZE;
7583 7572 anon_array_exit(&i_cookie);
7584 7573 continue;
7585 7574 }
7586 7575 swap_xlate(i_ap, &i_vp, &i_off);
7587 7576 anon_array_exit(&i_cookie);
7588 7577 pp = page_lookup(i_vp, i_off, SE_SHARED);
7589 7578 if (pp == NULL) {
7590 7579 unlocked_bytes += PAGESIZE;
7591 7580 continue;
7592 7581 } else if (pp->p_lckcnt == 0)
7593 7582 unlocked_bytes += PAGESIZE;
7594 7583 page_unlock(pp);
7595 7584 }
7596 7585 ANON_LOCK_EXIT(&amp->a_rwlock);
7597 7586 }
7598 7587
7599 7588 mutex_enter(&p->p_lock);
7600 7589 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7601 7590 chargeproc);
7602 7591 mutex_exit(&p->p_lock);
7603 7592
7604 7593 if (err) {
7605 7594 if (sp != NULL)
7606 7595 mutex_exit(&sp->shm_mlock);
7607 7596 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7608 7597 return (err);
7609 7598 }
7610 7599 }
7611 7600 /*
7612 7601 * Loop over all pages in the range. Process if we're locking and
7613 7602 * page has not already been locked in this mapping; or if we're
7614 7603 * unlocking and the page has been locked.
7615 7604 */
7616 7605 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7617 7606 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7618 7607 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7619 7608 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7620 7609 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7621 7610
7622 7611 if (amp != NULL)
7623 7612 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7624 7613 /*
7625 7614 * If this isn't a MAP_NORESERVE segment and
7626 7615 * we're locking, allocate anon slots if they
7627 7616 * don't exist. The page is brought in later on.
7628 7617 */
7629 7618 if (op == MC_LOCK && svd->vp == NULL &&
7630 7619 ((svd->flags & MAP_NORESERVE) == 0) &&
7631 7620 amp != NULL &&
7632 7621 ((ap = anon_get_ptr(amp->ahp, anon_index))
7633 7622 == NULL)) {
7634 7623 anon_array_enter(amp, anon_index, &cookie);
7635 7624
7636 7625 if ((ap = anon_get_ptr(amp->ahp,
7637 7626 anon_index)) == NULL) {
7638 7627 pp = anon_zero(seg, addr, &ap,
7639 7628 svd->cred);
7640 7629 if (pp == NULL) {
7641 7630 anon_array_exit(&cookie);
7642 7631 ANON_LOCK_EXIT(&amp->a_rwlock);
7643 7632 err = ENOMEM;
7644 7633 goto out;
7645 7634 }
7646 7635 ASSERT(anon_get_ptr(amp->ahp,
7647 7636 anon_index) == NULL);
7648 7637 (void) anon_set_ptr(amp->ahp,
7649 7638 anon_index, ap, ANON_SLEEP);
7650 7639 page_unlock(pp);
7651 7640 }
7652 7641 anon_array_exit(&cookie);
7653 7642 }
7654 7643
7655 7644 /*
7656 7645 * Get name for page, accounting for
7657 7646 * existence of private copy.
7658 7647 */
7659 7648 ap = NULL;
7660 7649 if (amp != NULL) {
7661 7650 anon_array_enter(amp, anon_index, &cookie);
7662 7651 ap = anon_get_ptr(amp->ahp, anon_index);
7663 7652 if (ap != NULL) {
7664 7653 swap_xlate(ap, &vp, &off);
7665 7654 } else {
7666 7655 if (svd->vp == NULL &&
7667 7656 (svd->flags & MAP_NORESERVE)) {
7668 7657 anon_array_exit(&cookie);
7669 7658 ANON_LOCK_EXIT(&amp->a_rwlock);
7670 7659 continue;
7671 7660 }
7672 7661 vp = svd->vp;
7673 7662 off = offset;
7674 7663 }
7675 7664 if (op != MC_LOCK || ap == NULL) {
7676 7665 anon_array_exit(&cookie);
7677 7666 ANON_LOCK_EXIT(&amp->a_rwlock);
7678 7667 }
7679 7668 } else {
7680 7669 vp = svd->vp;
7681 7670 off = offset;
7682 7671 }
7683 7672
7684 7673 /*
7685 7674 * Get page frame. It's ok if the page is
7686 7675 * not available when we're unlocking, as this
7687 7676 * may simply mean that a page we locked got
7688 7677 * truncated out of existence after we locked it.
7689 7678 *
7690 7679 * Invoke VOP_GETPAGE() to obtain the page struct
7691 7680 * since we may need to read it from disk if its
7692 7681 * been paged out.
7693 7682 */
7694 7683 if (op != MC_LOCK)
7695 7684 pp = page_lookup(vp, off, SE_SHARED);
7696 7685 else {
7697 7686 page_t *pl[1 + 1];
7698 7687 int error;
7699 7688
7700 7689 ASSERT(vp != NULL);
7701 7690
7702 7691 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7703 7692 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7704 7693 S_OTHER, svd->cred, NULL);
7705 7694
7706 7695 if (error && ap != NULL) {
7707 7696 anon_array_exit(&cookie);
7708 7697 ANON_LOCK_EXIT(&amp->a_rwlock);
7709 7698 }
7710 7699
7711 7700 /*
7712 7701 * If the error is EDEADLK then we must bounce
7713 7702 * up and drop all vm subsystem locks and then
7714 7703 * retry the operation later
7715 7704 * This behavior is a temporary measure because
7716 7705 * ufs/sds logging is badly designed and will
7717 7706 * deadlock if we don't allow this bounce to
7718 7707 * happen. The real solution is to re-design
7719 7708 * the logging code to work properly. See bug
7720 7709 * 4125102 for details of the problem.
7721 7710 */
7722 7711 if (error == EDEADLK) {
7723 7712 err = error;
7724 7713 goto out;
7725 7714 }
7726 7715 /*
7727 7716 * Quit if we fail to fault in the page. Treat
7728 7717 * the failure as an error, unless the addr
7729 7718 * is mapped beyond the end of a file.
7730 7719 */
7731 7720 if (error && svd->vp) {
7732 7721 va.va_mask = AT_SIZE;
7733 7722 if (VOP_GETATTR(svd->vp, &va, 0,
7734 7723 svd->cred, NULL) != 0) {
7735 7724 err = EIO;
7736 7725 goto out;
7737 7726 }
7738 7727 if (btopr(va.va_size) >=
7739 7728 btopr(off + 1)) {
7740 7729 err = EIO;
7741 7730 goto out;
7742 7731 }
7743 7732 goto out;
7744 7733
7745 7734 } else if (error) {
7746 7735 err = EIO;
7747 7736 goto out;
7748 7737 }
7749 7738 pp = pl[0];
7750 7739 ASSERT(pp != NULL);
7751 7740 }
7752 7741
7753 7742 /*
7754 7743 * See Statement at the beginning of this routine.
7755 7744 *
7756 7745 * claim is always set if MAP_PRIVATE and PROT_WRITE
7757 7746 * irrespective of following factors:
7758 7747 *
7759 7748 * (1) anon slots are populated or not
7760 7749 * (2) cow is broken or not
7761 7750 * (3) refcnt on ap is 1 or greater than 1
7762 7751 *
7763 7752 * See 4140683 for details
7764 7753 */
7765 7754 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7766 7755 (svd->type == MAP_PRIVATE));
7767 7756
7768 7757 /*
7769 7758 * Perform page-level operation appropriate to
7770 7759 * operation. If locking, undo the SOFTLOCK
7771 7760 * performed to bring the page into memory
7772 7761 * after setting the lock. If unlocking,
7773 7762 * and no page was found, account for the claim
7774 7763 * separately.
7775 7764 */
7776 7765 if (op == MC_LOCK) {
7777 7766 int ret = 1; /* Assume success */
7778 7767
7779 7768 ASSERT(!VPP_ISPPLOCK(vpp));
7780 7769
7781 7770 ret = page_pp_lock(pp, claim, 0);
7782 7771 if (ap != NULL) {
7783 7772 if (ap->an_pvp != NULL) {
7784 7773 anon_swap_free(ap, pp);
7785 7774 }
7786 7775 anon_array_exit(&cookie);
7787 7776 ANON_LOCK_EXIT(&amp->a_rwlock);
7788 7777 }
7789 7778 if (ret == 0) {
7790 7779 /* locking page failed */
7791 7780 page_unlock(pp);
7792 7781 err = EAGAIN;
7793 7782 goto out;
7794 7783 }
7795 7784 VPP_SETPPLOCK(vpp);
7796 7785 if (sp != NULL) {
7797 7786 if (pp->p_lckcnt == 1)
7798 7787 locked_bytes += PAGESIZE;
7799 7788 } else
7800 7789 locked_bytes += PAGESIZE;
7801 7790
7802 7791 if (lockmap != (ulong_t *)NULL)
7803 7792 BT_SET(lockmap, pos);
7804 7793
7805 7794 page_unlock(pp);
7806 7795 } else {
7807 7796 ASSERT(VPP_ISPPLOCK(vpp));
7808 7797 if (pp != NULL) {
7809 7798 /* sysV pages should be locked */
7810 7799 ASSERT(sp == NULL || pp->p_lckcnt > 0);
7811 7800 page_pp_unlock(pp, claim, 0);
7812 7801 if (sp != NULL) {
7813 7802 if (pp->p_lckcnt == 0)
7814 7803 unlocked_bytes
7815 7804 += PAGESIZE;
7816 7805 } else
7817 7806 unlocked_bytes += PAGESIZE;
7818 7807 page_unlock(pp);
7819 7808 } else {
7820 7809 ASSERT(sp == NULL);
7821 7810 unlocked_bytes += PAGESIZE;
7822 7811 }
7823 7812 VPP_CLRPPLOCK(vpp);
7824 7813 }
7825 7814 }
7826 7815 }
7827 7816 out:
7828 7817 if (op == MC_LOCK) {
7829 7818 /* Credit back bytes that did not get locked */
7830 7819 if ((unlocked_bytes - locked_bytes) > 0) {
7831 7820 if (proj == NULL)
7832 7821 mutex_enter(&p->p_lock);
7833 7822 rctl_decr_locked_mem(p, proj,
7834 7823 (unlocked_bytes - locked_bytes), chargeproc);
7835 7824 if (proj == NULL)
7836 7825 mutex_exit(&p->p_lock);
7837 7826 }
7838 7827
7839 7828 } else {
7840 7829 /* Account bytes that were unlocked */
7841 7830 if (unlocked_bytes > 0) {
7842 7831 if (proj == NULL)
7843 7832 mutex_enter(&p->p_lock);
7844 7833 rctl_decr_locked_mem(p, proj, unlocked_bytes,
7845 7834 chargeproc);
7846 7835 if (proj == NULL)
7847 7836 mutex_exit(&p->p_lock);
7848 7837 }
7849 7838 }
7850 7839 if (sp != NULL)
7851 7840 mutex_exit(&sp->shm_mlock);
7852 7841 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7853 7842
7854 7843 return (err);
7855 7844 }
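An editorial sketch of the attr-filtered entry into segvn_lockop() (assuming the memcntl(2) interface as documented on illumos): a non-zero attr restricts MC_LOCK/MC_UNLOCK to pages whose mapping type and protections match, which is what the pageprot/segtype checks at the top of the routine implement; plain mlock(3C) is the attr == 0 case.

#include <sys/types.h>
#include <sys/mman.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 4 * sysconf(_SC_PAGESIZE);
	caddr_t p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	/* lock, then unlock, only private read/write pages in the range */
	(void) memcntl(p, len, MC_LOCK, 0, PROT_READ | PROT_WRITE | PRIVATE, 0);
	(void) memcntl(p, len, MC_UNLOCK, 0, PROT_READ | PROT_WRITE | PRIVATE, 0);
	(void) munmap(p, len);
	return (0);
}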
7856 7845
7857 7846 /*
7858 7847 * Set advice from user for specified pages
7859 7848 * There are 9 types of advice:
7860 7849 * MADV_NORMAL - Normal (default) behavior (whatever that is)
7861 7850 * MADV_RANDOM - Random page references
7862 7851 * do not allow readahead or 'klustering'
7863 7852 * MADV_SEQUENTIAL - Sequential page references
7864 7853 * Pages previous to the one currently being
7865 7854 * accessed (determined by fault) are 'not needed'
7866 7855 * and are freed immediately
7867 7856 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
7868 7857 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
7869 7858 * MADV_FREE - Contents can be discarded
7870 7859 * MADV_ACCESS_DEFAULT- Default access
7871 7860 * MADV_ACCESS_LWP - Next LWP will access heavily
7872 7861 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
7873 7862 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
 */
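A hedged user-level example of a few of these advice values on an anonymous mapping (editorial; MADV_FREE reaches the anon_disclaim() call later in this routine, and whole-segment MADV_RANDOM simply updates svd->advice):

#include <sys/mman.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	size_t len = 16 * sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	(void) memset(p, 'x', len);		/* populate anon pages */
	(void) madvise(p, len, MADV_FREE);	/* contents may be discarded */
	(void) madvise(p, len, MADV_RANDOM);	/* whole segment: svd->advice */
	(void) munmap(p, len);
	return (0);
}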
7874 7863 static int
7875 7864 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
7876 7865 {
7877 7866 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7878 7867 size_t page;
7879 7868 int err = 0;
7880 7869 int already_set;
7881 7870 struct anon_map *amp;
7882 7871 ulong_t anon_index;
7883 7872 struct seg *next;
7884 7873 lgrp_mem_policy_t policy;
7885 7874 struct seg *prev;
7886 7875 struct vnode *vp;
7887 7876
7888 7877 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7889 7878
7890 7879 /*
7891 7880 * In case of MADV_FREE, we won't be modifying any segment private
7892 7881 * data structures; so, we only need to grab READER's lock
7893 7882 */
7894 7883 if (behav != MADV_FREE) {
7895 7884 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7896 7885 if (svd->tr_state != SEGVN_TR_OFF) {
7897 7886 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7898 7887 return (0);
7899 7888 }
7900 7889 } else {
7901 7890 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7902 7891 }
7903 7892
7904 7893 /*
7905 7894 * Large pages are assumed to be only turned on when accesses to the
7906 7895 * segment's address range have spatial and temporal locality. That
7907 7896 * justifies ignoring MADV_SEQUENTIAL for large page segments.
7908 7897 * Also, ignore advice affecting lgroup memory allocation
7909 7898 * if don't need to do lgroup optimizations on this system
7910 7899 */
7911 7900
7912 7901 if ((behav == MADV_SEQUENTIAL &&
7913 7902 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
7914 7903 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
7915 7904 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
7916 7905 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7917 7906 return (0);
7918 7907 }
7919 7908
7920 7909 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
7921 7910 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
7922 7911 /*
7923 7912 * Since we are going to unload hat mappings
7924 7913 * we first have to flush the cache. Otherwise
7925 7914 * this might lead to system panic if another
7926 7915 * thread is doing physio on the range whose
7927 7916 * mappings are unloaded by madvise(3C).
7928 7917 */
7929 7918 if (svd->softlockcnt > 0) {
7930 7919 /*
7931 7920 * If this is a shared segment, a non-zero softlockcnt
7932 7921 * means locked pages are still in use.
7933 7922 */
7934 7923 if (svd->type == MAP_SHARED) {
7935 7924 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7936 7925 return (EAGAIN);
7937 7926 }
7938 7927 /*
7939 7928 * Since we do have the segvn writers lock
7940 7929 * nobody can fill the cache with entries
7941 7930 * belonging to this seg during the purge.
7942 7931 * The flush either succeeds or we still
7943 7932 * have pending I/Os. In the latter case,
7944 7933 * madvise(3C) fails.
7945 7934 */
7946 7935 segvn_purge(seg);
7947 7936 if (svd->softlockcnt > 0) {
7948 7937 /*
7949 7938 * Since madvise(3C) is advisory and
7950 7939 * it's not part of UNIX98, madvise(3C)
7951 7940 * failure here doesn't cause any hardship.
7952 7941 * Note that we don't block in "as" layer.
7953 7942 */
7954 7943 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7955 7944 return (EAGAIN);
7956 7945 }
7957 7946 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7958 7947 svd->amp->a_softlockcnt > 0) {
7959 7948 /*
7960 7949 * Try to purge this amp's entries from pcache. It
7961 7950 * will succeed only if other segments that share the
7962 7951 * amp have no outstanding softlock's.
7963 7952 */
7964 7953 segvn_purge(seg);
7965 7954 }
7966 7955 }
7967 7956
7968 7957 amp = svd->amp;
7969 7958 vp = svd->vp;
7970 7959 if (behav == MADV_FREE) {
7971 7960 /*
7972 7961 * MADV_FREE is not supported for segments with
7973 7962 * underlying object; if anonmap is NULL, anon slots
7974 7963 * are not yet populated and there is nothing for
7975 7964 * us to do. As MADV_FREE is advisory, we don't
7976 7965 * return error in either case.
7977 7966 */
7978 7967 if (vp != NULL || amp == NULL) {
7979 7968 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7980 7969 return (0);
7981 7970 }
7982 7971
7983 7972 segvn_purge(seg);
7984 7973
7985 7974 page = seg_page(seg, addr);
7986 7975 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7987 7976 anon_disclaim(amp, svd->anon_index + page, len);
7988 7977 ANON_LOCK_EXIT(&amp->a_rwlock);
7989 7978 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7990 7979 return (0);
7991 7980 }
7992 7981
7993 7982 /*
7994 7983 * If advice is to be applied to entire segment,
7995 7984 * use advice field in seg_data structure
7996 7985 * otherwise use appropriate vpage entry.
7997 7986 */
7998 7987 if ((addr == seg->s_base) && (len == seg->s_size)) {
7999 7988 switch (behav) {
8000 7989 case MADV_ACCESS_LWP:
8001 7990 case MADV_ACCESS_MANY:
8002 7991 case MADV_ACCESS_DEFAULT:
8003 7992 /*
8004 7993 * Set memory allocation policy for this segment
8005 7994 */
8006 7995 policy = lgrp_madv_to_policy(behav, len, svd->type);
8007 7996 if (svd->type == MAP_SHARED)
8008 7997 already_set = lgrp_shm_policy_set(policy, amp,
8009 7998 svd->anon_index, vp, svd->offset, len);
8010 7999 else {
8011 8000 /*
8012 8001 * For private memory, need writers lock on
8013 8002 * address space because the segment may be
8014 8003 * split or concatenated when changing policy
8015 8004 */
8016 8005 if (AS_READ_HELD(seg->s_as,
8017 8006 &seg->s_as->a_lock)) {
8018 8007 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8019 8008 return (IE_RETRY);
8020 8009 }
8021 8010
8022 8011 already_set = lgrp_privm_policy_set(policy,
8023 8012 &svd->policy_info, len);
8024 8013 }
8025 8014
8026 8015 /*
8027 8016 * If policy set already and it shouldn't be reapplied,
8028 8017 * don't do anything.
8029 8018 */
8030 8019 if (already_set &&
8031 8020 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8032 8021 break;
8033 8022
8034 8023 /*
8035 8024 * Mark any existing pages in given range for
8036 8025 * migration
8037 8026 */
8038 8027 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8039 8028 vp, svd->offset, 1);
8040 8029
8041 8030 /*
8042 8031 * If same policy set already or this is a shared
8043 8032 * memory segment, don't need to try to concatenate
8044 8033 * segment with adjacent ones.
8045 8034 */
8046 8035 if (already_set || svd->type == MAP_SHARED)
8047 8036 break;
8048 8037
8049 8038 /*
8050 8039 * Try to concatenate this segment with previous
8051 8040 * one and next one, since we changed policy for
8052 8041 * this one and it may be compatible with adjacent
8053 8042 * ones now.
8054 8043 */
8055 8044 prev = AS_SEGPREV(seg->s_as, seg);
8056 8045 next = AS_SEGNEXT(seg->s_as, seg);
8057 8046
8058 8047 if (next && next->s_ops == &segvn_ops &&
8059 8048 addr + len == next->s_base)
8060 8049 (void) segvn_concat(seg, next, 1);
8061 8050
8062 8051 if (prev && prev->s_ops == &segvn_ops &&
8063 8052 addr == prev->s_base + prev->s_size) {
8064 8053 /*
8065 8054 * Drop lock for private data of current
8066 8055 * segment before concatenating (deleting) it
8067 8056 * and return IE_REATTACH to tell as_ctl() that
8068 8057 * current segment has changed
8069 8058 */
8070 8059 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8071 8060 if (!segvn_concat(prev, seg, 1))
8072 8061 err = IE_REATTACH;
8073 8062
8074 8063 return (err);
8075 8064 }
8076 8065 break;
8077 8066
8078 8067 case MADV_SEQUENTIAL:
8079 8068 /*
8080 8069 * unloading mapping guarantees
8081 8070 * detection in segvn_fault
8082 8071 */
8083 8072 ASSERT(seg->s_szc == 0);
8084 8073 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8085 8074 hat_unload(seg->s_as->a_hat, addr, len,
8086 8075 HAT_UNLOAD);
8087 8076 /* FALLTHROUGH */
8088 8077 case MADV_NORMAL:
8089 8078 case MADV_RANDOM:
8090 8079 svd->advice = (uchar_t)behav;
8091 8080 svd->pageadvice = 0;
8092 8081 break;
8093 8082 case MADV_WILLNEED: /* handled in memcntl */
8094 8083 case MADV_DONTNEED: /* handled in memcntl */
8095 8084 case MADV_FREE: /* handled above */
8096 8085 break;
8097 8086 default:
8098 8087 err = EINVAL;
8099 8088 }
8100 8089 } else {
8101 8090 caddr_t eaddr;
8102 8091 struct seg *new_seg;
8103 8092 struct segvn_data *new_svd;
8104 8093 u_offset_t off;
8105 8094 caddr_t oldeaddr;
8106 8095
8107 8096 page = seg_page(seg, addr);
8108 8097
8109 8098 segvn_vpage(seg);
8110 8099 if (svd->vpage == NULL) {
8111 8100 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8112 8101 return (ENOMEM);
8113 8102 }
8114 8103
8115 8104 switch (behav) {
8116 8105 struct vpage *bvpp, *evpp;
8117 8106
8118 8107 case MADV_ACCESS_LWP:
8119 8108 case MADV_ACCESS_MANY:
8120 8109 case MADV_ACCESS_DEFAULT:
8121 8110 /*
8122 8111 * Set memory allocation policy for portion of this
8123 8112 * segment
8124 8113 */
8125 8114
8126 8115 /*
8127 8116 * Align address and length of advice to page
8128 8117 * boundaries for large pages
8129 8118 */
8130 8119 if (seg->s_szc != 0) {
8131 8120 size_t pgsz;
8132 8121
8133 8122 pgsz = page_get_pagesize(seg->s_szc);
8134 8123 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8135 8124 len = P2ROUNDUP(len, pgsz);
8136 8125 }
8137 8126
8138 8127 /*
8139 8128 * Check to see whether policy is set already
8140 8129 */
8141 8130 policy = lgrp_madv_to_policy(behav, len, svd->type);
8142 8131
8143 8132 anon_index = svd->anon_index + page;
8144 8133 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8145 8134
8146 8135 if (svd->type == MAP_SHARED)
8147 8136 already_set = lgrp_shm_policy_set(policy, amp,
8148 8137 anon_index, vp, off, len);
8149 8138 else
8150 8139 already_set =
8151 8140 (policy == svd->policy_info.mem_policy);
8152 8141
8153 8142 /*
8154 8143 * If policy set already and it shouldn't be reapplied,
8155 8144 * don't do anything.
8156 8145 */
8157 8146 if (already_set &&
8158 8147 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8159 8148 break;
8160 8149
8161 8150 /*
8162 8151 * For private memory, need writers lock on
8163 8152 * address space because the segment may be
8164 8153 * split or concatenated when changing policy
8165 8154 */
8166 8155 if (svd->type == MAP_PRIVATE &&
8167 8156 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8168 8157 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8169 8158 return (IE_RETRY);
8170 8159 }
8171 8160
8172 8161 /*
8173 8162 * Mark any existing pages in given range for
8174 8163 * migration
8175 8164 */
8176 8165 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8177 8166 vp, svd->offset, 1);
8178 8167
8179 8168 /*
8180 8169 * Don't need to try to split or concatenate
8181 8170 * segments, since policy is same or this is a shared
8182 8171 * memory segment
8183 8172 */
8184 8173 if (already_set || svd->type == MAP_SHARED)
8185 8174 break;
8186 8175
8187 8176 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8188 8177 ASSERT(svd->amp == NULL);
8189 8178 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8190 8179 ASSERT(svd->softlockcnt == 0);
8191 8180 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8192 8181 HAT_REGION_TEXT);
8193 8182 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8194 8183 }
8195 8184
8196 8185 /*
8197 8186 * Split off new segment if advice only applies to a
8198 8187 * portion of existing segment starting in middle
8199 8188 */
8200 8189 new_seg = NULL;
8201 8190 eaddr = addr + len;
8202 8191 oldeaddr = seg->s_base + seg->s_size;
8203 8192 if (addr > seg->s_base) {
8204 8193 /*
8205 8194 * Must flush I/O page cache
8206 8195 * before splitting segment
8207 8196 */
8208 8197 if (svd->softlockcnt > 0)
8209 8198 segvn_purge(seg);
8210 8199
8211 8200 /*
8212 8201 * Split segment and return IE_REATTACH to tell
8213 8202 * as_ctl() that current segment changed
8214 8203 */
8215 8204 new_seg = segvn_split_seg(seg, addr);
8216 8205 new_svd = (struct segvn_data *)new_seg->s_data;
8217 8206 err = IE_REATTACH;
8218 8207
8219 8208 /*
8220 8209 * If new segment ends where old one
8221 8210 * did, try to concatenate the new
8222 8211 * segment with next one.
8223 8212 */
8224 8213 if (eaddr == oldeaddr) {
8225 8214 /*
8226 8215 * Set policy for new segment
8227 8216 */
8228 8217 (void) lgrp_privm_policy_set(policy,
8229 8218 &new_svd->policy_info,
8230 8219 new_seg->s_size);
8231 8220
8232 8221 next = AS_SEGNEXT(new_seg->s_as,
8233 8222 new_seg);
8234 8223
8235 8224 if (next &&
8236 8225 next->s_ops == &segvn_ops &&
8237 8226 eaddr == next->s_base)
8238 8227 (void) segvn_concat(new_seg,
8239 8228 next, 1);
8240 8229 }
8241 8230 }
8242 8231
8243 8232 /*
8244 8233 * Split off end of existing segment if advice only
8245 8234 * applies to a portion of segment ending before
8246 8235 * end of the existing segment
8247 8236 */
8248 8237 if (eaddr < oldeaddr) {
8249 8238 /*
8250 8239 * Must flush I/O page cache
8251 8240 * before splitting segment
8252 8241 */
8253 8242 if (svd->softlockcnt > 0)
8254 8243 segvn_purge(seg);
8255 8244
8256 8245 /*
8257 8246 * If beginning of old segment was already
8258 8247 * split off, use new segment to split end off
8259 8248 * from.
8260 8249 */
8261 8250 if (new_seg != NULL && new_seg != seg) {
8262 8251 /*
8263 8252 * Split segment
8264 8253 */
8265 8254 (void) segvn_split_seg(new_seg, eaddr);
8266 8255
8267 8256 /*
8268 8257 * Set policy for new segment
8269 8258 */
8270 8259 (void) lgrp_privm_policy_set(policy,
8271 8260 &new_svd->policy_info,
8272 8261 new_seg->s_size);
8273 8262 } else {
8274 8263 /*
8275 8264 * Split segment and return IE_REATTACH
8276 8265 * to tell as_ctl() that current
8277 8266 * segment changed
8278 8267 */
8279 8268 (void) segvn_split_seg(seg, eaddr);
8280 8269 err = IE_REATTACH;
8281 8270
8282 8271 (void) lgrp_privm_policy_set(policy,
8283 8272 &svd->policy_info, seg->s_size);
8284 8273
8285 8274 /*
8286 8275 * If new segment starts where old one
8287 8276 * did, try to concatenate it with
8288 8277 * previous segment.
8289 8278 */
8290 8279 if (addr == seg->s_base) {
8291 8280 prev = AS_SEGPREV(seg->s_as,
8292 8281 seg);
8293 8282
8294 8283 /*
8295 8284 * Drop lock for private data
8296 8285 * of current segment before
8297 8286 * concatenating (deleting) it
8298 8287 */
8299 8288 if (prev &&
8300 8289 prev->s_ops ==
8301 8290 &segvn_ops &&
8302 8291 addr == prev->s_base +
8303 8292 prev->s_size) {
8304 8293 SEGVN_LOCK_EXIT(
8305 8294 seg->s_as,
8306 8295 &svd->lock);
8307 8296 (void) segvn_concat(
8308 8297 prev, seg, 1);
8309 8298 return (err);
8310 8299 }
8311 8300 }
8312 8301 }
8313 8302 }
8314 8303 break;
8315 8304 case MADV_SEQUENTIAL:
8316 8305 ASSERT(seg->s_szc == 0);
8317 8306 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8318 8307 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8319 8308 /* FALLTHROUGH */
8320 8309 case MADV_NORMAL:
8321 8310 case MADV_RANDOM:
8322 8311 bvpp = &svd->vpage[page];
8323 8312 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8324 8313 for (; bvpp < evpp; bvpp++)
8325 8314 VPP_SETADVICE(bvpp, behav);
8326 8315 svd->advice = MADV_NORMAL;
8327 8316 break;
8328 8317 case MADV_WILLNEED: /* handled in memcntl */
8329 8318 case MADV_DONTNEED: /* handled in memcntl */
8330 8319 case MADV_FREE: /* handled above */
8331 8320 break;
8332 8321 default:
8333 8322 err = EINVAL;
8334 8323 }
8335 8324 }
8336 8325 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8337 8326 return (err);
8338 8327 }
8339 8328
8340 8329 /*
8341 8330 * There is one kind of inheritance that can be specified for pages:
8342 8331 *
8343 8332 * SEGP_INH_ZERO - Pages should be zeroed in the child
8344 8333 */
8345 8334 static int
8346 8335 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8347 8336 {
8348 8337 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8349 8338 struct vpage *bvpp, *evpp;
8350 8339 size_t page;
8351 8340 int ret = 0;
8352 8341
8353 8342 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8354 8343
8355 8344 /* Can't support something we don't know about */
8356 8345 if (behav != SEGP_INH_ZERO)
8357 8346 return (ENOTSUP);
8358 8347
8359 8348 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8360 8349
8361 8350 /*
8362 8351 * This must be a straightforward anonymous segment that is mapped
8363 8352 * privately and is not backed by a vnode.
8364 8353 */
8365 8354 if (svd->tr_state != SEGVN_TR_OFF ||
8366 8355 svd->type != MAP_PRIVATE ||
8367 8356 svd->vp != NULL) {
8368 8357 ret = EINVAL;
8369 8358 goto out;
8370 8359 }
8371 8360
8372 8361 /*
8373 8362 	 * If the entire segment has been marked as inherit zero, then there is
8374 8363 	 * no reason to do anything else.
8375 8364 */
8376 8365 if (svd->svn_inz == SEGVN_INZ_ALL) {
8377 8366 ret = 0;
8378 8367 goto out;
8379 8368 }
8380 8369
8381 8370 /*
8382 8371 * If this applies to the entire segment, simply mark it and we're done.
8383 8372 */
8384 8373 if ((addr == seg->s_base) && (len == seg->s_size)) {
8385 8374 svd->svn_inz = SEGVN_INZ_ALL;
8386 8375 ret = 0;
8387 8376 goto out;
8388 8377 }
8389 8378
8390 8379 /*
8391 8380 * We've been asked to mark a subset of this segment as inherit zero,
8392 8381 	 * therefore we need to manipulate its vpages.
8393 8382 */
8394 8383 if (svd->vpage == NULL) {
8395 8384 segvn_vpage(seg);
8396 8385 if (svd->vpage == NULL) {
8397 8386 ret = ENOMEM;
8398 8387 goto out;
8399 8388 }
8400 8389 }
8401 8390
8402 8391 svd->svn_inz = SEGVN_INZ_VPP;
8403 8392 page = seg_page(seg, addr);
8404 8393 bvpp = &svd->vpage[page];
8405 8394 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8406 8395 for (; bvpp < evpp; bvpp++)
8407 8396 VPP_SETINHZERO(bvpp);
8408 8397 ret = 0;
8409 8398
8410 8399 out:
8411 8400 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8412 8401 return (ret);
8413 8402 }
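
Editor's note: the sub-range case above is just index arithmetic on the vpage array; seg_page() yields the first index and len >> PAGESHIFT the count. A minimal userland sketch of that arithmetic, assuming 4 kB base pages and hypothetical segment/advice values (the real struct vpage and VPP_SETINHZERO() are not reproduced here):

#include <stdio.h>
#include <stdint.h>

#define	PAGESHIFT	12			/* assume 4 kB base pages (x86) */
#define	PAGESIZE	(1UL << PAGESHIFT)

int
main(void)
{
	/* hypothetical segment: base 0x10000000 */
	uintptr_t s_base = 0x10000000;
	uintptr_t addr = s_base + 3 * PAGESIZE;	/* advice starts at page 3 */
	size_t len = 4 * PAGESIZE;		/* and covers 4 pages */

	size_t first = (addr - s_base) >> PAGESHIFT;	/* seg_page(seg, addr) */
	size_t count = len >> PAGESHIFT;

	/* vpage entries [first, first + count) would get VPP_SETINHZERO() */
	printf("vpage[%zu] .. vpage[%zu]\n", first, first + count - 1);
	return (0);
}

Running it prints vpage[3] .. vpage[6], i.e. the four pages the inherit-zero request covers.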
8414 8403
8415 8404 /*
8416 8405 * Create a vpage structure for this seg.
8417 8406 */
8418 8407 static void
8419 8408 segvn_vpage(struct seg *seg)
8420 8409 {
8421 8410 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8422 8411 struct vpage *vp, *evp;
8423 8412 static pgcnt_t page_limit = 0;
8424 8413
8425 8414 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8426 8415
8427 8416 /*
8428 8417 * If no vpage structure exists, allocate one. Copy the protections
8429 8418 * and the advice from the segment itself to the individual pages.
8430 8419 */
8431 8420 if (svd->vpage == NULL) {
8432 8421 /*
8433 8422 * Start by calculating the number of pages we must allocate to
8434 8423 		 * track the per-page vpage structs needed for this entire
8435 8424 * segment. If we know now that it will require more than our
8436 8425 * heuristic for the maximum amount of kmem we can consume then
8437 8426 * fail. We do this here, instead of trying to detect this deep
8438 8427 * in page_resv and propagating the error up, since the entire
8439 8428 * memory allocation stack is not amenable to passing this
8440 8429 * back. Instead, it wants to keep trying.
8441 8430 *
8442 8431 * As a heuristic we set a page limit of 5/8s of total_pages
8443 8432 * for this allocation. We use shifts so that no floating
8444 8433 * point conversion takes place and only need to do the
8445 8434 * calculation once.
8446 8435 */
8447 8436 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
8448 8437 pgcnt_t npages = mem_needed >> PAGESHIFT;
8449 8438
8450 8439 if (page_limit == 0)
8451 8440 page_limit = (total_pages >> 1) + (total_pages >> 3);
8452 8441
8453 8442 if (npages > page_limit)
8454 8443 return;
8455 8444
8456 8445 svd->pageadvice = 1;
8457 8446 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
8458 8447 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8459 8448 for (vp = svd->vpage; vp < evp; vp++) {
8460 8449 VPP_SETPROT(vp, svd->prot);
8461 8450 VPP_SETADVICE(vp, svd->advice);
8462 8451 }
8463 8452 }
8464 8453 }
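
Editor's note: the 5/8 heuristic above is simply (total_pages >> 1) + (total_pages >> 3), compared against the number of pages the vpage array itself would occupy. A small standalone sketch of that check; total_pages, the segment size and the stand-in for sizeof (struct vpage) are all made-up illustrative values, not values from this file:

#include <stdio.h>

#define	PAGESHIFT	12	/* assume 4 kB base pages */

int
main(void)
{
	unsigned long long total_pages = 4ULL << 20;	/* pretend ~16 GB of RAM */
	unsigned long long seg_pages = 1ULL << 34;	/* a huge sparse mapping */
	unsigned long long vpage_sz = 1;		/* stand-in for sizeof (struct vpage) */

	/* 1/2 + 1/8 = 5/8 of total_pages, computed with shifts only */
	unsigned long long page_limit = (total_pages >> 1) + (total_pages >> 3);
	unsigned long long npages = (seg_pages * vpage_sz) >> PAGESHIFT;

	printf("limit %llu pages, vpage array needs %llu pages: %s\n",
	    page_limit, npages,
	    npages > page_limit ? "refuse (svd->vpage stays NULL)" : "allocate");
	return (0);
}

With these numbers the vpage array would need more than 5/8 of physical memory, so the allocation is refused and the caller sees svd->vpage == NULL.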
8465 8454
8466 8455 /*
8467 8456 * Dump the pages belonging to this segvn segment.
8468 8457 */
8469 8458 static void
8470 8459 segvn_dump(struct seg *seg)
8471 8460 {
8472 8461 struct segvn_data *svd;
8473 8462 page_t *pp;
8474 8463 struct anon_map *amp;
8475 8464 ulong_t anon_index;
8476 8465 struct vnode *vp;
8477 8466 u_offset_t off, offset;
8478 8467 pfn_t pfn;
8479 8468 pgcnt_t page, npages;
8480 8469 caddr_t addr;
8481 8470
8482 8471 npages = seg_pages(seg);
8483 8472 svd = (struct segvn_data *)seg->s_data;
8484 8473 vp = svd->vp;
8485 8474 off = offset = svd->offset;
8486 8475 addr = seg->s_base;
8487 8476
8488 8477 if ((amp = svd->amp) != NULL) {
8489 8478 anon_index = svd->anon_index;
8490 8479 		ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8491 8480 }
8492 8481
8493 8482 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8494 8483 struct anon *ap;
8495 8484 int we_own_it = 0;
8496 8485
8497 8486 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8498 8487 swap_xlate_nopanic(ap, &vp, &off);
8499 8488 } else {
8500 8489 vp = svd->vp;
8501 8490 off = offset;
8502 8491 }
8503 8492
8504 8493 /*
8505 8494 * If pp == NULL, the page either does not exist
8506 8495 * or is exclusively locked. So determine if it
8507 8496 * exists before searching for it.
8508 8497 */
8509 8498
8510 8499 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8511 8500 we_own_it = 1;
8512 8501 else
8513 8502 pp = page_exists(vp, off);
8514 8503
8515 8504 if (pp) {
8516 8505 pfn = page_pptonum(pp);
8517 8506 dump_addpage(seg->s_as, addr, pfn);
8518 8507 if (we_own_it)
8519 8508 page_unlock(pp);
8520 8509 }
8521 8510 addr += PAGESIZE;
8522 8511 dump_timeleft = dump_timeout;
8523 8512 }
8524 8513
8525 8514 if (amp != NULL)
8526 8515 		ANON_LOCK_EXIT(&amp->a_rwlock);
8527 8516 }
8528 8517
8529 8518 #ifdef DEBUG
8530 8519 static uint32_t segvn_pglock_mtbf = 0;
8531 8520 #endif
8532 8521
8533 8522 #define PCACHE_SHWLIST ((page_t *)-2)
8534 8523 #define NOPCACHE_SHWLIST ((page_t *)-1)
8535 8524
8536 8525 /*
8537 8526 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8538 8527 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8539 8528 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8540 8529 * the same parts of the segment. Currently shadow list creation is only
8541 8530 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8542 8531 * tagged with segment pointer, starting virtual address and length. This
8543 8532 * approach for MAP_SHARED segments may add many pcache entries for the same
8544 8533 * set of pages and lead to long hash chains that decrease pcache lookup
8545 8534 * performance. To avoid this issue for shared segments shared anon map and
8546 8535 * starting anon index are used for pcache entry tagging. This allows all
8547 8536 * segments to share pcache entries for the same anon range and reduces pcache
8548 8537 * chain's length as well as memory overhead from duplicate shadow lists and
8549 8538 * pcache entries.
8550 8539 *
8551 8540 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8552 8541 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8553 8542 * part of softlockcnt accounting is done differently for private and shared
8554 8543 * segments. In private segment case softlock is only incremented when a new
8555 8544 * shadow list is created but not when an existing one is found via
8556 8545 * seg_plookup(). pcache entries have reference count incremented/decremented
8557 8546 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8558 8547 * reference count can be purged (and purging is needed before segment can be
8559 8548 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8560 8549 * decrement softlockcnt. Since in private segment case each of its pcache
8561 8550 * entries only belongs to this segment we can expect that when
8562 8551 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8563 8552 * segment purge will succeed and softlockcnt will drop to 0. In shared
8564 8553 * segment case reference count in pcache entry counts active locks from many
8565 8554 * different segments so we can't expect segment purging to succeed even when
8566 8555 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8567 8556 * segment. To be able to determine when there're no pending pagelocks in
8568 8557 * shared segment case we don't rely on purging to make softlockcnt drop to 0
8569 8558 * but instead softlockcnt is incremented and decremented for every
8570 8559 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8571 8560 * list was created or an existing one was found. When softlockcnt drops to 0
8572 8561 * this segment no longer has any claims for pcached shadow lists and the
8573 8562 * segment can be freed even if there're still active pcache entries
8574 8563 * shared by this segment anon map. Shared segment pcache entries belong to
8575 8564 * anon map and are typically removed when anon map is freed after all
8576 8565 * processes destroy the segments that use this anon map.
8577 8566 */
8578 8567 static int
8579 8568 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8580 8569 enum lock_type type, enum seg_rw rw)
8581 8570 {
8582 8571 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8583 8572 size_t np;
8584 8573 pgcnt_t adjustpages;
8585 8574 pgcnt_t npages;
8586 8575 ulong_t anon_index;
8587 8576 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8588 8577 uint_t error;
8589 8578 struct anon_map *amp;
8590 8579 pgcnt_t anpgcnt;
8591 8580 struct page **pplist, **pl, *pp;
8592 8581 caddr_t a;
8593 8582 size_t page;
8594 8583 caddr_t lpgaddr, lpgeaddr;
8595 8584 anon_sync_obj_t cookie;
8596 8585 int anlock;
8597 8586 struct anon_map *pamp;
8598 8587 caddr_t paddr;
8599 8588 seg_preclaim_cbfunc_t preclaim_callback;
8600 8589 size_t pgsz;
8601 8590 int use_pcache;
8602 8591 size_t wlen;
8603 8592 uint_t pflags = 0;
8604 8593 int sftlck_sbase = 0;
8605 8594 int sftlck_send = 0;
8606 8595
8607 8596 #ifdef DEBUG
8608 8597 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8609 8598 hrtime_t ts = gethrtime();
8610 8599 if ((ts % segvn_pglock_mtbf) == 0) {
8611 8600 return (ENOTSUP);
8612 8601 }
8613 8602 if ((ts % segvn_pglock_mtbf) == 1) {
8614 8603 return (EFAULT);
8615 8604 }
8616 8605 }
8617 8606 #endif
8618 8607
8619 8608 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8620 8609 "segvn_pagelock: start seg %p addr %p", seg, addr);
8621 8610
8622 8611 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8623 8612 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8624 8613
8625 8614 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8626 8615
8627 8616 /*
8628 8617 * for now we only support pagelock to anon memory. We would have to
8629 8618 * check protections for vnode objects and call into the vnode driver.
8630 8619 * That's too much for a fast path. Let the fault entry point handle
8631 8620 * it.
8632 8621 */
8633 8622 if (svd->vp != NULL) {
8634 8623 if (type == L_PAGELOCK) {
8635 8624 error = ENOTSUP;
8636 8625 goto out;
8637 8626 }
8638 8627 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8639 8628 }
8640 8629 if ((amp = svd->amp) == NULL) {
8641 8630 if (type == L_PAGELOCK) {
8642 8631 error = EFAULT;
8643 8632 goto out;
8644 8633 }
8645 8634 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8646 8635 }
8647 8636 if (rw != S_READ && rw != S_WRITE) {
8648 8637 if (type == L_PAGELOCK) {
8649 8638 error = ENOTSUP;
8650 8639 goto out;
8651 8640 }
8652 8641 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8653 8642 }
8654 8643
8655 8644 if (seg->s_szc != 0) {
8656 8645 /*
8657 8646 * We are adjusting the pagelock region to the large page size
8658 8647 * boundary because the unlocked part of a large page cannot
8659 8648 * be freed anyway unless all constituent pages of a large
8660 8649 * page are locked. Bigger regions reduce pcache chain length
8661 8650 * and improve lookup performance. The tradeoff is that the
8662 8651 * very first segvn_pagelock() call for a given page is more
8663 8652 * expensive if only 1 page_t is needed for IO. This is only
8664 8653 * an issue if pcache entry doesn't get reused by several
8665 8654 * subsequent calls. We optimize here for the case when pcache
8666 8655 * is heavily used by repeated IOs to the same address range.
8667 8656 *
8668 8657 * Note segment's page size cannot change while we are holding
8669 8658 * as lock. And then it cannot change while softlockcnt is
8670 8659 * not 0. This will allow us to correctly recalculate large
8671 8660 * page size region for the matching pageunlock/reclaim call
8672 8661 * since as_pageunlock() caller must always match
8673 8662 * as_pagelock() call's addr and len.
8674 8663 *
8675 8664 * For pageunlock *ppp points to the pointer of page_t that
8676 8665 * corresponds to the real unadjusted start address. Similar
8677 8666 * for pagelock *ppp must point to the pointer of page_t that
8678 8667 * corresponds to the real unadjusted start address.
8679 8668 */
8680 8669 pgsz = page_get_pagesize(seg->s_szc);
8681 8670 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8682 8671 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8683 8672 } else if (len < segvn_pglock_comb_thrshld) {
8684 8673 lpgaddr = addr;
8685 8674 lpgeaddr = addr + len;
8686 8675 adjustpages = 0;
8687 8676 pgsz = PAGESIZE;
8688 8677 } else {
8689 8678 /*
8690 8679 * Align the address range of large enough requests to allow
8691 8680 * combining of different shadow lists into 1 to reduce memory
8692 8681 * overhead from potentially overlapping large shadow lists
8693 8682 * (worst case is we have a 1MB IO into buffers with start
8694 8683 * addresses separated by 4K). Alignment is only possible if
8695 8684 * padded chunks have sufficient access permissions. Note
8696 8685 * permissions won't change between L_PAGELOCK and
8697 8686 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8698 8687 * segvn_setprot() to wait until softlockcnt drops to 0. This
8699 8688 * allows us to determine in L_PAGEUNLOCK the same range we
8700 8689 * computed in L_PAGELOCK.
8701 8690 *
8702 8691 * If alignment is limited by segment ends set
8703 8692 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8704 8693 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8705 8694 * per segment counters. In L_PAGEUNLOCK case decrease
8706 8695 * softlockcnt_sbase/softlockcnt_send counters if
8707 8696 * sftlck_sbase/sftlck_send flags are set. When
8708 8697 * softlockcnt_sbase/softlockcnt_send are non 0
8709 8698 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8710 8699 * won't merge the segments. This restriction combined with
8711 8700 * restriction on segment unmapping and splitting for segments
8712 8701 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8713 8702 * correctly determine the same range that was previously
8714 8703 * locked by matching L_PAGELOCK.
8715 8704 */
8716 8705 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8717 8706 pgsz = PAGESIZE;
8718 8707 if (svd->type == MAP_PRIVATE) {
8719 8708 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8720 8709 segvn_pglock_comb_balign);
8721 8710 if (lpgaddr < seg->s_base) {
8722 8711 lpgaddr = seg->s_base;
8723 8712 sftlck_sbase = 1;
8724 8713 }
8725 8714 } else {
8726 8715 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8727 8716 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8728 8717 if (aaix < svd->anon_index) {
8729 8718 lpgaddr = seg->s_base;
8730 8719 sftlck_sbase = 1;
8731 8720 } else {
8732 8721 lpgaddr = addr - ptob(aix - aaix);
8733 8722 ASSERT(lpgaddr >= seg->s_base);
8734 8723 }
8735 8724 }
8736 8725 if (svd->pageprot && lpgaddr != addr) {
8737 8726 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8738 8727 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8739 8728 while (vp < evp) {
8740 8729 if ((VPP_PROT(vp) & protchk) == 0) {
8741 8730 break;
8742 8731 }
8743 8732 vp++;
8744 8733 }
8745 8734 if (vp < evp) {
8746 8735 lpgaddr = addr;
8747 8736 pflags = 0;
8748 8737 }
8749 8738 }
8750 8739 lpgeaddr = addr + len;
8751 8740 if (pflags) {
8752 8741 if (svd->type == MAP_PRIVATE) {
8753 8742 lpgeaddr = (caddr_t)P2ROUNDUP(
8754 8743 (uintptr_t)lpgeaddr,
8755 8744 segvn_pglock_comb_balign);
8756 8745 } else {
8757 8746 ulong_t aix = svd->anon_index +
8758 8747 seg_page(seg, lpgeaddr);
8759 8748 ulong_t aaix = P2ROUNDUP(aix,
8760 8749 segvn_pglock_comb_palign);
8761 8750 if (aaix < aix) {
8762 8751 lpgeaddr = 0;
8763 8752 } else {
8764 8753 lpgeaddr += ptob(aaix - aix);
8765 8754 }
8766 8755 }
8767 8756 if (lpgeaddr == 0 ||
8768 8757 lpgeaddr > seg->s_base + seg->s_size) {
8769 8758 lpgeaddr = seg->s_base + seg->s_size;
8770 8759 sftlck_send = 1;
8771 8760 }
8772 8761 }
8773 8762 if (svd->pageprot && lpgeaddr != addr + len) {
8774 8763 struct vpage *vp;
8775 8764 struct vpage *evp;
8776 8765
8777 8766 vp = &svd->vpage[seg_page(seg, addr + len)];
8778 8767 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8779 8768
8780 8769 while (vp < evp) {
8781 8770 if ((VPP_PROT(vp) & protchk) == 0) {
8782 8771 break;
8783 8772 }
8784 8773 vp++;
8785 8774 }
8786 8775 if (vp < evp) {
8787 8776 lpgeaddr = addr + len;
8788 8777 }
8789 8778 }
8790 8779 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8791 8780 }
8792 8781
8793 8782 /*
8794 8783 * For MAP_SHARED segments we create pcache entries tagged by amp and
8795 8784 * anon index so that we can share pcache entries with other segments
8796 8785 * that map this amp. For private segments pcache entries are tagged
8797 8786 * with segment and virtual address.
8798 8787 */
8799 8788 if (svd->type == MAP_SHARED) {
8800 8789 pamp = amp;
8801 8790 paddr = (caddr_t)((lpgaddr - seg->s_base) +
8802 8791 ptob(svd->anon_index));
8803 8792 preclaim_callback = shamp_reclaim;
8804 8793 } else {
8805 8794 pamp = NULL;
8806 8795 paddr = lpgaddr;
8807 8796 preclaim_callback = segvn_reclaim;
8808 8797 }
8809 8798
8810 8799 if (type == L_PAGEUNLOCK) {
8811 8800 VM_STAT_ADD(segvnvmstats.pagelock[0]);
8812 8801
8813 8802 /*
8814 8803 * update hat ref bits for /proc. We need to make sure
8815 8804 * that threads tracing the ref and mod bits of the
8816 8805 * address space get the right data.
8817 8806 * Note: page ref and mod bits are updated at reclaim time
8818 8807 */
8819 8808 if (seg->s_as->a_vbits) {
8820 8809 for (a = addr; a < addr + len; a += PAGESIZE) {
8821 8810 if (rw == S_WRITE) {
8822 8811 hat_setstat(seg->s_as, a,
8823 8812 PAGESIZE, P_REF | P_MOD);
8824 8813 } else {
8825 8814 hat_setstat(seg->s_as, a,
8826 8815 PAGESIZE, P_REF);
8827 8816 }
8828 8817 }
8829 8818 }
8830 8819
8831 8820 /*
8832 8821 * Check the shadow list entry after the last page used in
8833 8822 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
8834 8823 * was not inserted into pcache and is not large page
8835 8824 * adjusted. In this case call reclaim callback directly and
8836 8825 * don't adjust the shadow list start and size for large
8837 8826 * pages.
8838 8827 */
8839 8828 npages = btop(len);
8840 8829 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
8841 8830 void *ptag;
8842 8831 if (pamp != NULL) {
8843 8832 ASSERT(svd->type == MAP_SHARED);
8844 8833 ptag = (void *)pamp;
8845 8834 paddr = (caddr_t)((addr - seg->s_base) +
8846 8835 ptob(svd->anon_index));
8847 8836 } else {
8848 8837 ptag = (void *)seg;
8849 8838 paddr = addr;
8850 8839 }
8851 8840 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
8852 8841 } else {
8853 8842 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
8854 8843 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
8855 8844 len = lpgeaddr - lpgaddr;
8856 8845 npages = btop(len);
8857 8846 seg_pinactive(seg, pamp, paddr, len,
8858 8847 *ppp - adjustpages, rw, pflags, preclaim_callback);
8859 8848 }
8860 8849
8861 8850 if (pamp != NULL) {
8862 8851 ASSERT(svd->type == MAP_SHARED);
8863 8852 ASSERT(svd->softlockcnt >= npages);
8864 8853 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
8865 8854 }
8866 8855
8867 8856 if (sftlck_sbase) {
8868 8857 ASSERT(svd->softlockcnt_sbase > 0);
8869 8858 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
8870 8859 }
8871 8860 if (sftlck_send) {
8872 8861 ASSERT(svd->softlockcnt_send > 0);
8873 8862 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
8874 8863 }
8875 8864
8876 8865 /*
8877 8866 * If someone is blocked while unmapping, we purge
8878 8867 * segment page cache and thus reclaim pplist synchronously
8879 8868 * without waiting for seg_pasync_thread. This speeds up
8880 8869 * unmapping in cases where munmap(2) is called, while
8881 8870 * raw async i/o is still in progress or where a thread
8882 8871 * exits on data fault in a multithreaded application.
8883 8872 */
8884 8873 if (AS_ISUNMAPWAIT(seg->s_as)) {
8885 8874 if (svd->softlockcnt == 0) {
8886 8875 mutex_enter(&seg->s_as->a_contents);
8887 8876 if (AS_ISUNMAPWAIT(seg->s_as)) {
8888 8877 AS_CLRUNMAPWAIT(seg->s_as);
8889 8878 cv_broadcast(&seg->s_as->a_cv);
8890 8879 }
8891 8880 mutex_exit(&seg->s_as->a_contents);
8892 8881 } else if (pamp == NULL) {
8893 8882 /*
8894 8883 * softlockcnt is not 0 and this is a
8895 8884 * MAP_PRIVATE segment. Try to purge its
8896 8885 * pcache entries to reduce softlockcnt.
8897 8886 * If it drops to 0 segvn_reclaim()
8898 8887 * will wake up a thread waiting on
8899 8888 * unmapwait flag.
8900 8889 *
8901 8890 * We don't purge MAP_SHARED segments with non
8902 8891 * 0 softlockcnt since IO is still in progress
8903 8892 * for such segments.
8904 8893 */
8905 8894 ASSERT(svd->type == MAP_PRIVATE);
8906 8895 segvn_purge(seg);
8907 8896 }
8908 8897 }
8909 8898 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8910 8899 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
8911 8900 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
8912 8901 return (0);
8913 8902 }
8914 8903
8915 8904 /* The L_PAGELOCK case ... */
8916 8905
8917 8906 VM_STAT_ADD(segvnvmstats.pagelock[1]);
8918 8907
8919 8908 /*
8920 8909 * For MAP_SHARED segments we have to check protections before
8921 8910 * seg_plookup() since pcache entries may be shared by many segments
8922 8911 * with potentially different page protections.
8923 8912 */
8924 8913 if (pamp != NULL) {
8925 8914 ASSERT(svd->type == MAP_SHARED);
8926 8915 if (svd->pageprot == 0) {
8927 8916 if ((svd->prot & protchk) == 0) {
8928 8917 error = EACCES;
8929 8918 goto out;
8930 8919 }
8931 8920 } else {
8932 8921 /*
8933 8922 * check page protections
8934 8923 */
8935 8924 caddr_t ea;
8936 8925
8937 8926 if (seg->s_szc) {
8938 8927 a = lpgaddr;
8939 8928 ea = lpgeaddr;
8940 8929 } else {
8941 8930 a = addr;
8942 8931 ea = addr + len;
8943 8932 }
8944 8933 for (; a < ea; a += pgsz) {
8945 8934 struct vpage *vp;
8946 8935
8947 8936 ASSERT(seg->s_szc == 0 ||
8948 8937 sameprot(seg, a, pgsz));
8949 8938 vp = &svd->vpage[seg_page(seg, a)];
8950 8939 if ((VPP_PROT(vp) & protchk) == 0) {
8951 8940 error = EACCES;
8952 8941 goto out;
8953 8942 }
8954 8943 }
8955 8944 }
8956 8945 }
8957 8946
8958 8947 /*
8959 8948 * try to find pages in segment page cache
8960 8949 */
8961 8950 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
8962 8951 if (pplist != NULL) {
8963 8952 if (pamp != NULL) {
8964 8953 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
8965 8954 ASSERT(svd->type == MAP_SHARED);
8966 8955 atomic_add_long((ulong_t *)&svd->softlockcnt,
8967 8956 npages);
8968 8957 }
8969 8958 if (sftlck_sbase) {
8970 8959 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
8971 8960 }
8972 8961 if (sftlck_send) {
8973 8962 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
8974 8963 }
8975 8964 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8976 8965 *ppp = pplist + adjustpages;
8977 8966 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
8978 8967 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
8979 8968 return (0);
8980 8969 }
8981 8970
8982 8971 /*
8983 8972 * For MAP_SHARED segments we already verified above that segment
8984 8973 * protections allow this pagelock operation.
8985 8974 */
8986 8975 if (pamp == NULL) {
8987 8976 ASSERT(svd->type == MAP_PRIVATE);
8988 8977 if (svd->pageprot == 0) {
8989 8978 if ((svd->prot & protchk) == 0) {
8990 8979 error = EACCES;
8991 8980 goto out;
8992 8981 }
8993 8982 if (svd->prot & PROT_WRITE) {
8994 8983 wlen = lpgeaddr - lpgaddr;
8995 8984 } else {
8996 8985 wlen = 0;
8997 8986 ASSERT(rw == S_READ);
8998 8987 }
8999 8988 } else {
9000 8989 int wcont = 1;
9001 8990 /*
9002 8991 * check page protections
9003 8992 */
9004 8993 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9005 8994 struct vpage *vp;
9006 8995
9007 8996 ASSERT(seg->s_szc == 0 ||
9008 8997 sameprot(seg, a, pgsz));
9009 8998 vp = &svd->vpage[seg_page(seg, a)];
9010 8999 if ((VPP_PROT(vp) & protchk) == 0) {
9011 9000 error = EACCES;
9012 9001 goto out;
9013 9002 }
9014 9003 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9015 9004 wlen += pgsz;
9016 9005 } else {
9017 9006 wcont = 0;
9018 9007 ASSERT(rw == S_READ);
9019 9008 }
9020 9009 }
9021 9010 }
9022 9011 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9023 9012 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9024 9013 }
9025 9014
9026 9015 /*
9027 9016 * Only build large page adjusted shadow list if we expect to insert
9028 9017 * it into pcache. For large enough pages it's a big overhead to
9029 9018 * create a shadow list of the entire large page. But this overhead
9030 9019 * should be amortized over repeated pcache hits on subsequent reuse
9031 9020 * of this shadow list (IO into any range within this shadow list will
9032 9021 * find it in pcache since we large page align the request for pcache
9033 9022 * lookups). pcache performance is improved with bigger shadow lists
9034 9023 * as it reduces the time to pcache the entire big segment and reduces
9035 9024 * pcache chain length.
9036 9025 */
9037 9026 if (seg_pinsert_check(seg, pamp, paddr,
9038 9027 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9039 9028 addr = lpgaddr;
9040 9029 len = lpgeaddr - lpgaddr;
9041 9030 use_pcache = 1;
9042 9031 } else {
9043 9032 use_pcache = 0;
9044 9033 /*
9045 9034 * Since this entry will not be inserted into the pcache, we
9046 9035 * will not do any adjustments to the starting address or
9047 9036 * size of the memory to be locked.
9048 9037 */
9049 9038 adjustpages = 0;
9050 9039 }
9051 9040 npages = btop(len);
9052 9041
9053 9042 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9054 9043 pl = pplist;
9055 9044 *ppp = pplist + adjustpages;
9056 9045 /*
9057 9046 * If use_pcache is 0 this shadow list is not large page adjusted.
9058 9047 * Record this info in the last entry of shadow array so that
9059 9048 * L_PAGEUNLOCK can determine if it should large page adjust the
9060 9049 * address range to find the real range that was locked.
9061 9050 */
9062 9051 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9063 9052
9064 9053 page = seg_page(seg, addr);
9065 9054 anon_index = svd->anon_index + page;
9066 9055
9067 9056 anlock = 0;
9068 9057 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9069 9058 ASSERT(amp->a_szc >= seg->s_szc);
9070 9059 anpgcnt = page_get_pagecnt(amp->a_szc);
9071 9060 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9072 9061 struct anon *ap;
9073 9062 struct vnode *vp;
9074 9063 u_offset_t off;
9075 9064
9076 9065 /*
9077 9066 * Lock and unlock anon array only once per large page.
9078 9067 * anon_array_enter() locks the root anon slot according to
9079 9068 * a_szc which can't change while anon map is locked. We lock
9080 9069 * anon the first time through this loop and each time we
9081 9070 * reach anon index that corresponds to a root of a large
9082 9071 * page.
9083 9072 */
9084 9073 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9085 9074 ASSERT(anlock == 0);
9086 9075 anon_array_enter(amp, anon_index, &cookie);
9087 9076 anlock = 1;
9088 9077 }
9089 9078 ap = anon_get_ptr(amp->ahp, anon_index);
9090 9079
9091 9080 /*
9092 9081 * We must never use seg_pcache for COW pages
9093 9082 * because we might end up with original page still
9094 9083 * lying in seg_pcache even after private page is
9095 9084 * created. This leads to data corruption as
9096 9085 * aio_write refers to the page still in cache
9097 9086 * while all other accesses refer to the private
9098 9087 * page.
9099 9088 */
9100 9089 if (ap == NULL || ap->an_refcnt != 1) {
9101 9090 struct vpage *vpage;
9102 9091
9103 9092 if (seg->s_szc) {
9104 9093 error = EFAULT;
9105 9094 break;
9106 9095 }
9107 9096 if (svd->vpage != NULL) {
9108 9097 vpage = &svd->vpage[seg_page(seg, a)];
9109 9098 } else {
9110 9099 vpage = NULL;
9111 9100 }
9112 9101 ASSERT(anlock);
9113 9102 anon_array_exit(&cookie);
9114 9103 anlock = 0;
9115 9104 pp = NULL;
9116 9105 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9117 9106 vpage, &pp, 0, F_INVAL, rw, 1);
9118 9107 if (error) {
9119 9108 error = fc_decode(error);
9120 9109 break;
9121 9110 }
9122 9111 anon_array_enter(amp, anon_index, &cookie);
9123 9112 anlock = 1;
9124 9113 ap = anon_get_ptr(amp->ahp, anon_index);
9125 9114 if (ap == NULL || ap->an_refcnt != 1) {
9126 9115 error = EFAULT;
9127 9116 break;
9128 9117 }
9129 9118 }
9130 9119 swap_xlate(ap, &vp, &off);
9131 9120 pp = page_lookup_nowait(vp, off, SE_SHARED);
9132 9121 if (pp == NULL) {
9133 9122 error = EFAULT;
9134 9123 break;
9135 9124 }
9136 9125 if (ap->an_pvp != NULL) {
9137 9126 anon_swap_free(ap, pp);
9138 9127 }
9139 9128 /*
9140 9129 * Unlock anon if this is the last slot in a large page.
9141 9130 */
9142 9131 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9143 9132 ASSERT(anlock);
9144 9133 anon_array_exit(&cookie);
9145 9134 anlock = 0;
9146 9135 }
9147 9136 *pplist++ = pp;
9148 9137 }
9149 9138 if (anlock) { /* Ensure the lock is dropped */
9150 9139 anon_array_exit(&cookie);
9151 9140 }
9152 9141 	ANON_LOCK_EXIT(&amp->a_rwlock);
9153 9142
9154 9143 if (a >= addr + len) {
9155 9144 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9156 9145 if (pamp != NULL) {
9157 9146 ASSERT(svd->type == MAP_SHARED);
9158 9147 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9159 9148 npages);
9160 9149 wlen = len;
9161 9150 }
9162 9151 if (sftlck_sbase) {
9163 9152 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9164 9153 }
9165 9154 if (sftlck_send) {
9166 9155 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9167 9156 }
9168 9157 if (use_pcache) {
9169 9158 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9170 9159 rw, pflags, preclaim_callback);
9171 9160 }
9172 9161 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9173 9162 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9174 9163 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9175 9164 return (0);
9176 9165 }
9177 9166
9178 9167 pplist = pl;
9179 9168 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9180 9169 while (np > (uint_t)0) {
9181 9170 ASSERT(PAGE_LOCKED(*pplist));
9182 9171 page_unlock(*pplist);
9183 9172 np--;
9184 9173 pplist++;
9185 9174 }
9186 9175 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9187 9176 out:
9188 9177 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9189 9178 *ppp = NULL;
9190 9179 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9191 9180 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9192 9181 return (error);
9193 9182 }
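
Editor's note: the lpgaddr/lpgeaddr computation near the top of this function is ordinary power-of-two alignment. A standalone sketch with simplified restatements of P2ALIGN/P2ROUNDUP (the real macros live in sys/sysmacros.h, and CALC_LPG_REGION additionally clamps the result to the segment bounds), assuming a 2 MB large page and a hypothetical lock request:

#include <stdio.h>
#include <stdint.h>

/* simplified restatements; align must be a power of two */
#define	P2ALIGN(x, align)	((x) & -(align))
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))

#define	PAGESHIFT	12
#define	PAGESIZE	(1UL << PAGESHIFT)

int
main(void)
{
	uintptr_t pgsz = 2UL * 1024 * 1024;	/* assume a 2 MB large page */
	uintptr_t addr = 0x10000000 + 0x3000;	/* hypothetical lock request */
	size_t len = 0x5000;

	uintptr_t lpgaddr = P2ALIGN(addr, pgsz);
	uintptr_t lpgeaddr = P2ROUNDUP(addr + len, pgsz);
	size_t adjustpages = (addr - lpgaddr) >> PAGESHIFT;

	printf("lock region [%lx, %lx), adjustpages %zu\n",
	    (unsigned long)lpgaddr, (unsigned long)lpgeaddr, adjustpages);
	return (0);
}

For this request the region grows to one whole 2 MB page and adjustpages comes out as 3: the number of base pages between the aligned start and the caller's addr, which is exactly how far *ppp is offset into the shadow list.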
9194 9183
9195 9184 /*
9196 9185 * purge any cached pages in the I/O page cache
9197 9186 */
9198 9187 static void
9199 9188 segvn_purge(struct seg *seg)
9200 9189 {
9201 9190 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9202 9191
9203 9192 /*
9204 9193 * pcache is only used by pure anon segments.
9205 9194 */
9206 9195 if (svd->amp == NULL || svd->vp != NULL) {
9207 9196 return;
9208 9197 }
9209 9198
9210 9199 /*
9211 9200 * For MAP_SHARED segments non 0 segment's softlockcnt means
9212 9201 * active IO is still in progress via this segment. So we only
9213 9202 * purge MAP_SHARED segments when their softlockcnt is 0.
9214 9203 */
9215 9204 if (svd->type == MAP_PRIVATE) {
9216 9205 if (svd->softlockcnt) {
9217 9206 seg_ppurge(seg, NULL, 0);
9218 9207 }
9219 9208 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9220 9209 seg_ppurge(seg, svd->amp, 0);
9221 9210 }
9222 9211 }
9223 9212
9224 9213 /*
9225 9214 * If async argument is not 0 we are called from pcache async thread and don't
9226 9215 * hold AS lock.
9227 9216 */
9228 9217
9229 9218 /*ARGSUSED*/
9230 9219 static int
9231 9220 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9232 9221 enum seg_rw rw, int async)
9233 9222 {
9234 9223 struct seg *seg = (struct seg *)ptag;
9235 9224 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9236 9225 pgcnt_t np, npages;
9237 9226 struct page **pl;
9238 9227
9239 9228 npages = np = btop(len);
9240 9229 ASSERT(npages);
9241 9230
9242 9231 ASSERT(svd->vp == NULL && svd->amp != NULL);
9243 9232 ASSERT(svd->softlockcnt >= npages);
9244 9233 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9245 9234
9246 9235 pl = pplist;
9247 9236
9248 9237 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9249 9238 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9250 9239
9251 9240 while (np > (uint_t)0) {
9252 9241 if (rw == S_WRITE) {
9253 9242 hat_setrefmod(*pplist);
9254 9243 } else {
9255 9244 hat_setref(*pplist);
9256 9245 }
9257 9246 page_unlock(*pplist);
9258 9247 np--;
9259 9248 pplist++;
9260 9249 }
9261 9250
9262 9251 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9263 9252
9264 9253 /*
9265 9254 * If we are pcache async thread we don't hold AS lock. This means if
9266 9255 	 * softlockcnt drops to 0 after the decrement below, the address space
9267 9256 	 * may get freed. We can't allow it since after softlock decrement to 0
9268 9257 	 * we still need to access as structure for possible wakeup of unmap
9269 9258 	 * waiters. To prevent the disappearance of as we take this segment's
9270 9259 	 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9271 9260 * make sure this routine completes before segment is freed.
9272 9261 *
9273 9262 * The second complication we have to deal with in async case is a
9274 9263 * possibility of missed wake up of unmap wait thread. When we don't
9275 9264 * hold as lock here we may take a_contents lock before unmap wait
9276 9265 * thread that was first to see softlockcnt was still not 0. As a
9277 9266 * result we'll fail to wake up an unmap wait thread. To avoid this
9278 9267 * race we set nounmapwait flag in as structure if we drop softlockcnt
9279 9268 * to 0 when we were called by pcache async thread. unmapwait thread
9280 9269 * will not block if this flag is set.
9281 9270 */
9282 9271 if (async) {
9283 9272 mutex_enter(&svd->segfree_syncmtx);
9284 9273 }
9285 9274
9286 9275 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9287 9276 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9288 9277 mutex_enter(&seg->s_as->a_contents);
9289 9278 if (async) {
9290 9279 AS_SETNOUNMAPWAIT(seg->s_as);
9291 9280 }
9292 9281 if (AS_ISUNMAPWAIT(seg->s_as)) {
9293 9282 AS_CLRUNMAPWAIT(seg->s_as);
9294 9283 cv_broadcast(&seg->s_as->a_cv);
9295 9284 }
9296 9285 mutex_exit(&seg->s_as->a_contents);
9297 9286 }
9298 9287 }
9299 9288
9300 9289 if (async) {
9301 9290 mutex_exit(&svd->segfree_syncmtx);
9302 9291 }
9303 9292 return (0);
9304 9293 }
9305 9294
9306 9295 /*ARGSUSED*/
9307 9296 static int
9308 9297 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9309 9298 enum seg_rw rw, int async)
9310 9299 {
9311 9300 amp_t *amp = (amp_t *)ptag;
9312 9301 pgcnt_t np, npages;
9313 9302 struct page **pl;
9314 9303
9315 9304 npages = np = btop(len);
9316 9305 ASSERT(npages);
9317 9306 ASSERT(amp->a_softlockcnt >= npages);
9318 9307
9319 9308 pl = pplist;
9320 9309
9321 9310 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9322 9311 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9323 9312
9324 9313 while (np > (uint_t)0) {
9325 9314 if (rw == S_WRITE) {
9326 9315 hat_setrefmod(*pplist);
9327 9316 } else {
9328 9317 hat_setref(*pplist);
9329 9318 }
9330 9319 page_unlock(*pplist);
9331 9320 np--;
9332 9321 pplist++;
9333 9322 }
9334 9323
9335 9324 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9336 9325
9337 9326 /*
9338 9327 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9339 9328 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9340 9329 * and anonmap_purge() acquires a_purgemtx.
9341 9330 */
9342 9331 	mutex_enter(&amp->a_purgemtx);
9343 9332 	if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9344 9333 	    amp->a_purgewait) {
9345 9334 		amp->a_purgewait = 0;
9346 9335 		cv_broadcast(&amp->a_purgecv);
9347 9336 	}
9348 9337 	mutex_exit(&amp->a_purgemtx);
9349 9338 return (0);
9350 9339 }
9351 9340
9352 9341 /*
9353 9342 * get a memory ID for an addr in a given segment
9354 9343 *
9355 9344 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9356 9345 * At fault time they will be relocated into larger pages.
9357 9346 */
9358 9347 static int
9359 9348 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9360 9349 {
9361 9350 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9362 9351 struct anon *ap = NULL;
9363 9352 ulong_t anon_index;
9364 9353 struct anon_map *amp;
9365 9354 anon_sync_obj_t cookie;
9366 9355
9367 9356 if (svd->type == MAP_PRIVATE) {
9368 9357 memidp->val[0] = (uintptr_t)seg->s_as;
9369 9358 memidp->val[1] = (uintptr_t)addr;
9370 9359 return (0);
9371 9360 }
9372 9361
9373 9362 if (svd->type == MAP_SHARED) {
9374 9363 if (svd->vp) {
9375 9364 memidp->val[0] = (uintptr_t)svd->vp;
9376 9365 memidp->val[1] = (u_longlong_t)svd->offset +
9377 9366 (uintptr_t)(addr - seg->s_base);
9378 9367 return (0);
9379 9368 } else {
9380 9369
9381 9370 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9382 9371 if ((amp = svd->amp) != NULL) {
9383 9372 anon_index = svd->anon_index +
9384 9373 seg_page(seg, addr);
9385 9374 }
9386 9375 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9387 9376
9388 9377 ASSERT(amp != NULL);
9389 9378
9390 9379 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9391 9380 anon_array_enter(amp, anon_index, &cookie);
9392 9381 ap = anon_get_ptr(amp->ahp, anon_index);
9393 9382 if (ap == NULL) {
9394 9383 page_t *pp;
9395 9384
9396 9385 pp = anon_zero(seg, addr, &ap, svd->cred);
9397 9386 if (pp == NULL) {
9398 9387 anon_array_exit(&cookie);
9399 9388 					ANON_LOCK_EXIT(&amp->a_rwlock);
9400 9389 return (ENOMEM);
9401 9390 }
9402 9391 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9403 9392 == NULL);
9404 9393 (void) anon_set_ptr(amp->ahp, anon_index,
9405 9394 ap, ANON_SLEEP);
9406 9395 page_unlock(pp);
9407 9396 }
9408 9397
9409 9398 anon_array_exit(&cookie);
9410 9399 			ANON_LOCK_EXIT(&amp->a_rwlock);
9411 9400
9412 9401 memidp->val[0] = (uintptr_t)ap;
9413 9402 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9414 9403 return (0);
9415 9404 }
9416 9405 }
9417 9406 return (EINVAL);
9418 9407 }
9419 9408
9420 9409 static int
9421 9410 sameprot(struct seg *seg, caddr_t a, size_t len)
9422 9411 {
9423 9412 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9424 9413 struct vpage *vpage;
9425 9414 spgcnt_t pages = btop(len);
9426 9415 uint_t prot;
9427 9416
9428 9417 if (svd->pageprot == 0)
9429 9418 return (1);
9430 9419
9431 9420 ASSERT(svd->vpage != NULL);
9432 9421
9433 9422 vpage = &svd->vpage[seg_page(seg, a)];
9434 9423 prot = VPP_PROT(vpage);
9435 9424 vpage++;
9436 9425 pages--;
9437 9426 while (pages-- > 0) {
9438 9427 if (prot != VPP_PROT(vpage))
9439 9428 return (0);
9440 9429 vpage++;
9441 9430 }
9442 9431 return (1);
9443 9432 }
9444 9433
9445 9434 /*
9446 9435 * Get memory allocation policy info for specified address in given segment
9447 9436 */
9448 9437 static lgrp_mem_policy_info_t *
9449 9438 segvn_getpolicy(struct seg *seg, caddr_t addr)
9450 9439 {
9451 9440 struct anon_map *amp;
9452 9441 ulong_t anon_index;
9453 9442 lgrp_mem_policy_info_t *policy_info;
9454 9443 struct segvn_data *svn_data;
9455 9444 u_offset_t vn_off;
9456 9445 vnode_t *vp;
9457 9446
9458 9447 ASSERT(seg != NULL);
9459 9448
9460 9449 svn_data = (struct segvn_data *)seg->s_data;
9461 9450 if (svn_data == NULL)
9462 9451 return (NULL);
9463 9452
9464 9453 /*
9465 9454 * Get policy info for private or shared memory
9466 9455 */
9467 9456 if (svn_data->type != MAP_SHARED) {
9468 9457 if (svn_data->tr_state != SEGVN_TR_ON) {
9469 9458 policy_info = &svn_data->policy_info;
9470 9459 } else {
9471 9460 policy_info = &svn_data->tr_policy_info;
9472 9461 ASSERT(policy_info->mem_policy ==
9473 9462 LGRP_MEM_POLICY_NEXT_SEG);
9474 9463 }
9475 9464 } else {
9476 9465 amp = svn_data->amp;
9477 9466 anon_index = svn_data->anon_index + seg_page(seg, addr);
9478 9467 vp = svn_data->vp;
9479 9468 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9480 9469 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9481 9470 }
9482 9471
9483 9472 return (policy_info);
9484 9473 }
9485 9474
9486 9475 /*
9487 9476 * Bind text vnode segment to an amp. If we bind successfully mappings will be
9488 9477 * established to per vnode mapping per lgroup amp pages instead of to vnode
9489 9478 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9490 9479 * may share the same text replication amp. If a suitable amp doesn't already
9491 9480 * exist in svntr hash table create a new one. We may fail to bind to amp if
9492 9481 * segment is not eligible for text replication. Code below first checks for
9493 9482 * these conditions. If binding is successful segment tr_state is set to on
9494 9483 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9495 9484 * svd->amp remains as NULL.
9496 9485 */
9497 9486 static void
9498 9487 segvn_textrepl(struct seg *seg)
9499 9488 {
9500 9489 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9501 9490 vnode_t *vp = svd->vp;
9502 9491 u_offset_t off = svd->offset;
9503 9492 size_t size = seg->s_size;
9504 9493 u_offset_t eoff = off + size;
9505 9494 uint_t szc = seg->s_szc;
9506 9495 ulong_t hash = SVNTR_HASH_FUNC(vp);
9507 9496 svntr_t *svntrp;
9508 9497 struct vattr va;
9509 9498 proc_t *p = seg->s_as->a_proc;
9510 9499 lgrp_id_t lgrp_id;
9511 9500 lgrp_id_t olid;
9512 9501 int first;
9513 9502 struct anon_map *amp;
9514 9503
9515 9504 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9516 9505 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9517 9506 ASSERT(p != NULL);
9518 9507 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9519 9508 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9520 9509 ASSERT(svd->flags & MAP_TEXT);
9521 9510 ASSERT(svd->type == MAP_PRIVATE);
9522 9511 ASSERT(vp != NULL && svd->amp == NULL);
9523 9512 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9524 9513 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9525 9514 ASSERT(seg->s_as != &kas);
9526 9515 ASSERT(off < eoff);
9527 9516 ASSERT(svntr_hashtab != NULL);
9528 9517
9529 9518 /*
9530 9519 * If numa optimizations are no longer desired bail out.
9531 9520 */
9532 9521 if (!lgrp_optimizations()) {
9533 9522 svd->tr_state = SEGVN_TR_OFF;
9534 9523 return;
9535 9524 }
9536 9525
9537 9526 /*
9538 9527 * Avoid creating anon maps with size bigger than the file size.
9539 9528 * If VOP_GETATTR() call fails bail out.
9540 9529 */
9541 9530 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9542 9531 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9543 9532 svd->tr_state = SEGVN_TR_OFF;
9544 9533 SEGVN_TR_ADDSTAT(gaerr);
9545 9534 return;
9546 9535 }
9547 9536 if (btopr(va.va_size) < btopr(eoff)) {
9548 9537 svd->tr_state = SEGVN_TR_OFF;
9549 9538 SEGVN_TR_ADDSTAT(overmap);
9550 9539 return;
9551 9540 }
9552 9541
9553 9542 /*
9554 9543 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
9555 9544 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
9556 9545 * mapping that checks if trcache for this vnode needs to be
9557 9546 * invalidated can't miss us.
9558 9547 */
9559 9548 if (!(vp->v_flag & VVMEXEC)) {
9560 9549 mutex_enter(&vp->v_lock);
9561 9550 vp->v_flag |= VVMEXEC;
9562 9551 mutex_exit(&vp->v_lock);
9563 9552 }
9564 9553 mutex_enter(&svntr_hashtab[hash].tr_lock);
9565 9554 /*
9566 9555 * Bail out if potentially MAP_SHARED writable mappings exist to this
9567 9556 * vnode. We don't want to use old file contents from existing
9568 9557 * replicas if this mapping was established after the original file
9569 9558 * was changed.
9570 9559 */
9571 9560 if (vn_is_mapped(vp, V_WRITE)) {
9572 9561 mutex_exit(&svntr_hashtab[hash].tr_lock);
9573 9562 svd->tr_state = SEGVN_TR_OFF;
9574 9563 SEGVN_TR_ADDSTAT(wrcnt);
9575 9564 return;
9576 9565 }
9577 9566 svntrp = svntr_hashtab[hash].tr_head;
9578 9567 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9579 9568 ASSERT(svntrp->tr_refcnt != 0);
9580 9569 if (svntrp->tr_vp != vp) {
9581 9570 continue;
9582 9571 }
9583 9572
9584 9573 /*
9585 9574 * Bail out if the file or its attributes were changed after
9586 9575 * this replication entry was created since we need to use the
9587 9576 * latest file contents. Note that mtime test alone is not
9588 9577 * sufficient because a user can explicitly change mtime via
9589 9578 		 * utimes(2) interfaces back to the old value after modifying
9590 9579 * the file contents. To detect this case we also have to test
9591 9580 * ctime which among other things records the time of the last
9592 9581 * mtime change by utimes(2). ctime is not changed when the file
9593 9582 * is only read or executed so we expect that typically existing
9594 9583 * replication amp's can be used most of the time.
9595 9584 */
9596 9585 if (!svntrp->tr_valid ||
9597 9586 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9598 9587 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9599 9588 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9600 9589 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9601 9590 mutex_exit(&svntr_hashtab[hash].tr_lock);
9602 9591 svd->tr_state = SEGVN_TR_OFF;
9603 9592 SEGVN_TR_ADDSTAT(stale);
9604 9593 return;
9605 9594 }
9606 9595 /*
9607 9596 * if off, eoff and szc match current segment we found the
9608 9597 * existing entry we can use.
9609 9598 */
9610 9599 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9611 9600 svntrp->tr_szc == szc) {
9612 9601 break;
9613 9602 }
9614 9603 /*
9615 9604 * Don't create different but overlapping in file offsets
9616 9605 * entries to avoid replication of the same file pages more
9617 9606 * than once per lgroup.
9618 9607 */
9619 9608 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9620 9609 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9621 9610 mutex_exit(&svntr_hashtab[hash].tr_lock);
9622 9611 svd->tr_state = SEGVN_TR_OFF;
9623 9612 SEGVN_TR_ADDSTAT(overlap);
9624 9613 return;
9625 9614 }
9626 9615 }
9627 9616 /*
9628 9617 * If we didn't find existing entry create a new one.
9629 9618 */
9630 9619 if (svntrp == NULL) {
9631 9620 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9632 9621 if (svntrp == NULL) {
9633 9622 mutex_exit(&svntr_hashtab[hash].tr_lock);
9634 9623 svd->tr_state = SEGVN_TR_OFF;
9635 9624 SEGVN_TR_ADDSTAT(nokmem);
9636 9625 return;
9637 9626 }
9638 9627 #ifdef DEBUG
9639 9628 {
9640 9629 lgrp_id_t i;
9641 9630 for (i = 0; i < NLGRPS_MAX; i++) {
9642 9631 ASSERT(svntrp->tr_amp[i] == NULL);
9643 9632 }
9644 9633 }
9645 9634 #endif /* DEBUG */
9646 9635 svntrp->tr_vp = vp;
9647 9636 svntrp->tr_off = off;
9648 9637 svntrp->tr_eoff = eoff;
9649 9638 svntrp->tr_szc = szc;
9650 9639 svntrp->tr_valid = 1;
9651 9640 svntrp->tr_mtime = va.va_mtime;
9652 9641 svntrp->tr_ctime = va.va_ctime;
9653 9642 svntrp->tr_refcnt = 0;
9654 9643 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9655 9644 svntr_hashtab[hash].tr_head = svntrp;
9656 9645 }
9657 9646 first = 1;
9658 9647 again:
9659 9648 /*
9660 9649 * We want to pick a replica with pages on main thread's (t_tid = 1,
9661 9650 * aka T1) lgrp. Currently text replication is only optimized for
9662 9651 * workloads that either have all threads of a process on the same
9663 9652 * lgrp or execute their large text primarily on main thread.
9664 9653 */
9665 9654 lgrp_id = p->p_t1_lgrpid;
9666 9655 if (lgrp_id == LGRP_NONE) {
9667 9656 /*
9668 9657 * In case exec() prefaults text on non main thread use
9669 9658 * current thread lgrpid. It will become main thread anyway
9670 9659 * soon.
9671 9660 */
9672 9661 lgrp_id = lgrp_home_id(curthread);
9673 9662 }
9674 9663 /*
9675 9664 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
9676 9665 * just set it to NLGRPS_MAX if it's different from current process T1
9677 9666 * home lgrp. p_tr_lgrpid is used to detect if process uses text
9678 9667 * replication and T1 new home is different from lgrp used for text
9679 9668 	 * replication. When this happens the asynchronous segvn thread rechecks
9680 9669 * segments should change lgrps used for text replication. If we fail
9681 9670 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX
9682 9671 * without cas if it's not already NLGRPS_MAX and not equal lgrp_id
9683 9672 * we want to use. We don't need to use cas in this case because
9684 9673 * another thread that races in between our non atomic check and set
9685 9674 * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
9686 9675 */
9687 9676 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9688 9677 olid = p->p_tr_lgrpid;
9689 9678 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9690 9679 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9691 9680 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9692 9681 olid) {
9693 9682 olid = p->p_tr_lgrpid;
9694 9683 ASSERT(olid != LGRP_NONE);
9695 9684 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9696 9685 p->p_tr_lgrpid = NLGRPS_MAX;
9697 9686 }
9698 9687 }
9699 9688 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9700 9689 membar_producer();
9701 9690 /*
9702 9691 * lgrp_move_thread() won't schedule async recheck after
9703 9692 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9704 9693 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9705 9694 * is not LGRP_NONE.
9706 9695 */
9707 9696 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9708 9697 p->p_t1_lgrpid != lgrp_id) {
9709 9698 first = 0;
9710 9699 goto again;
9711 9700 }
9712 9701 }
9713 9702 /*
9714 9703 * If no amp was created yet for lgrp_id create a new one as long as
9715 9704 * we have enough memory to afford it.
9716 9705 */
9717 9706 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9718 9707 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9719 9708 if (trmem > segvn_textrepl_max_bytes) {
9720 9709 SEGVN_TR_ADDSTAT(normem);
9721 9710 goto fail;
9722 9711 }
9723 9712 if (anon_try_resv_zone(size, NULL) == 0) {
9724 9713 SEGVN_TR_ADDSTAT(noanon);
9725 9714 goto fail;
9726 9715 }
9727 9716 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9728 9717 if (amp == NULL) {
9729 9718 anon_unresv_zone(size, NULL);
9730 9719 SEGVN_TR_ADDSTAT(nokmem);
9731 9720 goto fail;
9732 9721 }
9733 9722 ASSERT(amp->refcnt == 1);
9734 9723 amp->a_szc = szc;
9735 9724 svntrp->tr_amp[lgrp_id] = amp;
9736 9725 SEGVN_TR_ADDSTAT(newamp);
9737 9726 }
9738 9727 svntrp->tr_refcnt++;
9739 9728 ASSERT(svd->svn_trnext == NULL);
9740 9729 ASSERT(svd->svn_trprev == NULL);
9741 9730 svd->svn_trnext = svntrp->tr_svnhead;
9742 9731 svd->svn_trprev = NULL;
9743 9732 if (svntrp->tr_svnhead != NULL) {
9744 9733 svntrp->tr_svnhead->svn_trprev = svd;
9745 9734 }
9746 9735 svntrp->tr_svnhead = svd;
9747 9736 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9748 9737 ASSERT(amp->refcnt >= 1);
9749 9738 svd->amp = amp;
9750 9739 svd->anon_index = 0;
9751 9740 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9752 9741 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9753 9742 svd->tr_state = SEGVN_TR_ON;
9754 9743 mutex_exit(&svntr_hashtab[hash].tr_lock);
9755 9744 SEGVN_TR_ADDSTAT(repl);
9756 9745 return;
9757 9746 fail:
9758 9747 ASSERT(segvn_textrepl_bytes >= size);
9759 9748 atomic_add_long(&segvn_textrepl_bytes, -size);
9760 9749 ASSERT(svntrp != NULL);
9761 9750 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9762 9751 if (svntrp->tr_refcnt == 0) {
9763 9752 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9764 9753 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9765 9754 mutex_exit(&svntr_hashtab[hash].tr_lock);
9766 9755 kmem_cache_free(svntr_cache, svntrp);
9767 9756 } else {
9768 9757 mutex_exit(&svntr_hashtab[hash].tr_lock);
9769 9758 }
9770 9759 svd->tr_state = SEGVN_TR_OFF;
9771 9760 }
9772 9761
9773 9762 /*
9774 9763  * Convert the seg back to a regular vnode mapping seg by unbinding it from its
9775 9764  * text replication amp. This routine is most typically called when the segment
9776 9765  * is unmapped but can also be called when the segment no longer qualifies for
9777 9766  * text replication (e.g. due to protection changes). If unload_unmap is set,
9778 9767  * use the HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last
9779 9768  * user of the svntr, free all its anon maps and remove it from the hash table.
9780 9769 */
9781 9770 static void
9782 9771 segvn_textunrepl(struct seg *seg, int unload_unmap)
9783 9772 {
9784 9773 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9785 9774 vnode_t *vp = svd->vp;
9786 9775 u_offset_t off = svd->offset;
9787 9776 size_t size = seg->s_size;
9788 9777 u_offset_t eoff = off + size;
9789 9778 uint_t szc = seg->s_szc;
9790 9779 ulong_t hash = SVNTR_HASH_FUNC(vp);
9791 9780 svntr_t *svntrp;
9792 9781 svntr_t **prv_svntrp;
9793 9782 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
9794 9783 lgrp_id_t i;
9795 9784
9796 9785 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9797 9786 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
9798 9787 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9799 9788 ASSERT(svd->tr_state == SEGVN_TR_ON);
9800 9789 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9801 9790 ASSERT(svd->amp != NULL);
9802 9791 ASSERT(svd->amp->refcnt >= 1);
9803 9792 ASSERT(svd->anon_index == 0);
9804 9793 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9805 9794 ASSERT(svntr_hashtab != NULL);
9806 9795
9807 9796 mutex_enter(&svntr_hashtab[hash].tr_lock);
9808 9797 prv_svntrp = &svntr_hashtab[hash].tr_head;
9809 9798 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
9810 9799 ASSERT(svntrp->tr_refcnt != 0);
9811 9800 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
9812 9801 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
9813 9802 break;
9814 9803 }
9815 9804 }
9816 9805 if (svntrp == NULL) {
9817 9806 panic("segvn_textunrepl: svntr record not found");
9818 9807 }
9819 9808 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
9820 9809 panic("segvn_textunrepl: amp mismatch");
9821 9810 }
9822 9811 svd->tr_state = SEGVN_TR_OFF;
9823 9812 svd->amp = NULL;
9824 9813 if (svd->svn_trprev == NULL) {
9825 9814 ASSERT(svntrp->tr_svnhead == svd);
9826 9815 svntrp->tr_svnhead = svd->svn_trnext;
9827 9816 if (svntrp->tr_svnhead != NULL) {
9828 9817 svntrp->tr_svnhead->svn_trprev = NULL;
9829 9818 }
9830 9819 svd->svn_trnext = NULL;
9831 9820 } else {
9832 9821 svd->svn_trprev->svn_trnext = svd->svn_trnext;
9833 9822 if (svd->svn_trnext != NULL) {
9834 9823 svd->svn_trnext->svn_trprev = svd->svn_trprev;
9835 9824 svd->svn_trnext = NULL;
9836 9825 }
9837 9826 svd->svn_trprev = NULL;
9838 9827 }
9839 9828 if (--svntrp->tr_refcnt) {
9840 9829 mutex_exit(&svntr_hashtab[hash].tr_lock);
9841 9830 goto done;
9842 9831 }
9843 9832 *prv_svntrp = svntrp->tr_next;
9844 9833 mutex_exit(&svntr_hashtab[hash].tr_lock);
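	/*
	 * Last reference is gone: free every per-lgroup replica amp and
	 * return its swap reservation and segvn_textrepl_bytes charge.
	 */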
9845 9834 for (i = 0; i < NLGRPS_MAX; i++) {
9846 9835 struct anon_map *amp = svntrp->tr_amp[i];
9847 9836 if (amp == NULL) {
9848 9837 continue;
9849 9838 }
9850 9839 ASSERT(amp->refcnt == 1);
9851 9840 ASSERT(amp->swresv == size);
9852 9841 ASSERT(amp->size == size);
9853 9842 ASSERT(amp->a_szc == szc);
9854 9843 if (amp->a_szc != 0) {
9855 9844 anon_free_pages(amp->ahp, 0, size, szc);
9856 9845 } else {
9857 9846 anon_free(amp->ahp, 0, size);
9858 9847 }
9859 9848 svntrp->tr_amp[i] = NULL;
9860 9849 ASSERT(segvn_textrepl_bytes >= size);
9861 9850 atomic_add_long(&segvn_textrepl_bytes, -size);
9862 9851 anon_unresv_zone(amp->swresv, NULL);
9863 9852 amp->refcnt = 0;
9864 9853 anonmap_free(amp);
9865 9854 }
9866 9855 kmem_cache_free(svntr_cache, svntrp);
9867 9856 done:
9868 9857 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
9869 9858 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
9870 9859 }
9871 9860
9872 9861 /*
9873 9862 * This is called when a MAP_SHARED writable mapping is created to a vnode
9874 9863 * that is currently used for execution (VVMEXEC flag is set). In this case we
9875 9864 * need to prevent further use of existing replicas.
9876 9865 */
9877 9866 static void
9878 9867 segvn_inval_trcache(vnode_t *vp)
9879 9868 {
9880 9869 ulong_t hash = SVNTR_HASH_FUNC(vp);
9881 9870 svntr_t *svntrp;
9882 9871
9883 9872 ASSERT(vp->v_flag & VVMEXEC);
9884 9873
9885 9874 if (svntr_hashtab == NULL) {
9886 9875 return;
9887 9876 }
9888 9877
9889 9878 mutex_enter(&svntr_hashtab[hash].tr_lock);
9890 9879 svntrp = svntr_hashtab[hash].tr_head;
9891 9880 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9892 9881 ASSERT(svntrp->tr_refcnt != 0);
9893 9882 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
9894 9883 svntrp->tr_valid = 0;
9895 9884 }
9896 9885 }
9897 9886 mutex_exit(&svntr_hashtab[hash].tr_lock);
9898 9887 }
9899 9888
9900 9889 static void
9901 9890 segvn_trasync_thread(void)
9902 9891 {
9903 9892 callb_cpr_t cpr_info;
9904 9893 kmutex_t cpr_lock; /* just for CPR stuff */
9905 9894
9906 9895 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
9907 9896
9908 9897 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
9909 9898 callb_generic_cpr, "segvn_async");
9910 9899
9911 9900 if (segvn_update_textrepl_interval == 0) {
9912 9901 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
9913 9902 } else {
9914 9903 segvn_update_textrepl_interval *= hz;
9915 9904 }
9916 9905 (void) timeout(segvn_trupdate_wakeup, NULL,
9917 9906 segvn_update_textrepl_interval);
9918 9907
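	/*
	 * Block (CPR-safe) until segvn_trupdate_wakeup() posts the semaphore,
	 * then rescan the svntr hash table for segments whose replicas may
	 * need to move.
	 */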
9919 9908 for (;;) {
9920 9909 mutex_enter(&cpr_lock);
9921 9910 CALLB_CPR_SAFE_BEGIN(&cpr_info);
9922 9911 mutex_exit(&cpr_lock);
9923 9912 sema_p(&segvn_trasync_sem);
9924 9913 mutex_enter(&cpr_lock);
9925 9914 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
9926 9915 mutex_exit(&cpr_lock);
9927 9916 segvn_trupdate();
9928 9917 }
9929 9918 }
9930 9919
9931 9920 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
9932 9921
9933 9922 static void
9934 9923 segvn_trupdate_wakeup(void *dummy)
9935 9924 {
9936 9925 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
9937 9926
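	/*
	 * Wake the async thread only if new text replication thread
	 * migrations were recorded since the last snapshot.
	 */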
9938 9927 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
9939 9928 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
9940 9929 sema_v(&segvn_trasync_sem);
9941 9930 }
9942 9931
9943 9932 if (!segvn_disable_textrepl_update &&
9944 9933 segvn_update_textrepl_interval != 0) {
9945 9934 (void) timeout(segvn_trupdate_wakeup, dummy,
9946 9935 segvn_update_textrepl_interval);
9947 9936 }
9948 9937 }
9949 9938
9950 9939 static void
9951 9940 segvn_trupdate(void)
9952 9941 {
9953 9942 ulong_t hash;
9954 9943 svntr_t *svntrp;
9955 9944 segvn_data_t *svd;
9956 9945
9957 9946 ASSERT(svntr_hashtab != NULL);
9958 9947
9959 9948 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
9960 9949 mutex_enter(&svntr_hashtab[hash].tr_lock);
9961 9950 svntrp = svntr_hashtab[hash].tr_head;
9962 9951 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9963 9952 ASSERT(svntrp->tr_refcnt != 0);
9964 9953 svd = svntrp->tr_svnhead;
9965 9954 for (; svd != NULL; svd = svd->svn_trnext) {
9966 9955 segvn_trupdate_seg(svd->seg, svd, svntrp,
9967 9956 hash);
9968 9957 }
9969 9958 }
9970 9959 mutex_exit(&svntr_hashtab[hash].tr_lock);
9971 9960 }
9972 9961 }
9973 9962
9974 9963 static void
9975 9964 segvn_trupdate_seg(struct seg *seg,
9976 9965 segvn_data_t *svd,
9977 9966 svntr_t *svntrp,
9978 9967 ulong_t hash)
9979 9968 {
9980 9969 proc_t *p;
9981 9970 lgrp_id_t lgrp_id;
9982 9971 struct as *as;
9983 9972 size_t size;
9984 9973 struct anon_map *amp;
9985 9974
9986 9975 ASSERT(svd->vp != NULL);
9987 9976 ASSERT(svd->vp == svntrp->tr_vp);
9988 9977 ASSERT(svd->offset == svntrp->tr_off);
9989 9978 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
9990 9979 ASSERT(seg != NULL);
9991 9980 ASSERT(svd->seg == seg);
9992 9981 ASSERT(seg->s_data == (void *)svd);
9993 9982 ASSERT(seg->s_szc == svntrp->tr_szc);
9994 9983 ASSERT(svd->tr_state == SEGVN_TR_ON);
9995 9984 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9996 9985 ASSERT(svd->amp != NULL);
9997 9986 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
9998 9987 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
9999 9988 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10000 9989 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10001 9990 ASSERT(svntrp->tr_refcnt != 0);
10002 9991 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10003 9992
10004 9993 as = seg->s_as;
10005 9994 ASSERT(as != NULL && as != &kas);
10006 9995 p = as->a_proc;
10007 9996 ASSERT(p != NULL);
10008 9997 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
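	/*
	 * Nothing to do if no thread lgroup has been recorded for the process
	 * yet (p_t1_lgrpid is LGRP_NONE), or if this segment's replica
	 * already resides on that lgroup.
	 */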
10009 9998 lgrp_id = p->p_t1_lgrpid;
10010 9999 if (lgrp_id == LGRP_NONE) {
10011 10000 return;
10012 10001 }
10013 10002 ASSERT(lgrp_id < NLGRPS_MAX);
10014 10003 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10015 10004 return;
10016 10005 }
10017 10006
10018 10007 /*
10019 10008 	 * Use tryenter locking since we take the as/seg and svntr hash locks
10020 10009 	 * in the reverse order from the synchronous thread.
10021 10010 */
10022 10011 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10023 10012 SEGVN_TR_ADDSTAT(nolock);
10024 10013 if (segvn_lgrp_trthr_migrs_snpsht) {
10025 10014 segvn_lgrp_trthr_migrs_snpsht = 0;
10026 10015 }
10027 10016 return;
10028 10017 }
10029 10018 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10030 10019 AS_LOCK_EXIT(as, &as->a_lock);
10031 10020 SEGVN_TR_ADDSTAT(nolock);
10032 10021 if (segvn_lgrp_trthr_migrs_snpsht) {
10033 10022 segvn_lgrp_trthr_migrs_snpsht = 0;
10034 10023 }
10035 10024 return;
10036 10025 }
10037 10026 size = seg->s_size;
10038 10027 if (svntrp->tr_amp[lgrp_id] == NULL) {
10039 10028 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10040 10029 if (trmem > segvn_textrepl_max_bytes) {
10041 10030 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10042 10031 AS_LOCK_EXIT(as, &as->a_lock);
10043 10032 atomic_add_long(&segvn_textrepl_bytes, -size);
10044 10033 SEGVN_TR_ADDSTAT(normem);
10045 10034 return;
10046 10035 }
10047 10036 if (anon_try_resv_zone(size, NULL) == 0) {
10048 10037 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10049 10038 AS_LOCK_EXIT(as, &as->a_lock);
10050 10039 atomic_add_long(&segvn_textrepl_bytes, -size);
10051 10040 SEGVN_TR_ADDSTAT(noanon);
10052 10041 return;
10053 10042 }
10054 10043 amp = anonmap_alloc(size, size, KM_NOSLEEP);
10055 10044 if (amp == NULL) {
10056 10045 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10057 10046 AS_LOCK_EXIT(as, &as->a_lock);
10058 10047 atomic_add_long(&segvn_textrepl_bytes, -size);
10059 10048 anon_unresv_zone(size, NULL);
10060 10049 SEGVN_TR_ADDSTAT(nokmem);
10061 10050 return;
10062 10051 }
10063 10052 ASSERT(amp->refcnt == 1);
10064 10053 amp->a_szc = seg->s_szc;
10065 10054 svntrp->tr_amp[lgrp_id] = amp;
10066 10055 }
10067 10056 /*
10068 10057 	 * We don't need to drop the bucket lock, but here we give other
10069 10058 	 * threads a chance. svntr and svd can't be unlinked as long as the
10070 10059 	 * segment lock is held as a writer and the AS lock is held as well.
10071 10060 	 * After we retake the bucket lock we'll continue from where we left
10072 10061 	 * off. We'll be able to reach the end of either list since new
10073 10062 	 * entries are always added to the beginning of the lists.
10074 10063 */
10075 10064 mutex_exit(&svntr_hashtab[hash].tr_lock);
10076 10065 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10077 10066 mutex_enter(&svntr_hashtab[hash].tr_lock);
10078 10067
10079 10068 ASSERT(svd->tr_state == SEGVN_TR_ON);
10080 10069 ASSERT(svd->amp != NULL);
10081 10070 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10082 10071 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10083 10072 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10084 10073
10085 10074 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10086 10075 svd->amp = svntrp->tr_amp[lgrp_id];
10087 10076 p->p_tr_lgrpid = NLGRPS_MAX;
10088 10077 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10089 10078 AS_LOCK_EXIT(as, &as->a_lock);
10090 10079
10091 10080 ASSERT(svntrp->tr_refcnt != 0);
10092 10081 ASSERT(svd->vp == svntrp->tr_vp);
10093 10082 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10094 10083 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10095 10084 ASSERT(svd->seg == seg);
10096 10085 ASSERT(svd->tr_state == SEGVN_TR_ON);
10097 10086
10098 10087 SEGVN_TR_ADDSTAT(asyncrepl);
10099 10088 }
↓ open down ↓ |
4694 lines elided |
↑ open up ↑ |