PVN_GETPAGE_{SZ,NUM} are misnamed and unnecessarily complicated
There is really no reason not to allow 8 pages all the time. With the
current logic, we get the following:
Assuming 4kB pages (x86):
_SZ = ptob(8) /* 32kB */
_NUM = 8
Assuming 8kB pages (sparc):
_SZ = ptob(8) /* 64kB */
_NUM = 8
We'd have to deal with 16kB base pages in order for the _NUM #define to not
be 8 (it'd be 4 in that case). So, in the spirit of simplicity, let's just
always grab 8 pages as there are no interesting systems with 16kB+ base pages.
Finally, the defines are poorly named.
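For reference, here is the old conditional definition side by side with the
simplified replacement, as they appear in the diff below (PAGESIZE, ptob()
and btop() are the usual base-page-size conversions). For both 4kB and 8kB
base pages the old logic works out to 8 pages either way; only a 16kB+ base
page would give a smaller count:

/* old: cap the temporary list at 8 pages or 64kB, whichever is smaller */
#define	PVN_MAX_GETPAGE_SZ	0x10000		/* 64kB */
#define	PVN_MAX_GETPAGE_NUM	0x8

#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
#define	PVN_GETPAGE_SZ	ptob(PVN_MAX_GETPAGE_NUM)	/* 8 pages */
#define	PVN_GETPAGE_NUM	PVN_MAX_GETPAGE_NUM
#else
#define	PVN_GETPAGE_SZ	PVN_MAX_GETPAGE_SZ		/* 64kB */
#define	PVN_GETPAGE_NUM	btop(PVN_MAX_GETPAGE_SZ)
#endif

/* new: always 8 pages, named for its one user (segvn_fault's temp list) */
#define	FAULT_TMP_PAGES_NUM	0x8
#define	FAULT_TMP_PAGES_SZ	ptob(FAULT_TMP_PAGES_NUM)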
const-ify segment ops structures
There is no reason to keep the segment ops structures writable.
use NULL capable segop as a shorthand for no-capabilities
Instead of forcing every segment driver to implement a dummy "return 0"
function, handle a NULL capable segop function pointer as a "no capabilities
supported" shorthand.
patch lower-case-segops
use C99 initializers in segment ops structures
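As a tiny illustration (placeholder example_* names, not from the patch) of
what the const-ified, C99-initialized ops tables look like; the real segvn
version is in the diff below. Slots that are not listed, such as .capable,
default to NULL, which ties in with the shorthand above:

const struct seg_ops example_segops = {
	.dup		= example_dup,
	.unmap		= example_unmap,
	.free		= example_free,
	.fault		= example_fault,
	/* unlisted ops (e.g. .capable) are implicitly NULL */
};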
remove whole-process swapping
Long before Unix supported paging, it used process swapping to reclaim
memory. The code is there and in theory it runs when we get *extremely* low
on memory. In practice, it never runs since the definition of low-on-memory
is antiquated. (XXX: define what antiquated means)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
remove xhat
The xhat infrastructure was added to support hardware such as the zulu
graphics card - hardware which had on-board MMUs. The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync. Since the only xhat user
was zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat
--- old/usr/src/uts/common/vm/seg_vn.c
+++ new/usr/src/uts/common/vm/seg_vn.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1986, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright 2015, Joyent, Inc. All rights reserved.
24 24 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
28 28 /* All Rights Reserved */
29 29
30 30 /*
31 31 * University Copyright- Copyright (c) 1982, 1986, 1988
32 32 * The Regents of the University of California
33 33 * All Rights Reserved
34 34 *
35 35 * University Acknowledgment- Portions of this document are derived from
36 36 * software developed by the University of California, Berkeley, and its
37 37 * contributors.
38 38 */
39 39
40 40 /*
41 41 * VM - shared or copy-on-write from a vnode/anonymous memory.
42 42 */
43 43
44 44 #include <sys/types.h>
45 45 #include <sys/param.h>
46 46 #include <sys/t_lock.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/systm.h>
49 49 #include <sys/mman.h>
50 50 #include <sys/debug.h>
51 51 #include <sys/cred.h>
52 52 #include <sys/vmsystm.h>
53 53 #include <sys/tuneable.h>
54 54 #include <sys/bitmap.h>
55 55 #include <sys/swap.h>
56 56 #include <sys/kmem.h>
57 57 #include <sys/sysmacros.h>
58 58 #include <sys/vtrace.h>
59 59 #include <sys/cmn_err.h>
60 60 #include <sys/callb.h>
61 61 #include <sys/vm.h>
62 62 #include <sys/dumphdr.h>
63 63 #include <sys/lgrp.h>
64 64
65 65 #include <vm/hat.h>
66 66 #include <vm/as.h>
67 67 #include <vm/seg.h>
68 68 #include <vm/seg_vn.h>
69 69 #include <vm/pvn.h>
70 70 #include <vm/anon.h>
71 71 #include <vm/page.h>
72 72 #include <vm/vpage.h>
73 73 #include <sys/proc.h>
74 74 #include <sys/task.h>
75 75 #include <sys/project.h>
76 76 #include <sys/zone.h>
77 77 #include <sys/shm_impl.h>
78 78
79 79 /*
80 80 * segvn_fault needs a temporary page list array. To avoid calling kmem all
81 - * the time, it creates a small (PVN_GETPAGE_NUM entry) array and uses it if
82 - * it can. In the rare case when this page list is not large enough, it
83 - * goes and gets a large enough array from kmem.
84 - *
85 - * This small page list array covers either 8 pages or 64kB worth of pages -
86 - * whichever is smaller.
81 + * the time, it creates a small (FAULT_TMP_PAGES_NUM entry) array and uses
82 + * it if it can. In the rare case when this page list is not large enough,
83 + * it goes and gets a large enough array from kmem.
87 84 */
88 -#define PVN_MAX_GETPAGE_SZ 0x10000
89 -#define PVN_MAX_GETPAGE_NUM 0x8
90 -
91 -#if PVN_MAX_GETPAGE_SZ > PVN_MAX_GETPAGE_NUM * PAGESIZE
92 -#define PVN_GETPAGE_SZ ptob(PVN_MAX_GETPAGE_NUM)
93 -#define PVN_GETPAGE_NUM PVN_MAX_GETPAGE_NUM
94 -#else
95 -#define PVN_GETPAGE_SZ PVN_MAX_GETPAGE_SZ
96 -#define PVN_GETPAGE_NUM btop(PVN_MAX_GETPAGE_SZ)
97 -#endif
85 +#define FAULT_TMP_PAGES_NUM 0x8
86 +#define FAULT_TMP_PAGES_SZ ptob(FAULT_TMP_PAGES_NUM)
98 87
99 88 /*
100 89 * Private seg op routines.
101 90 */
102 91 static int segvn_dup(struct seg *seg, struct seg *newseg);
103 92 static int segvn_unmap(struct seg *seg, caddr_t addr, size_t len);
104 93 static void segvn_free(struct seg *seg);
105 94 static faultcode_t segvn_fault(struct hat *hat, struct seg *seg,
106 95 caddr_t addr, size_t len, enum fault_type type,
107 96 enum seg_rw rw);
108 97 static faultcode_t segvn_faulta(struct seg *seg, caddr_t addr);
109 98 static int segvn_setprot(struct seg *seg, caddr_t addr,
110 99 size_t len, uint_t prot);
111 100 static int segvn_checkprot(struct seg *seg, caddr_t addr,
112 101 size_t len, uint_t prot);
113 102 static int segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta);
114 -static size_t segvn_swapout(struct seg *seg);
115 103 static int segvn_sync(struct seg *seg, caddr_t addr, size_t len,
116 104 int attr, uint_t flags);
117 105 static size_t segvn_incore(struct seg *seg, caddr_t addr, size_t len,
118 106 char *vec);
119 107 static int segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
120 108 int attr, int op, ulong_t *lockmap, size_t pos);
121 109 static int segvn_getprot(struct seg *seg, caddr_t addr, size_t len,
122 110 uint_t *protv);
123 111 static u_offset_t segvn_getoffset(struct seg *seg, caddr_t addr);
124 112 static int segvn_gettype(struct seg *seg, caddr_t addr);
125 113 static int segvn_getvp(struct seg *seg, caddr_t addr,
126 114 struct vnode **vpp);
127 115 static int segvn_advise(struct seg *seg, caddr_t addr, size_t len,
128 116 uint_t behav);
129 117 static void segvn_dump(struct seg *seg);
130 118 static int segvn_pagelock(struct seg *seg, caddr_t addr, size_t len,
131 119 struct page ***ppp, enum lock_type type, enum seg_rw rw);
132 120 static int segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len,
133 121 uint_t szc);
134 122 static int segvn_getmemid(struct seg *seg, caddr_t addr,
135 123 memid_t *memidp);
136 124 static lgrp_mem_policy_info_t *segvn_getpolicy(struct seg *, caddr_t);
137 -static int segvn_capable(struct seg *seg, segcapability_t capable);
138 125 static int segvn_inherit(struct seg *, caddr_t, size_t, uint_t);
139 126
140 -struct seg_ops segvn_ops = {
141 - segvn_dup,
142 - segvn_unmap,
143 - segvn_free,
144 - segvn_fault,
145 - segvn_faulta,
146 - segvn_setprot,
147 - segvn_checkprot,
148 - segvn_kluster,
149 - segvn_swapout,
150 - segvn_sync,
151 - segvn_incore,
152 - segvn_lockop,
153 - segvn_getprot,
154 - segvn_getoffset,
155 - segvn_gettype,
156 - segvn_getvp,
157 - segvn_advise,
158 - segvn_dump,
159 - segvn_pagelock,
160 - segvn_setpagesize,
161 - segvn_getmemid,
162 - segvn_getpolicy,
163 - segvn_capable,
164 - segvn_inherit
127 +const struct seg_ops segvn_ops = {
128 + .dup = segvn_dup,
129 + .unmap = segvn_unmap,
130 + .free = segvn_free,
131 + .fault = segvn_fault,
132 + .faulta = segvn_faulta,
133 + .setprot = segvn_setprot,
134 + .checkprot = segvn_checkprot,
135 + .kluster = segvn_kluster,
136 + .sync = segvn_sync,
137 + .incore = segvn_incore,
138 + .lockop = segvn_lockop,
139 + .getprot = segvn_getprot,
140 + .getoffset = segvn_getoffset,
141 + .gettype = segvn_gettype,
142 + .getvp = segvn_getvp,
143 + .advise = segvn_advise,
144 + .dump = segvn_dump,
145 + .pagelock = segvn_pagelock,
146 + .setpagesize = segvn_setpagesize,
147 + .getmemid = segvn_getmemid,
148 + .getpolicy = segvn_getpolicy,
149 + .inherit = segvn_inherit,
165 150 };
166 151
167 152 /*
168 153 * Common zfod structures, provided as a shorthand for others to use.
169 154 */
170 155 static segvn_crargs_t zfod_segvn_crargs =
171 156 SEGVN_ZFOD_ARGS(PROT_ZFOD, PROT_ALL);
172 157 static segvn_crargs_t kzfod_segvn_crargs =
173 158 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_USER,
174 159 PROT_ALL & ~PROT_USER);
175 160 static segvn_crargs_t stack_noexec_crargs =
176 161 SEGVN_ZFOD_ARGS(PROT_ZFOD & ~PROT_EXEC, PROT_ALL);
177 162
178 163 caddr_t zfod_argsp = (caddr_t)&zfod_segvn_crargs; /* user zfod argsp */
179 164 caddr_t kzfod_argsp = (caddr_t)&kzfod_segvn_crargs; /* kernel zfod argsp */
180 165 caddr_t stack_exec_argsp = (caddr_t)&zfod_segvn_crargs; /* executable stack */
181 166 caddr_t stack_noexec_argsp = (caddr_t)&stack_noexec_crargs; /* noexec stack */
182 167
183 168 #define vpgtob(n) ((n) * sizeof (struct vpage)) /* For brevity */
184 169
185 170 size_t segvn_comb_thrshld = UINT_MAX; /* patchable -- see 1196681 */
186 171
187 172 size_t segvn_pglock_comb_thrshld = (1UL << 16); /* 64K */
188 173 size_t segvn_pglock_comb_balign = (1UL << 16); /* 64K */
189 174 uint_t segvn_pglock_comb_bshift;
190 175 size_t segvn_pglock_comb_palign;
191 176
192 177 static int segvn_concat(struct seg *, struct seg *, int);
193 178 static int segvn_extend_prev(struct seg *, struct seg *,
194 179 struct segvn_crargs *, size_t);
195 180 static int segvn_extend_next(struct seg *, struct seg *,
196 181 struct segvn_crargs *, size_t);
197 182 static void segvn_softunlock(struct seg *, caddr_t, size_t, enum seg_rw);
198 183 static void segvn_pagelist_rele(page_t **);
199 184 static void segvn_setvnode_mpss(vnode_t *);
200 185 static void segvn_relocate_pages(page_t **, page_t *);
201 186 static int segvn_full_szcpages(page_t **, uint_t, int *, uint_t *);
202 187 static int segvn_fill_vp_pages(struct segvn_data *, vnode_t *, u_offset_t,
203 188 uint_t, page_t **, page_t **, uint_t *, int *);
204 189 static faultcode_t segvn_fault_vnodepages(struct hat *, struct seg *, caddr_t,
205 190 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
206 191 static faultcode_t segvn_fault_anonpages(struct hat *, struct seg *, caddr_t,
207 192 caddr_t, enum fault_type, enum seg_rw, caddr_t, caddr_t, int);
208 193 static faultcode_t segvn_faultpage(struct hat *, struct seg *, caddr_t,
209 194 u_offset_t, struct vpage *, page_t **, uint_t,
210 195 enum fault_type, enum seg_rw, int);
211 196 static void segvn_vpage(struct seg *);
212 197 static size_t segvn_count_swap_by_vpages(struct seg *);
213 198
214 199 static void segvn_purge(struct seg *seg);
215 200 static int segvn_reclaim(void *, caddr_t, size_t, struct page **,
216 201 enum seg_rw, int);
217 202 static int shamp_reclaim(void *, caddr_t, size_t, struct page **,
218 203 enum seg_rw, int);
219 204
220 205 static int sameprot(struct seg *, caddr_t, size_t);
221 206
222 207 static int segvn_demote_range(struct seg *, caddr_t, size_t, int, uint_t);
223 208 static int segvn_clrszc(struct seg *);
224 209 static struct seg *segvn_split_seg(struct seg *, caddr_t);
225 210 static int segvn_claim_pages(struct seg *, struct vpage *, u_offset_t,
226 211 ulong_t, uint_t);
227 212
228 213 static void segvn_hat_rgn_unload_callback(caddr_t, caddr_t, caddr_t,
229 214 size_t, void *, u_offset_t);
230 215
231 216 static struct kmem_cache *segvn_cache;
232 217 static struct kmem_cache **segvn_szc_cache;
233 218
234 219 #ifdef VM_STATS
235 220 static struct segvnvmstats_str {
236 221 ulong_t fill_vp_pages[31];
237 222 ulong_t fltvnpages[49];
238 223 ulong_t fullszcpages[10];
239 224 ulong_t relocatepages[3];
240 225 ulong_t fltanpages[17];
241 226 ulong_t pagelock[2];
242 227 ulong_t demoterange[3];
243 228 } segvnvmstats;
244 229 #endif /* VM_STATS */
245 230
246 231 #define SDR_RANGE 1 /* demote entire range */
247 232 #define SDR_END 2 /* demote non aligned ends only */
248 233
249 234 #define CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr) { \
250 235 if ((len) != 0) { \
251 236 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)(addr), pgsz); \
252 237 ASSERT(lpgaddr >= (seg)->s_base); \
253 238 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)((addr) + \
254 239 (len)), pgsz); \
255 240 ASSERT(lpgeaddr > lpgaddr); \
256 241 ASSERT(lpgeaddr <= (seg)->s_base + (seg)->s_size); \
257 242 } else { \
258 243 lpgeaddr = lpgaddr = (addr); \
259 244 } \
260 245 }
261 246
262 247 /*ARGSUSED*/
263 248 static int
264 249 segvn_cache_constructor(void *buf, void *cdrarg, int kmflags)
265 250 {
266 251 struct segvn_data *svd = buf;
267 252
268 253 rw_init(&svd->lock, NULL, RW_DEFAULT, NULL);
269 254 mutex_init(&svd->segfree_syncmtx, NULL, MUTEX_DEFAULT, NULL);
270 255 svd->svn_trnext = svd->svn_trprev = NULL;
271 256 return (0);
272 257 }
273 258
274 259 /*ARGSUSED1*/
275 260 static void
276 261 segvn_cache_destructor(void *buf, void *cdrarg)
277 262 {
278 263 struct segvn_data *svd = buf;
279 264
280 265 rw_destroy(&svd->lock);
281 266 mutex_destroy(&svd->segfree_syncmtx);
282 267 }
283 268
284 269 /*ARGSUSED*/
285 270 static int
286 271 svntr_cache_constructor(void *buf, void *cdrarg, int kmflags)
287 272 {
288 273 bzero(buf, sizeof (svntr_t));
289 274 return (0);
290 275 }
291 276
292 277 /*
293 278 * Patching this variable to non-zero allows the system to run with
294 279 * stacks marked as "not executable". It's a bit of a kludge, but is
295 280 * provided as a tweakable for platforms that export those ABIs
296 281 * (e.g. sparc V8) that have executable stacks enabled by default.
297 282 * There are also some restrictions for platforms that don't actually
298 283 * implement 'noexec' protections.
299 284 *
300 285 * Once enabled, the system is (therefore) unable to provide a fully
301 286 * ABI-compliant execution environment, though practically speaking,
302 287 * most everything works. The exceptions are generally some interpreters
303 288 * and debuggers that create executable code on the stack and jump
304 289 * into it (without explicitly mprotecting the address range to include
305 290 * PROT_EXEC).
306 291 *
307 292 * One important class of applications that are disabled are those
308 293 * that have been transformed into malicious agents using one of the
309 294 * numerous "buffer overflow" attacks. See 4007890.
310 295 */
311 296 int noexec_user_stack = 0;
312 297 int noexec_user_stack_log = 1;
313 298
314 299 int segvn_lpg_disable = 0;
315 300 uint_t segvn_maxpgszc = 0;
316 301
317 302 ulong_t segvn_vmpss_clrszc_cnt;
318 303 ulong_t segvn_vmpss_clrszc_err;
319 304 ulong_t segvn_fltvnpages_clrszc_cnt;
320 305 ulong_t segvn_fltvnpages_clrszc_err;
321 306 ulong_t segvn_setpgsz_align_err;
322 307 ulong_t segvn_setpgsz_anon_align_err;
323 308 ulong_t segvn_setpgsz_getattr_err;
324 309 ulong_t segvn_setpgsz_eof_err;
325 310 ulong_t segvn_faultvnmpss_align_err1;
326 311 ulong_t segvn_faultvnmpss_align_err2;
327 312 ulong_t segvn_faultvnmpss_align_err3;
328 313 ulong_t segvn_faultvnmpss_align_err4;
329 314 ulong_t segvn_faultvnmpss_align_err5;
330 315 ulong_t segvn_vmpss_pageio_deadlk_err;
331 316
332 317 int segvn_use_regions = 1;
333 318
334 319 /*
335 320 * Segvn supports text replication optimization for NUMA platforms. Text
336 321 * replica's are represented by anon maps (amp). There's one amp per text file
337 322 * region per lgroup. A process chooses the amp for each of its text mappings
338 323 * based on the lgroup assignment of its main thread (t_tid = 1). All
339 324 * processes that want a replica on a particular lgroup for the same text file
340 325 * mapping share the same amp. amp's are looked up in svntr_hashtab hash table
341 326 * with vp,off,size,szc used as a key. Text replication segments are read only
342 327 * MAP_PRIVATE|MAP_TEXT segments that map vnode. Replication is achieved by
343 328 * forcing COW faults from vnode to amp and mapping amp pages instead of vnode
344 329 * pages. Replication amp is assigned to a segment when it gets its first
345 330 * pagefault. To handle main thread lgroup rehoming segvn_trasync_thread
346 331 * rechecks periodically if the process still maps an amp local to the main
347 332 * thread. If not async thread forces process to remap to an amp in the new
348 333 * home lgroup of the main thread. Current text replication implementation
349 334 * only provides the benefit to workloads that do most of their work in the
350 335 * main thread of a process or all the threads of a process run in the same
351 336 * lgroup. To extend text replication benefit to different types of
352 337 * multithreaded workloads further work would be needed in the hat layer to
353 338 * allow the same virtual address in the same hat to simultaneously map
354 339 * different physical addresses (i.e. page table replication would be needed
355 340 * for x86).
356 341 *
357 342 * amp pages are used instead of vnode pages as long as segment has a very
358 343 * simple life cycle. It's created via segvn_create(), handles S_EXEC
359 344 * (S_READ) pagefaults and is fully unmapped. If anything more complicated
360 345 * happens such as protection is changed, real COW fault happens, pagesize is
361 346 * changed, MC_LOCK is requested or segment is partially unmapped we turn off
362 347 * text replication by converting the segment back to vnode only segment
363 348 * (unmap segment's address range and set svd->amp to NULL).
364 349 *
365 350 * The original file can be changed after amp is inserted into
366 351 * svntr_hashtab. Processes that are launched after the file is already
367 352 * changed can't use the replica's created prior to the file change. To
368 353 * implement this functionality hash entries are timestamped. Replica's can
369 354 * only be used if current file modification time is the same as the timestamp
370 355 * saved when hash entry was created. However just timestamps alone are not
371 356 * sufficient to detect file modification via mmap(MAP_SHARED) mappings. We
372 357 * deal with file changes via MAP_SHARED mappings differently. When writable
373 358 * MAP_SHARED mappings are created to vnodes marked as executable we mark all
374 359 * existing replica's for this vnode as not usable for future text
375 360 * mappings. And we don't create new replica's for files that currently have
376 361 * potentially writable MAP_SHARED mappings (i.e. vn_is_mapped(V_WRITE) is
377 362 * true).
378 363 */
379 364
380 365 #define SEGVN_TEXTREPL_MAXBYTES_FACTOR (20)
381 366 size_t segvn_textrepl_max_bytes_factor = SEGVN_TEXTREPL_MAXBYTES_FACTOR;
382 367
383 368 static ulong_t svntr_hashtab_sz = 512;
384 369 static svntr_bucket_t *svntr_hashtab = NULL;
385 370 static struct kmem_cache *svntr_cache;
386 371 static svntr_stats_t *segvn_textrepl_stats;
387 372 static ksema_t segvn_trasync_sem;
388 373
389 374 int segvn_disable_textrepl = 1;
390 375 size_t textrepl_size_thresh = (size_t)-1;
391 376 size_t segvn_textrepl_bytes = 0;
392 377 size_t segvn_textrepl_max_bytes = 0;
393 378 clock_t segvn_update_textrepl_interval = 0;
394 379 int segvn_update_tr_time = 10;
395 380 int segvn_disable_textrepl_update = 0;
396 381
397 382 static void segvn_textrepl(struct seg *);
398 383 static void segvn_textunrepl(struct seg *, int);
399 384 static void segvn_inval_trcache(vnode_t *);
400 385 static void segvn_trasync_thread(void);
401 386 static void segvn_trupdate_wakeup(void *);
402 387 static void segvn_trupdate(void);
403 388 static void segvn_trupdate_seg(struct seg *, segvn_data_t *, svntr_t *,
404 389 ulong_t);
405 390
406 391 /*
407 392 * Initialize segvn data structures
408 393 */
409 394 void
410 395 segvn_init(void)
411 396 {
412 397 uint_t maxszc;
413 398 uint_t szc;
414 399 size_t pgsz;
415 400
416 401 segvn_cache = kmem_cache_create("segvn_cache",
417 402 sizeof (struct segvn_data), 0,
418 403 segvn_cache_constructor, segvn_cache_destructor, NULL,
419 404 NULL, NULL, 0);
420 405
421 406 if (segvn_lpg_disable == 0) {
422 407 szc = maxszc = page_num_pagesizes() - 1;
423 408 if (szc == 0) {
424 409 segvn_lpg_disable = 1;
425 410 }
426 411 if (page_get_pagesize(0) != PAGESIZE) {
427 412 panic("segvn_init: bad szc 0");
428 413 /*NOTREACHED*/
429 414 }
430 415 while (szc != 0) {
431 416 pgsz = page_get_pagesize(szc);
432 417 if (pgsz <= PAGESIZE || !IS_P2ALIGNED(pgsz, pgsz)) {
433 418 panic("segvn_init: bad szc %d", szc);
434 419 /*NOTREACHED*/
435 420 }
436 421 szc--;
437 422 }
438 423 if (segvn_maxpgszc == 0 || segvn_maxpgszc > maxszc)
439 424 segvn_maxpgszc = maxszc;
440 425 }
441 426
442 427 if (segvn_maxpgszc) {
443 428 segvn_szc_cache = (struct kmem_cache **)kmem_alloc(
444 429 (segvn_maxpgszc + 1) * sizeof (struct kmem_cache *),
445 430 KM_SLEEP);
446 431 }
447 432
448 433 for (szc = 1; szc <= segvn_maxpgszc; szc++) {
449 434 char str[32];
450 435
451 436 (void) sprintf(str, "segvn_szc_cache%d", szc);
452 437 segvn_szc_cache[szc] = kmem_cache_create(str,
453 438 page_get_pagecnt(szc) * sizeof (page_t *), 0,
454 439 NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);
455 440 }
456 441
457 442
458 443 if (segvn_use_regions && !hat_supported(HAT_SHARED_REGIONS, NULL))
459 444 segvn_use_regions = 0;
460 445
461 446 /*
462 447 * For now shared regions and text replication segvn support
463 448 * are mutually exclusive. This is acceptable because
464 449 * currently significant benefit from text replication was
465 450 * only observed on AMD64 NUMA platforms (due to relatively
466 451 * small L2$ size) and currently we don't support shared
467 452 * regions on x86.
468 453 */
469 454 if (segvn_use_regions && !segvn_disable_textrepl) {
470 455 segvn_disable_textrepl = 1;
471 456 }
472 457
473 458 #if defined(_LP64)
474 459 if (lgrp_optimizations() && textrepl_size_thresh != (size_t)-1 &&
475 460 !segvn_disable_textrepl) {
476 461 ulong_t i;
477 462 size_t hsz = svntr_hashtab_sz * sizeof (svntr_bucket_t);
478 463
479 464 svntr_cache = kmem_cache_create("svntr_cache",
480 465 sizeof (svntr_t), 0, svntr_cache_constructor, NULL,
481 466 NULL, NULL, NULL, 0);
482 467 svntr_hashtab = kmem_zalloc(hsz, KM_SLEEP);
483 468 for (i = 0; i < svntr_hashtab_sz; i++) {
484 469 mutex_init(&svntr_hashtab[i].tr_lock, NULL,
485 470 MUTEX_DEFAULT, NULL);
486 471 }
487 472 segvn_textrepl_max_bytes = ptob(physmem) /
488 473 segvn_textrepl_max_bytes_factor;
489 474 segvn_textrepl_stats = kmem_zalloc(NCPU *
490 475 sizeof (svntr_stats_t), KM_SLEEP);
491 476 sema_init(&segvn_trasync_sem, 0, NULL, SEMA_DEFAULT, NULL);
492 477 (void) thread_create(NULL, 0, segvn_trasync_thread,
493 478 NULL, 0, &p0, TS_RUN, minclsyspri);
494 479 }
495 480 #endif
496 481
497 482 if (!ISP2(segvn_pglock_comb_balign) ||
498 483 segvn_pglock_comb_balign < PAGESIZE) {
499 484 segvn_pglock_comb_balign = 1UL << 16; /* 64K */
500 485 }
501 486 segvn_pglock_comb_bshift = highbit(segvn_pglock_comb_balign) - 1;
502 487 segvn_pglock_comb_palign = btop(segvn_pglock_comb_balign);
503 488 }
504 489
505 490 #define SEGVN_PAGEIO ((void *)0x1)
506 491 #define SEGVN_NOPAGEIO ((void *)0x2)
507 492
508 493 static void
509 494 segvn_setvnode_mpss(vnode_t *vp)
510 495 {
511 496 int err;
512 497
513 498 ASSERT(vp->v_mpssdata == NULL ||
514 499 vp->v_mpssdata == SEGVN_PAGEIO ||
515 500 vp->v_mpssdata == SEGVN_NOPAGEIO);
516 501
517 502 if (vp->v_mpssdata == NULL) {
518 503 if (vn_vmpss_usepageio(vp)) {
519 504 err = VOP_PAGEIO(vp, (page_t *)NULL,
520 505 (u_offset_t)0, 0, 0, CRED(), NULL);
521 506 } else {
522 507 err = ENOSYS;
523 508 }
524 509 /*
525 510 * set v_mpssdata just once per vnode life
526 511 * so that it never changes.
527 512 */
528 513 mutex_enter(&vp->v_lock);
529 514 if (vp->v_mpssdata == NULL) {
530 515 if (err == EINVAL) {
531 516 vp->v_mpssdata = SEGVN_PAGEIO;
532 517 } else {
533 518 vp->v_mpssdata = SEGVN_NOPAGEIO;
534 519 }
535 520 }
536 521 mutex_exit(&vp->v_lock);
537 522 }
538 523 }
539 524
540 525 int
541 526 segvn_create(struct seg *seg, void *argsp)
542 527 {
543 528 struct segvn_crargs *a = (struct segvn_crargs *)argsp;
544 529 struct segvn_data *svd;
545 530 size_t swresv = 0;
546 531 struct cred *cred;
547 532 struct anon_map *amp;
548 533 int error = 0;
549 534 size_t pgsz;
550 535 lgrp_mem_policy_t mpolicy = LGRP_MEM_POLICY_DEFAULT;
551 536 int use_rgn = 0;
552 537 int trok = 0;
553 538
554 539 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
555 540
556 541 if (a->type != MAP_PRIVATE && a->type != MAP_SHARED) {
557 542 panic("segvn_create type");
558 543 /*NOTREACHED*/
559 544 }
560 545
561 546 /*
562 547 * Check arguments. If a shared anon structure is given then
563 548 * it is illegal to also specify a vp.
564 549 */
565 550 if (a->amp != NULL && a->vp != NULL) {
566 551 panic("segvn_create anon_map");
567 552 /*NOTREACHED*/
568 553 }
569 554
570 555 if (a->type == MAP_PRIVATE && (a->flags & MAP_TEXT) &&
571 556 a->vp != NULL && a->prot == (PROT_USER | PROT_READ | PROT_EXEC) &&
572 557 segvn_use_regions) {
573 558 use_rgn = 1;
574 559 }
575 560
576 561 /* MAP_NORESERVE on a MAP_SHARED segment is meaningless. */
577 562 if (a->type == MAP_SHARED)
578 563 a->flags &= ~MAP_NORESERVE;
579 564
580 565 if (a->szc != 0) {
581 566 if (segvn_lpg_disable != 0 || (a->szc == AS_MAP_NO_LPOOB) ||
582 567 (a->amp != NULL && a->type == MAP_PRIVATE) ||
583 568 (a->flags & MAP_NORESERVE) || seg->s_as == &kas) {
584 569 a->szc = 0;
585 570 } else {
586 571 if (a->szc > segvn_maxpgszc)
587 572 a->szc = segvn_maxpgszc;
588 573 pgsz = page_get_pagesize(a->szc);
589 574 if (!IS_P2ALIGNED(seg->s_base, pgsz) ||
590 575 !IS_P2ALIGNED(seg->s_size, pgsz)) {
591 576 a->szc = 0;
592 577 } else if (a->vp != NULL) {
593 578 if (IS_SWAPFSVP(a->vp) || VN_ISKAS(a->vp)) {
594 579 /*
595 580 * paranoid check.
596 581 * hat_page_demote() is not supported
597 582 * on swapfs pages.
598 583 */
599 584 a->szc = 0;
600 585 } else if (map_addr_vacalign_check(seg->s_base,
601 586 a->offset & PAGEMASK)) {
602 587 a->szc = 0;
603 588 }
604 589 } else if (a->amp != NULL) {
605 590 pgcnt_t anum = btopr(a->offset);
606 591 pgcnt_t pgcnt = page_get_pagecnt(a->szc);
607 592 if (!IS_P2ALIGNED(anum, pgcnt)) {
608 593 a->szc = 0;
609 594 }
610 595 }
611 596 }
612 597 }
613 598
614 599 /*
615 600 * If segment may need private pages, reserve them now.
616 601 */
617 602 if (!(a->flags & MAP_NORESERVE) && ((a->vp == NULL && a->amp == NULL) ||
618 603 (a->type == MAP_PRIVATE && (a->prot & PROT_WRITE)))) {
619 604 if (anon_resv_zone(seg->s_size,
620 605 seg->s_as->a_proc->p_zone) == 0)
621 606 return (EAGAIN);
622 607 swresv = seg->s_size;
623 608 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
624 609 seg, swresv, 1);
625 610 }
626 611
627 612 /*
628 613 * Reserve any mapping structures that may be required.
629 614 *
630 615 * Don't do it for segments that may use regions. It's currently a
631 616 * noop in the hat implementations anyway.
632 617 */
633 618 if (!use_rgn) {
634 619 hat_map(seg->s_as->a_hat, seg->s_base, seg->s_size, HAT_MAP);
635 620 }
636 621
637 622 if (a->cred) {
638 623 cred = a->cred;
639 624 crhold(cred);
640 625 } else {
641 626 crhold(cred = CRED());
642 627 }
643 628
644 629 /* Inform the vnode of the new mapping */
645 630 if (a->vp != NULL) {
646 631 error = VOP_ADDMAP(a->vp, a->offset & PAGEMASK,
647 632 seg->s_as, seg->s_base, seg->s_size, a->prot,
648 633 a->maxprot, a->type, cred, NULL);
649 634 if (error) {
650 635 if (swresv != 0) {
651 636 anon_unresv_zone(swresv,
652 637 seg->s_as->a_proc->p_zone);
653 638 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
654 639 "anon proc:%p %lu %u", seg, swresv, 0);
655 640 }
656 641 crfree(cred);
657 642 if (!use_rgn) {
658 643 hat_unload(seg->s_as->a_hat, seg->s_base,
659 644 seg->s_size, HAT_UNLOAD_UNMAP);
660 645 }
661 646 return (error);
662 647 }
663 648 /*
664 649 * svntr_hashtab will be NULL if we support shared regions.
665 650 */
666 651 trok = ((a->flags & MAP_TEXT) &&
667 652 (seg->s_size > textrepl_size_thresh ||
668 653 (a->flags & _MAP_TEXTREPL)) &&
669 654 lgrp_optimizations() && svntr_hashtab != NULL &&
670 655 a->type == MAP_PRIVATE && swresv == 0 &&
671 656 !(a->flags & MAP_NORESERVE) &&
672 657 seg->s_as != &kas && a->vp->v_type == VREG);
673 658
674 659 ASSERT(!trok || !use_rgn);
675 660 }
676 661
677 662 /*
678 663 * MAP_NORESERVE mappings don't count towards the VSZ of a process
679 664 * until we fault the pages in.
680 665 */
681 666 if ((a->vp == NULL || a->vp->v_type != VREG) &&
682 667 a->flags & MAP_NORESERVE) {
683 668 seg->s_as->a_resvsize -= seg->s_size;
684 669 }
685 670
686 671 /*
687 672 * If more than one segment in the address space, and they're adjacent
688 673 * virtually, try to concatenate them. Don't concatenate if an
689 674 * explicit anon_map structure was supplied (e.g., SystemV shared
690 675 * memory) or if we'll use text replication for this segment.
691 676 */
692 677 if (a->amp == NULL && !use_rgn && !trok) {
693 678 struct seg *pseg, *nseg;
694 679 struct segvn_data *psvd, *nsvd;
695 680 lgrp_mem_policy_t ppolicy, npolicy;
696 681 uint_t lgrp_mem_policy_flags = 0;
697 682 extern lgrp_mem_policy_t lgrp_mem_default_policy;
698 683
699 684 /*
700 685 * Memory policy flags (lgrp_mem_policy_flags) is valid when
701 686 * extending stack/heap segments.
702 687 */
703 688 if ((a->vp == NULL) && (a->type == MAP_PRIVATE) &&
704 689 !(a->flags & MAP_NORESERVE) && (seg->s_as != &kas)) {
705 690 lgrp_mem_policy_flags = a->lgrp_mem_policy_flags;
706 691 } else {
707 692 /*
708 693 * Get policy when not extending it from another segment
709 694 */
710 695 mpolicy = lgrp_mem_policy_default(seg->s_size, a->type);
711 696 }
712 697
713 698 /*
714 699 * First, try to concatenate the previous and new segments
715 700 */
716 701 pseg = AS_SEGPREV(seg->s_as, seg);
717 702 if (pseg != NULL &&
718 703 pseg->s_base + pseg->s_size == seg->s_base &&
719 704 pseg->s_ops == &segvn_ops) {
720 705 /*
721 706 * Get memory allocation policy from previous segment.
722 707 * When extension is specified (e.g. for heap) apply
723 708 * this policy to the new segment regardless of the
724 709 * outcome of segment concatenation. Extension occurs
725 710 * for non-default policy otherwise default policy is
726 711 * used and is based on extended segment size.
727 712 */
728 713 psvd = (struct segvn_data *)pseg->s_data;
729 714 ppolicy = psvd->policy_info.mem_policy;
730 715 if (lgrp_mem_policy_flags ==
731 716 LGRP_MP_FLAG_EXTEND_UP) {
732 717 if (ppolicy != lgrp_mem_default_policy) {
733 718 mpolicy = ppolicy;
734 719 } else {
735 720 mpolicy = lgrp_mem_policy_default(
736 721 pseg->s_size + seg->s_size,
737 722 a->type);
738 723 }
739 724 }
740 725
741 726 if (mpolicy == ppolicy &&
742 727 (pseg->s_size + seg->s_size <=
743 728 segvn_comb_thrshld || psvd->amp == NULL) &&
744 729 segvn_extend_prev(pseg, seg, a, swresv) == 0) {
745 730 /*
746 731 * success! now try to concatenate
747 732 * with following seg
748 733 */
749 734 crfree(cred);
750 735 nseg = AS_SEGNEXT(pseg->s_as, pseg);
751 736 if (nseg != NULL &&
752 737 nseg != pseg &&
753 738 nseg->s_ops == &segvn_ops &&
754 739 pseg->s_base + pseg->s_size ==
755 740 nseg->s_base)
756 741 (void) segvn_concat(pseg, nseg, 0);
757 742 ASSERT(pseg->s_szc == 0 ||
758 743 (a->szc == pseg->s_szc &&
759 744 IS_P2ALIGNED(pseg->s_base, pgsz) &&
760 745 IS_P2ALIGNED(pseg->s_size, pgsz)));
761 746 return (0);
762 747 }
763 748 }
764 749
765 750 /*
766 751 * Failed, so try to concatenate with following seg
767 752 */
768 753 nseg = AS_SEGNEXT(seg->s_as, seg);
769 754 if (nseg != NULL &&
770 755 seg->s_base + seg->s_size == nseg->s_base &&
771 756 nseg->s_ops == &segvn_ops) {
772 757 /*
773 758 * Get memory allocation policy from next segment.
774 759 * When extension is specified (e.g. for stack) apply
775 760 * this policy to the new segment regardless of the
776 761 * outcome of segment concatenation. Extension occurs
777 762 * for non-default policy otherwise default policy is
778 763 * used and is based on extended segment size.
779 764 */
780 765 nsvd = (struct segvn_data *)nseg->s_data;
781 766 npolicy = nsvd->policy_info.mem_policy;
782 767 if (lgrp_mem_policy_flags ==
783 768 LGRP_MP_FLAG_EXTEND_DOWN) {
784 769 if (npolicy != lgrp_mem_default_policy) {
785 770 mpolicy = npolicy;
786 771 } else {
787 772 mpolicy = lgrp_mem_policy_default(
788 773 nseg->s_size + seg->s_size,
789 774 a->type);
790 775 }
791 776 }
792 777
793 778 if (mpolicy == npolicy &&
794 779 segvn_extend_next(seg, nseg, a, swresv) == 0) {
795 780 crfree(cred);
796 781 ASSERT(nseg->s_szc == 0 ||
797 782 (a->szc == nseg->s_szc &&
798 783 IS_P2ALIGNED(nseg->s_base, pgsz) &&
799 784 IS_P2ALIGNED(nseg->s_size, pgsz)));
800 785 return (0);
801 786 }
802 787 }
803 788 }
804 789
805 790 if (a->vp != NULL) {
806 791 VN_HOLD(a->vp);
807 792 if (a->type == MAP_SHARED)
808 793 lgrp_shm_policy_init(NULL, a->vp);
809 794 }
810 795 svd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
811 796
812 797 seg->s_ops = &segvn_ops;
813 798 seg->s_data = (void *)svd;
814 799 seg->s_szc = a->szc;
815 800
816 801 svd->seg = seg;
817 802 svd->vp = a->vp;
818 803 /*
819 804 * Anonymous mappings have no backing file so the offset is meaningless.
820 805 */
821 806 svd->offset = a->vp ? (a->offset & PAGEMASK) : 0;
822 807 svd->prot = a->prot;
823 808 svd->maxprot = a->maxprot;
824 809 svd->pageprot = 0;
825 810 svd->type = a->type;
826 811 svd->vpage = NULL;
827 812 svd->cred = cred;
828 813 svd->advice = MADV_NORMAL;
829 814 svd->pageadvice = 0;
830 815 svd->flags = (ushort_t)a->flags;
831 816 svd->softlockcnt = 0;
832 817 svd->softlockcnt_sbase = 0;
833 818 svd->softlockcnt_send = 0;
834 819 svd->svn_inz = 0;
835 820 svd->rcookie = HAT_INVALID_REGION_COOKIE;
836 821 svd->pageswap = 0;
837 822
838 823 if (a->szc != 0 && a->vp != NULL) {
839 824 segvn_setvnode_mpss(a->vp);
840 825 }
841 826 if (svd->type == MAP_SHARED && svd->vp != NULL &&
842 827 (svd->vp->v_flag & VVMEXEC) && (svd->prot & PROT_WRITE)) {
843 828 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
844 829 segvn_inval_trcache(svd->vp);
845 830 }
846 831
847 832 amp = a->amp;
848 833 if ((svd->amp = amp) == NULL) {
849 834 svd->anon_index = 0;
850 835 if (svd->type == MAP_SHARED) {
851 836 svd->swresv = 0;
852 837 /*
853 838 * Shared mappings to a vp need no other setup.
854 839 * If we have a shared mapping to an anon_map object
855 840 * which hasn't been allocated yet, allocate the
856 841 * struct now so that it will be properly shared
857 842 * by remembering the swap reservation there.
858 843 */
859 844 if (a->vp == NULL) {
860 845 svd->amp = anonmap_alloc(seg->s_size, swresv,
861 846 ANON_SLEEP);
862 847 svd->amp->a_szc = seg->s_szc;
863 848 }
864 849 } else {
865 850 /*
866 851 * Private mapping (with or without a vp).
867 852 * Allocate anon_map when needed.
868 853 */
869 854 svd->swresv = swresv;
870 855 }
871 856 } else {
872 857 pgcnt_t anon_num;
873 858
874 859 /*
875 860 * Mapping to an existing anon_map structure without a vp.
876 861 * For now we will insure that the segment size isn't larger
877 862 * than the size - offset gives us. Later on we may wish to
878 863 * have the anon array dynamically allocated itself so that
879 864 * we don't always have to allocate all the anon pointer slots.
880 865 * This of course involves adding extra code to check that we
881 866 * aren't trying to use an anon pointer slot beyond the end
882 867 * of the currently allocated anon array.
883 868 */
884 869 if ((amp->size - a->offset) < seg->s_size) {
885 870 panic("segvn_create anon_map size");
886 871 /*NOTREACHED*/
887 872 }
888 873
889 874 anon_num = btopr(a->offset);
890 875
891 876 if (a->type == MAP_SHARED) {
892 877 /*
893 878 * SHARED mapping to a given anon_map.
894 879 */
895 880 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
896 881 amp->refcnt++;
897 882 if (a->szc > amp->a_szc) {
898 883 amp->a_szc = a->szc;
899 884 }
900 885 ANON_LOCK_EXIT(&->a_rwlock);
901 886 svd->anon_index = anon_num;
902 887 svd->swresv = 0;
903 888 } else {
904 889 /*
905 890 * PRIVATE mapping to a given anon_map.
906 891 * Make sure that all the needed anon
907 892 * structures are created (so that we will
908 893 * share the underlying pages if nothing
909 894 * is written by this mapping) and then
910 895 * duplicate the anon array as is done
911 896 * when a privately mapped segment is dup'ed.
912 897 */
913 898 struct anon *ap;
914 899 caddr_t addr;
915 900 caddr_t eaddr;
916 901 ulong_t anon_idx;
917 902 int hat_flag = HAT_LOAD;
918 903
919 904 if (svd->flags & MAP_TEXT) {
920 905 hat_flag |= HAT_LOAD_TEXT;
921 906 }
922 907
923 908 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
924 909 svd->amp->a_szc = seg->s_szc;
925 910 svd->anon_index = 0;
926 911 svd->swresv = swresv;
927 912
928 913 /*
929 914 * Prevent 2 threads from allocating anon
930 915 * slots simultaneously.
931 916 */
932 917 ANON_LOCK_ENTER(&->a_rwlock, RW_WRITER);
933 918 eaddr = seg->s_base + seg->s_size;
934 919
935 920 for (anon_idx = anon_num, addr = seg->s_base;
936 921 addr < eaddr; addr += PAGESIZE, anon_idx++) {
937 922 page_t *pp;
938 923
939 924 if ((ap = anon_get_ptr(amp->ahp,
940 925 anon_idx)) != NULL)
941 926 continue;
942 927
943 928 /*
944 929 * Allocate the anon struct now.
945 930 * Might as well load up translation
946 931 * to the page while we're at it...
947 932 */
948 933 pp = anon_zero(seg, addr, &ap, cred);
949 934 if (ap == NULL || pp == NULL) {
950 935 panic("segvn_create anon_zero");
951 936 /*NOTREACHED*/
952 937 }
953 938
954 939 /*
955 940 * Re-acquire the anon_map lock and
956 941 * initialize the anon array entry.
957 942 */
958 943 ASSERT(anon_get_ptr(amp->ahp,
959 944 anon_idx) == NULL);
960 945 (void) anon_set_ptr(amp->ahp, anon_idx, ap,
961 946 ANON_SLEEP);
962 947
963 948 ASSERT(seg->s_szc == 0);
964 949 ASSERT(!IS_VMODSORT(pp->p_vnode));
965 950
966 951 ASSERT(use_rgn == 0);
967 952 hat_memload(seg->s_as->a_hat, addr, pp,
968 953 svd->prot & ~PROT_WRITE, hat_flag);
969 954
970 955 page_unlock(pp);
971 956 }
972 957 ASSERT(seg->s_szc == 0);
973 958 anon_dup(amp->ahp, anon_num, svd->amp->ahp,
974 959 0, seg->s_size);
975 960 ANON_LOCK_EXIT(&->a_rwlock);
976 961 }
977 962 }
978 963
979 964 /*
980 965 * Set default memory allocation policy for segment
981 966 *
982 967 * Always set policy for private memory at least for initialization
983 968 * even if this is a shared memory segment
984 969 */
985 970 (void) lgrp_privm_policy_set(mpolicy, &svd->policy_info, seg->s_size);
986 971
987 972 if (svd->type == MAP_SHARED)
988 973 (void) lgrp_shm_policy_set(mpolicy, svd->amp, svd->anon_index,
989 974 svd->vp, svd->offset, seg->s_size);
990 975
991 976 if (use_rgn) {
992 977 ASSERT(!trok);
993 978 ASSERT(svd->amp == NULL);
994 979 svd->rcookie = hat_join_region(seg->s_as->a_hat, seg->s_base,
995 980 seg->s_size, (void *)svd->vp, svd->offset, svd->prot,
996 981 (uchar_t)seg->s_szc, segvn_hat_rgn_unload_callback,
997 982 HAT_REGION_TEXT);
998 983 }
999 984
1000 985 ASSERT(!trok || !(svd->prot & PROT_WRITE));
1001 986 svd->tr_state = trok ? SEGVN_TR_INIT : SEGVN_TR_OFF;
1002 987
1003 988 return (0);
1004 989 }
1005 990
1006 991 /*
1007 992 * Concatenate two existing segments, if possible.
1008 993 * Return 0 on success, -1 if two segments are not compatible
1009 994 * or -2 on memory allocation failure.
1010 995 * If amp_cat == 1 then try and concat segments with anon maps
1011 996 */
1012 997 static int
1013 998 segvn_concat(struct seg *seg1, struct seg *seg2, int amp_cat)
1014 999 {
1015 1000 struct segvn_data *svd1 = seg1->s_data;
1016 1001 struct segvn_data *svd2 = seg2->s_data;
1017 1002 struct anon_map *amp1 = svd1->amp;
1018 1003 struct anon_map *amp2 = svd2->amp;
1019 1004 struct vpage *vpage1 = svd1->vpage;
1020 1005 struct vpage *vpage2 = svd2->vpage, *nvpage = NULL;
1021 1006 size_t size, nvpsize;
1022 1007 pgcnt_t npages1, npages2;
1023 1008
1024 1009 ASSERT(seg1->s_as && seg2->s_as && seg1->s_as == seg2->s_as);
1025 1010 ASSERT(AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1026 1011 ASSERT(seg1->s_ops == seg2->s_ops);
1027 1012
1028 1013 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie) ||
1029 1014 HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1030 1015 return (-1);
1031 1016 }
1032 1017
1033 1018 /* both segments exist, try to merge them */
1034 1019 #define incompat(x) (svd1->x != svd2->x)
1035 1020 if (incompat(vp) || incompat(maxprot) ||
1036 1021 (!svd1->pageadvice && !svd2->pageadvice && incompat(advice)) ||
1037 1022 (!svd1->pageprot && !svd2->pageprot && incompat(prot)) ||
1038 1023 incompat(type) || incompat(cred) || incompat(flags) ||
1039 1024 seg1->s_szc != seg2->s_szc || incompat(policy_info.mem_policy) ||
1040 1025 (svd2->softlockcnt > 0) || svd1->softlockcnt_send > 0)
1041 1026 return (-1);
1042 1027 #undef incompat
1043 1028
1044 1029 /*
1045 1030 * vp == NULL implies zfod, offset doesn't matter
1046 1031 */
1047 1032 if (svd1->vp != NULL &&
1048 1033 svd1->offset + seg1->s_size != svd2->offset) {
1049 1034 return (-1);
1050 1035 }
1051 1036
1052 1037 /*
1053 1038 * Don't concatenate if either segment uses text replication.
1054 1039 */
1055 1040 if (svd1->tr_state != SEGVN_TR_OFF || svd2->tr_state != SEGVN_TR_OFF) {
1056 1041 return (-1);
1057 1042 }
1058 1043
1059 1044 /*
1060 1045 * Fail early if we're not supposed to concatenate
1061 1046 * segments with non NULL amp.
1062 1047 */
1063 1048 if (amp_cat == 0 && (amp1 != NULL || amp2 != NULL)) {
1064 1049 return (-1);
1065 1050 }
1066 1051
1067 1052 if (svd1->vp == NULL && svd1->type == MAP_SHARED) {
1068 1053 if (amp1 != amp2) {
1069 1054 return (-1);
1070 1055 }
1071 1056 if (amp1 != NULL && svd1->anon_index + btop(seg1->s_size) !=
1072 1057 svd2->anon_index) {
1073 1058 return (-1);
1074 1059 }
1075 1060 ASSERT(amp1 == NULL || amp1->refcnt >= 2);
1076 1061 }
1077 1062
1078 1063 /*
1079 1064 * If either seg has vpages, create a new merged vpage array.
1080 1065 */
1081 1066 if (vpage1 != NULL || vpage2 != NULL) {
1082 1067 struct vpage *vp, *evp;
1083 1068
1084 1069 npages1 = seg_pages(seg1);
1085 1070 npages2 = seg_pages(seg2);
1086 1071 nvpsize = vpgtob(npages1 + npages2);
1087 1072
1088 1073 if ((nvpage = kmem_zalloc(nvpsize, KM_NOSLEEP)) == NULL) {
1089 1074 return (-2);
1090 1075 }
1091 1076
1092 1077 if (vpage1 != NULL) {
1093 1078 bcopy(vpage1, nvpage, vpgtob(npages1));
1094 1079 } else {
1095 1080 evp = nvpage + npages1;
1096 1081 for (vp = nvpage; vp < evp; vp++) {
1097 1082 VPP_SETPROT(vp, svd1->prot);
1098 1083 VPP_SETADVICE(vp, svd1->advice);
1099 1084 }
1100 1085 }
1101 1086
1102 1087 if (vpage2 != NULL) {
1103 1088 bcopy(vpage2, nvpage + npages1, vpgtob(npages2));
1104 1089 } else {
1105 1090 evp = nvpage + npages1 + npages2;
1106 1091 for (vp = nvpage + npages1; vp < evp; vp++) {
1107 1092 VPP_SETPROT(vp, svd2->prot);
1108 1093 VPP_SETADVICE(vp, svd2->advice);
1109 1094 }
1110 1095 }
1111 1096
1112 1097 if (svd2->pageswap && (!svd1->pageswap && svd1->swresv)) {
1113 1098 ASSERT(svd1->swresv == seg1->s_size);
1114 1099 ASSERT(!(svd1->flags & MAP_NORESERVE));
1115 1100 ASSERT(!(svd2->flags & MAP_NORESERVE));
1116 1101 evp = nvpage + npages1;
1117 1102 for (vp = nvpage; vp < evp; vp++) {
1118 1103 VPP_SETSWAPRES(vp);
1119 1104 }
1120 1105 }
1121 1106
1122 1107 if (svd1->pageswap && (!svd2->pageswap && svd2->swresv)) {
1123 1108 ASSERT(svd2->swresv == seg2->s_size);
1124 1109 ASSERT(!(svd1->flags & MAP_NORESERVE));
1125 1110 ASSERT(!(svd2->flags & MAP_NORESERVE));
1126 1111 vp = nvpage + npages1;
1127 1112 evp = vp + npages2;
1128 1113 for (; vp < evp; vp++) {
1129 1114 VPP_SETSWAPRES(vp);
1130 1115 }
1131 1116 }
1132 1117 }
1133 1118 ASSERT((vpage1 != NULL || vpage2 != NULL) ||
1134 1119 (svd1->pageswap == 0 && svd2->pageswap == 0));
1135 1120
1136 1121 /*
1137 1122 * If either segment has private pages, create a new merged anon
1138 1123 * array. If mergeing shared anon segments just decrement anon map's
1139 1124 * refcnt.
1140 1125 */
1141 1126 if (amp1 != NULL && svd1->type == MAP_SHARED) {
1142 1127 ASSERT(amp1 == amp2 && svd1->vp == NULL);
1143 1128 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1144 1129 ASSERT(amp1->refcnt >= 2);
1145 1130 amp1->refcnt--;
1146 1131 ANON_LOCK_EXIT(&1->a_rwlock);
1147 1132 svd2->amp = NULL;
1148 1133 } else if (amp1 != NULL || amp2 != NULL) {
1149 1134 struct anon_hdr *nahp;
1150 1135 struct anon_map *namp = NULL;
1151 1136 size_t asize;
1152 1137
1153 1138 ASSERT(svd1->type == MAP_PRIVATE);
1154 1139
1155 1140 asize = seg1->s_size + seg2->s_size;
1156 1141 if ((nahp = anon_create(btop(asize), ANON_NOSLEEP)) == NULL) {
1157 1142 if (nvpage != NULL) {
1158 1143 kmem_free(nvpage, nvpsize);
1159 1144 }
1160 1145 return (-2);
1161 1146 }
1162 1147 if (amp1 != NULL) {
1163 1148 /*
1164 1149 * XXX anon rwlock is not really needed because
1165 1150 * this is a private segment and we are writers.
1166 1151 */
1167 1152 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1168 1153 ASSERT(amp1->refcnt == 1);
1169 1154 if (anon_copy_ptr(amp1->ahp, svd1->anon_index,
1170 1155 nahp, 0, btop(seg1->s_size), ANON_NOSLEEP)) {
1171 1156 anon_release(nahp, btop(asize));
1172 1157 ANON_LOCK_EXIT(&1->a_rwlock);
1173 1158 if (nvpage != NULL) {
1174 1159 kmem_free(nvpage, nvpsize);
1175 1160 }
1176 1161 return (-2);
1177 1162 }
1178 1163 }
1179 1164 if (amp2 != NULL) {
1180 1165 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER);
1181 1166 ASSERT(amp2->refcnt == 1);
1182 1167 if (anon_copy_ptr(amp2->ahp, svd2->anon_index,
1183 1168 nahp, btop(seg1->s_size), btop(seg2->s_size),
1184 1169 ANON_NOSLEEP)) {
1185 1170 anon_release(nahp, btop(asize));
1186 1171 ANON_LOCK_EXIT(&2->a_rwlock);
1187 1172 if (amp1 != NULL) {
1188 1173 ANON_LOCK_EXIT(&1->a_rwlock);
1189 1174 }
1190 1175 if (nvpage != NULL) {
1191 1176 kmem_free(nvpage, nvpsize);
1192 1177 }
1193 1178 return (-2);
1194 1179 }
1195 1180 }
1196 1181 if (amp1 != NULL) {
1197 1182 namp = amp1;
1198 1183 anon_release(amp1->ahp, btop(amp1->size));
1199 1184 }
1200 1185 if (amp2 != NULL) {
1201 1186 if (namp == NULL) {
1202 1187 ASSERT(amp1 == NULL);
1203 1188 namp = amp2;
1204 1189 anon_release(amp2->ahp, btop(amp2->size));
1205 1190 } else {
1206 1191 amp2->refcnt--;
1207 1192 ANON_LOCK_EXIT(&2->a_rwlock);
1208 1193 anonmap_free(amp2);
1209 1194 }
1210 1195 svd2->amp = NULL; /* needed for seg_free */
1211 1196 }
1212 1197 namp->ahp = nahp;
1213 1198 namp->size = asize;
1214 1199 svd1->amp = namp;
1215 1200 svd1->anon_index = 0;
1216 1201 ANON_LOCK_EXIT(&namp->a_rwlock);
1217 1202 }
1218 1203 /*
1219 1204 * Now free the old vpage structures.
1220 1205 */
1221 1206 if (nvpage != NULL) {
1222 1207 if (vpage1 != NULL) {
1223 1208 kmem_free(vpage1, vpgtob(npages1));
1224 1209 }
1225 1210 if (vpage2 != NULL) {
1226 1211 svd2->vpage = NULL;
1227 1212 kmem_free(vpage2, vpgtob(npages2));
1228 1213 }
1229 1214 if (svd2->pageprot) {
1230 1215 svd1->pageprot = 1;
1231 1216 }
1232 1217 if (svd2->pageadvice) {
1233 1218 svd1->pageadvice = 1;
1234 1219 }
1235 1220 if (svd2->pageswap) {
1236 1221 svd1->pageswap = 1;
1237 1222 }
1238 1223 svd1->vpage = nvpage;
1239 1224 }
1240 1225
1241 1226 /* all looks ok, merge segments */
1242 1227 svd1->swresv += svd2->swresv;
1243 1228 svd2->swresv = 0; /* so seg_free doesn't release swap space */
1244 1229 size = seg2->s_size;
1245 1230 seg_free(seg2);
1246 1231 seg1->s_size += size;
1247 1232 return (0);
1248 1233 }
1249 1234
1250 1235 /*
1251 1236 * Extend the previous segment (seg1) to include the
1252 1237 * new segment (seg2 + a), if possible.
1253 1238 * Return 0 on success.
1254 1239 */
1255 1240 static int
1256 1241 segvn_extend_prev(seg1, seg2, a, swresv)
1257 1242 struct seg *seg1, *seg2;
1258 1243 struct segvn_crargs *a;
1259 1244 size_t swresv;
1260 1245 {
1261 1246 struct segvn_data *svd1 = (struct segvn_data *)seg1->s_data;
1262 1247 size_t size;
1263 1248 struct anon_map *amp1;
1264 1249 struct vpage *new_vpage;
1265 1250
1266 1251 /*
1267 1252 * We don't need any segment level locks for "segvn" data
1268 1253 * since the address space is "write" locked.
1269 1254 */
1270 1255 ASSERT(seg1->s_as && AS_WRITE_HELD(seg1->s_as, &seg1->s_as->a_lock));
1271 1256
1272 1257 if (HAT_IS_REGION_COOKIE_VALID(svd1->rcookie)) {
1273 1258 return (-1);
1274 1259 }
1275 1260
1276 1261 /* second segment is new, try to extend first */
1277 1262 /* XXX - should also check cred */
1278 1263 if (svd1->vp != a->vp || svd1->maxprot != a->maxprot ||
1279 1264 (!svd1->pageprot && (svd1->prot != a->prot)) ||
1280 1265 svd1->type != a->type || svd1->flags != a->flags ||
1281 1266 seg1->s_szc != a->szc || svd1->softlockcnt_send > 0)
1282 1267 return (-1);
1283 1268
1284 1269 /* vp == NULL implies zfod, offset doesn't matter */
1285 1270 if (svd1->vp != NULL &&
1286 1271 svd1->offset + seg1->s_size != (a->offset & PAGEMASK))
1287 1272 return (-1);
1288 1273
1289 1274 if (svd1->tr_state != SEGVN_TR_OFF) {
1290 1275 return (-1);
1291 1276 }
1292 1277
1293 1278 amp1 = svd1->amp;
1294 1279 if (amp1) {
1295 1280 pgcnt_t newpgs;
1296 1281
1297 1282 /*
1298 1283 * Segment has private pages, can data structures
1299 1284 * be expanded?
1300 1285 *
1301 1286 * Acquire the anon_map lock to prevent it from changing,
1302 1287 * if it is shared. This ensures that the anon_map
1303 1288 * will not change while a thread which has a read/write
1304 1289 * lock on an address space references it.
1305 1290 * XXX - Don't need the anon_map lock at all if "refcnt"
1306 1291 * is 1.
1307 1292 *
1308 1293 * Can't grow a MAP_SHARED segment with an anonmap because
1309 1294 * there may be existing anon slots where we want to extend
1310 1295 * the segment and we wouldn't know what to do with them
1311 1296 * (e.g., for tmpfs right thing is to just leave them there,
1312 1297 * for /dev/zero they should be cleared out).
1313 1298 */
1314 1299 if (svd1->type == MAP_SHARED)
1315 1300 return (-1);
1316 1301
1317 1302 ANON_LOCK_ENTER(&1->a_rwlock, RW_WRITER);
1318 1303 if (amp1->refcnt > 1) {
1319 1304 ANON_LOCK_EXIT(&1->a_rwlock);
1320 1305 return (-1);
1321 1306 }
1322 1307 newpgs = anon_grow(amp1->ahp, &svd1->anon_index,
1323 1308 btop(seg1->s_size), btop(seg2->s_size), ANON_NOSLEEP);
1324 1309
1325 1310 if (newpgs == 0) {
1326 1311 ANON_LOCK_EXIT(&1->a_rwlock);
1327 1312 return (-1);
1328 1313 }
1329 1314 amp1->size = ptob(newpgs);
1330 1315 ANON_LOCK_EXIT(&1->a_rwlock);
1331 1316 }
1332 1317 if (svd1->vpage != NULL) {
1333 1318 struct vpage *vp, *evp;
1334 1319 new_vpage =
1335 1320 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1336 1321 KM_NOSLEEP);
1337 1322 if (new_vpage == NULL)
1338 1323 return (-1);
1339 1324 bcopy(svd1->vpage, new_vpage, vpgtob(seg_pages(seg1)));
1340 1325 kmem_free(svd1->vpage, vpgtob(seg_pages(seg1)));
1341 1326 svd1->vpage = new_vpage;
1342 1327
1343 1328 vp = new_vpage + seg_pages(seg1);
1344 1329 evp = vp + seg_pages(seg2);
1345 1330 for (; vp < evp; vp++)
1346 1331 VPP_SETPROT(vp, a->prot);
1347 1332 if (svd1->pageswap && swresv) {
1348 1333 ASSERT(!(svd1->flags & MAP_NORESERVE));
1349 1334 ASSERT(swresv == seg2->s_size);
1350 1335 vp = new_vpage + seg_pages(seg1);
1351 1336 for (; vp < evp; vp++) {
1352 1337 VPP_SETSWAPRES(vp);
1353 1338 }
1354 1339 }
1355 1340 }
1356 1341 ASSERT(svd1->vpage != NULL || svd1->pageswap == 0);
1357 1342 size = seg2->s_size;
1358 1343 seg_free(seg2);
1359 1344 seg1->s_size += size;
1360 1345 svd1->swresv += swresv;
1361 1346 if (svd1->pageprot && (a->prot & PROT_WRITE) &&
1362 1347 svd1->type == MAP_SHARED && svd1->vp != NULL &&
1363 1348 (svd1->vp->v_flag & VVMEXEC)) {
1364 1349 ASSERT(vn_is_mapped(svd1->vp, V_WRITE));
1365 1350 segvn_inval_trcache(svd1->vp);
1366 1351 }
1367 1352 return (0);
1368 1353 }
1369 1354
1370 1355 /*
1371 1356 * Extend the next segment (seg2) to include the
1372 1357 * new segment (seg1 + a), if possible.
1373 1358 * Return 0 on success.
1374 1359 */
1375 1360 static int
1376 1361 segvn_extend_next(
1377 1362 struct seg *seg1,
1378 1363 struct seg *seg2,
1379 1364 struct segvn_crargs *a,
1380 1365 size_t swresv)
1381 1366 {
1382 1367 struct segvn_data *svd2 = (struct segvn_data *)seg2->s_data;
1383 1368 size_t size;
1384 1369 struct anon_map *amp2;
1385 1370 struct vpage *new_vpage;
1386 1371
1387 1372 /*
1388 1373 * We don't need any segment level locks for "segvn" data
1389 1374 * since the address space is "write" locked.
1390 1375 */
1391 1376 ASSERT(seg2->s_as && AS_WRITE_HELD(seg2->s_as, &seg2->s_as->a_lock));
1392 1377
1393 1378 if (HAT_IS_REGION_COOKIE_VALID(svd2->rcookie)) {
1394 1379 return (-1);
1395 1380 }
1396 1381
1397 1382 /* first segment is new, try to extend second */
1398 1383 /* XXX - should also check cred */
1399 1384 if (svd2->vp != a->vp || svd2->maxprot != a->maxprot ||
1400 1385 (!svd2->pageprot && (svd2->prot != a->prot)) ||
1401 1386 svd2->type != a->type || svd2->flags != a->flags ||
1402 1387 seg2->s_szc != a->szc || svd2->softlockcnt_sbase > 0)
1403 1388 return (-1);
1404 1389 /* vp == NULL implies zfod, offset doesn't matter */
1405 1390 if (svd2->vp != NULL &&
1406 1391 (a->offset & PAGEMASK) + seg1->s_size != svd2->offset)
1407 1392 return (-1);
1408 1393
1409 1394 if (svd2->tr_state != SEGVN_TR_OFF) {
1410 1395 return (-1);
1411 1396 }
1412 1397
1413 1398 amp2 = svd2->amp;
1414 1399 if (amp2) {
1415 1400 pgcnt_t newpgs;
1416 1401
1417 1402 /*
1418 1403 * Segment has private pages, can data structures
1419 1404 * be expanded?
1420 1405 *
1421 1406 * Acquire the anon_map lock to prevent it from changing,
1422 1407 * if it is shared. This ensures that the anon_map
1423 1408 * will not change while a thread which has a read/write
1424 1409 * lock on an address space references it.
1425 1410 *
1426 1411 * XXX - Don't need the anon_map lock at all if "refcnt"
1427 1412 * is 1.
1428 1413 */
1429 1414 if (svd2->type == MAP_SHARED)
1430 1415 return (-1);
1431 1416
1432 1417 ANON_LOCK_ENTER(&2->a_rwlock, RW_WRITER);
1433 1418 if (amp2->refcnt > 1) {
1434 1419 ANON_LOCK_EXIT(&2->a_rwlock);
1435 1420 return (-1);
1436 1421 }
1437 1422 newpgs = anon_grow(amp2->ahp, &svd2->anon_index,
1438 1423 btop(seg2->s_size), btop(seg1->s_size),
1439 1424 ANON_NOSLEEP | ANON_GROWDOWN);
1440 1425
1441 1426 if (newpgs == 0) {
1442 1427 ANON_LOCK_EXIT(&2->a_rwlock);
1443 1428 return (-1);
1444 1429 }
1445 1430 amp2->size = ptob(newpgs);
1446 1431 ANON_LOCK_EXIT(&2->a_rwlock);
1447 1432 }
1448 1433 if (svd2->vpage != NULL) {
1449 1434 struct vpage *vp, *evp;
1450 1435 new_vpage =
1451 1436 kmem_zalloc(vpgtob(seg_pages(seg1) + seg_pages(seg2)),
1452 1437 KM_NOSLEEP);
1453 1438 if (new_vpage == NULL) {
1454 1439 /* Not merging segments so adjust anon_index back */
1455 1440 if (amp2)
1456 1441 svd2->anon_index += seg_pages(seg1);
1457 1442 return (-1);
1458 1443 }
1459 1444 bcopy(svd2->vpage, new_vpage + seg_pages(seg1),
1460 1445 vpgtob(seg_pages(seg2)));
1461 1446 kmem_free(svd2->vpage, vpgtob(seg_pages(seg2)));
1462 1447 svd2->vpage = new_vpage;
1463 1448
1464 1449 vp = new_vpage;
1465 1450 evp = vp + seg_pages(seg1);
1466 1451 for (; vp < evp; vp++)
1467 1452 VPP_SETPROT(vp, a->prot);
1468 1453 if (svd2->pageswap && swresv) {
1469 1454 ASSERT(!(svd2->flags & MAP_NORESERVE));
1470 1455 ASSERT(swresv == seg1->s_size);
1471 1456 vp = new_vpage;
1472 1457 for (; vp < evp; vp++) {
1473 1458 VPP_SETSWAPRES(vp);
1474 1459 }
1475 1460 }
1476 1461 }
1477 1462 ASSERT(svd2->vpage != NULL || svd2->pageswap == 0);
1478 1463 size = seg1->s_size;
1479 1464 seg_free(seg1);
1480 1465 seg2->s_size += size;
1481 1466 seg2->s_base -= size;
1482 1467 svd2->offset -= size;
1483 1468 svd2->swresv += swresv;
1484 1469 if (svd2->pageprot && (a->prot & PROT_WRITE) &&
1485 1470 svd2->type == MAP_SHARED && svd2->vp != NULL &&
1486 1471 (svd2->vp->v_flag & VVMEXEC)) {
1487 1472 ASSERT(vn_is_mapped(svd2->vp, V_WRITE));
1488 1473 segvn_inval_trcache(svd2->vp);
1489 1474 }
1490 1475 return (0);
1491 1476 }
1492 1477
1493 1478 /*
1494 1479 * Duplicate all the pages in the segment. This may break COW sharing for a
1495 1480 * given page. If the page is marked with inherit zero set, then instead of
1496 1481 * duplicating the page, we zero the page.
1497 1482 */
1498 1483 static int
1499 1484 segvn_dup_pages(struct seg *seg, struct seg *newseg)
1500 1485 {
1501 1486 int error;
1502 1487 uint_t prot;
1503 1488 page_t *pp;
1504 1489 struct anon *ap, *newap;
1505 1490 size_t i;
1506 1491 caddr_t addr;
1507 1492
1508 1493 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1509 1494 struct segvn_data *newsvd = (struct segvn_data *)newseg->s_data;
1510 1495 ulong_t old_idx = svd->anon_index;
1511 1496 ulong_t new_idx = 0;
1512 1497
1513 1498 i = btopr(seg->s_size);
1514 1499 addr = seg->s_base;
1515 1500
1516 1501 /*
1517 1502 * XXX break cow sharing using PAGESIZE
1518 1503 * pages. They will be relocated into larger
1519 1504 * pages at fault time.
1520 1505 */
1521 1506 while (i-- > 0) {
1522 1507 if ((ap = anon_get_ptr(svd->amp->ahp, old_idx)) != NULL) {
1523 1508 struct vpage *vpp;
1524 1509
1525 1510 vpp = &svd->vpage[seg_page(seg, addr)];
1526 1511
1527 1512 /*
1528 1513 * prot need not be computed below 'cause anon_private
1529 1514 * is going to ignore it anyway as child doesn't inherit
1530 1515 * pagelock from parent.
1531 1516 */
1532 1517 prot = svd->pageprot ? VPP_PROT(vpp) : svd->prot;
1533 1518
1534 1519 /*
1535 1520 * Check whether we should zero this or dup it.
1536 1521 */
1537 1522 if (svd->svn_inz == SEGVN_INZ_ALL ||
1538 1523 (svd->svn_inz == SEGVN_INZ_VPP &&
1539 1524 VPP_ISINHZERO(vpp))) {
1540 1525 pp = anon_zero(newseg, addr, &newap,
1541 1526 newsvd->cred);
1542 1527 } else {
1543 1528 page_t *anon_pl[1+1];
1544 1529 uint_t vpprot;
1545 1530 error = anon_getpage(&ap, &vpprot, anon_pl,
1546 1531 PAGESIZE, seg, addr, S_READ, svd->cred);
1547 1532 if (error != 0)
1548 1533 return (error);
1549 1534
1550 1535 pp = anon_private(&newap, newseg, addr, prot,
1551 1536 anon_pl[0], 0, newsvd->cred);
1552 1537 }
1553 1538 if (pp == NULL) {
1554 1539 return (ENOMEM);
1555 1540 }
1556 1541 (void) anon_set_ptr(newsvd->amp->ahp, new_idx, newap,
1557 1542 ANON_SLEEP);
1558 1543 page_unlock(pp);
1559 1544 }
1560 1545 addr += PAGESIZE;
1561 1546 old_idx++;
1562 1547 new_idx++;
1563 1548 }
1564 1549
1565 1550 return (0);
1566 1551 }
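
An aside on the inherit-zero decision in segvn_dup_pages() above: the child's copy is zeroed when the whole segment is marked inherit-zero (SEGVN_INZ_ALL) or when only this vpage carries the inherit-zero bit (SEGVN_INZ_VPP); otherwise the parent's page is duplicated. The userland sketch below models just that predicate; the enum and helper names are illustrative stand-ins, not the kernel's definitions.

	#include <stdbool.h>
	#include <stdio.h>

	enum inz_mode { INZ_NONE, INZ_ALL, INZ_VPP };	/* stand-in for SEGVN_INZ_* */

	static bool
	child_gets_zero_page(enum inz_mode seg_mode, bool vpage_inherit_zero)
	{
		/* mirrors the "zero or dup?" test in segvn_dup_pages() */
		return (seg_mode == INZ_ALL ||
		    (seg_mode == INZ_VPP && vpage_inherit_zero));
	}

	int
	main(void)
	{
		printf("%d\n", child_gets_zero_page(INZ_VPP, true));	/* 1: anon_zero path */
		printf("%d\n", child_gets_zero_page(INZ_NONE, true));	/* 0: anon_getpage/anon_private path */
		return (0);
	}
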
1567 1552
1568 1553 static int
1569 1554 segvn_dup(struct seg *seg, struct seg *newseg)
1570 1555 {
1571 1556 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1572 1557 struct segvn_data *newsvd;
1573 1558 pgcnt_t npages = seg_pages(seg);
1574 1559 int error = 0;
1575 1560 size_t len;
1576 1561 struct anon_map *amp;
1577 1562
1578 1563 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1579 1564 ASSERT(newseg->s_as->a_proc->p_parent == curproc);
1580 1565
1581 1566 /*
1582 1567 * If segment has anon reserved, reserve more for the new seg.
1583 1568 * For a MAP_NORESERVE segment swresv will be a count of all the
1584 1569 * allocated anon slots; thus we reserve for the child as many slots
1585 1570 * as the parent has allocated. This semantic prevents the child or
1586 1571 * parent from dying during a copy-on-write fault caused by trying
1587 1572 * to write a shared pre-existing anon page.
1588 1573 */
1589 1574 if ((len = svd->swresv) != 0) {
1590 1575 if (anon_resv(svd->swresv) == 0)
1591 1576 return (ENOMEM);
1592 1577
1593 1578 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
1594 1579 seg, len, 0);
1595 1580 }
1596 1581
1597 1582 newsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
1598 1583
1599 1584 newseg->s_ops = &segvn_ops;
1600 1585 newseg->s_data = (void *)newsvd;
1601 1586 newseg->s_szc = seg->s_szc;
1602 1587
1603 1588 newsvd->seg = newseg;
1604 1589 if ((newsvd->vp = svd->vp) != NULL) {
1605 1590 VN_HOLD(svd->vp);
1606 1591 if (svd->type == MAP_SHARED)
1607 1592 lgrp_shm_policy_init(NULL, svd->vp);
1608 1593 }
1609 1594 newsvd->offset = svd->offset;
1610 1595 newsvd->prot = svd->prot;
1611 1596 newsvd->maxprot = svd->maxprot;
1612 1597 newsvd->pageprot = svd->pageprot;
1613 1598 newsvd->type = svd->type;
1614 1599 newsvd->cred = svd->cred;
1615 1600 crhold(newsvd->cred);
1616 1601 newsvd->advice = svd->advice;
1617 1602 newsvd->pageadvice = svd->pageadvice;
1618 1603 newsvd->svn_inz = svd->svn_inz;
1619 1604 newsvd->swresv = svd->swresv;
1620 1605 newsvd->pageswap = svd->pageswap;
1621 1606 newsvd->flags = svd->flags;
1622 1607 newsvd->softlockcnt = 0;
1623 1608 newsvd->softlockcnt_sbase = 0;
1624 1609 newsvd->softlockcnt_send = 0;
1625 1610 newsvd->policy_info = svd->policy_info;
1626 1611 newsvd->rcookie = HAT_INVALID_REGION_COOKIE;
1627 1612
1628 1613 if ((amp = svd->amp) == NULL || svd->tr_state == SEGVN_TR_ON) {
1629 1614 /*
1630 1615 * Not attaching to a shared anon object.
1631 1616 */
1632 1617 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie) ||
1633 1618 svd->tr_state == SEGVN_TR_OFF);
1634 1619 if (svd->tr_state == SEGVN_TR_ON) {
1635 1620 ASSERT(newsvd->vp != NULL && amp != NULL);
1636 1621 newsvd->tr_state = SEGVN_TR_INIT;
1637 1622 } else {
1638 1623 newsvd->tr_state = svd->tr_state;
1639 1624 }
1640 1625 newsvd->amp = NULL;
1641 1626 newsvd->anon_index = 0;
1642 1627 } else {
1643 1628 /* regions for now are only used on pure vnode segments */
1644 1629 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
1645 1630 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1646 1631 newsvd->tr_state = SEGVN_TR_OFF;
1647 1632 if (svd->type == MAP_SHARED) {
1648 1633 ASSERT(svd->svn_inz == SEGVN_INZ_NONE);
1649 1634 newsvd->amp = amp;
1650 1635 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
1651 1636 amp->refcnt++;
1652 1637 ANON_LOCK_EXIT(&amp->a_rwlock);
1653 1638 newsvd->anon_index = svd->anon_index;
1654 1639 } else {
1655 1640 int reclaim = 1;
1656 1641
1657 1642 /*
1658 1643 * Allocate and initialize new anon_map structure.
1659 1644 */
1660 1645 newsvd->amp = anonmap_alloc(newseg->s_size, 0,
1661 1646 ANON_SLEEP);
1662 1647 newsvd->amp->a_szc = newseg->s_szc;
1663 1648 newsvd->anon_index = 0;
1664 1649 ASSERT(svd->svn_inz == SEGVN_INZ_NONE ||
1665 1650 svd->svn_inz == SEGVN_INZ_ALL ||
1666 1651 svd->svn_inz == SEGVN_INZ_VPP);
1667 1652
1668 1653 /*
1669 1654 * We don't have to acquire the anon_map lock
1670 1655 * for the new segment (since it belongs to an
1671 1656 * address space that is still not associated
1672 1657 * with any process), or the segment in the old
1673 1658 * address space (since all threads in it
1674 1659 * are stopped while duplicating the address space).
1675 1660 */
1676 1661
1677 1662 /*
1678 1663 * The goal of the following code is to make sure that
1679 1664 * softlocked pages do not end up as copy on write
1680 1665 * pages. This would cause problems where one
1681 1666 * thread writes to a page that is COW and a different
1682 1667 * thread in the same process has softlocked it. The
1683 1668 * softlock lock would move away from this process
1684 1669 * because the write would cause this process to get
1685 1670 * a copy (without the softlock).
1686 1671 *
1687 1672 * The strategy here is to just break the
1688 1673 * sharing on pages that could possibly be
1689 1674 * softlocked.
1690 1675 *
1691 1676 * In addition, if any pages have been marked that they
1692 1677 * should be inherited as zero, then we immediately go
1693 1678 * ahead and break COW and zero them. In the case of a
1694 1679 * softlocked page that should be inherited zero, we
1695 1680 * break COW and just get a zero page.
1696 1681 */
1697 1682 retry:
1698 1683 if (svd->softlockcnt ||
1699 1684 svd->svn_inz != SEGVN_INZ_NONE) {
1700 1685 /*
1701 1686 * The softlock count might be non zero
1702 1687 * because some pages are still stuck in the
1703 1688 * cache for lazy reclaim. Flush the cache
1704 1689 * now. This should drop the count to zero.
1705 1690 * [or there is really I/O going on to these
1706 1691 * pages]. Note, we have the writers lock so
1707 1692 * nothing gets inserted during the flush.
1708 1693 */
1709 1694 if (svd->softlockcnt && reclaim == 1) {
1710 1695 segvn_purge(seg);
1711 1696 reclaim = 0;
1712 1697 goto retry;
1713 1698 }
1714 1699
1715 1700 error = segvn_dup_pages(seg, newseg);
1716 1701 if (error != 0) {
1717 1702 newsvd->vpage = NULL;
1718 1703 goto out;
1719 1704 }
1720 1705 } else { /* common case */
1721 1706 if (seg->s_szc != 0) {
1722 1707 /*
1723 1708 * If at least one of anon slots of a
1724 1709 * large page exists then make sure
1725 1710 * all anon slots of a large page
1726 1711 * exist to avoid partial cow sharing
1727 1712 * of a large page in the future.
1728 1713 */
1729 1714 anon_dup_fill_holes(amp->ahp,
1730 1715 svd->anon_index, newsvd->amp->ahp,
1731 1716 0, seg->s_size, seg->s_szc,
1732 1717 svd->vp != NULL);
1733 1718 } else {
1734 1719 anon_dup(amp->ahp, svd->anon_index,
1735 1720 newsvd->amp->ahp, 0, seg->s_size);
1736 1721 }
1737 1722
1738 1723 hat_clrattr(seg->s_as->a_hat, seg->s_base,
1739 1724 seg->s_size, PROT_WRITE);
1740 1725 }
1741 1726 }
1742 1727 }
1743 1728 /*
1744 1729 * If necessary, create a vpage structure for the new segment.
1745 1730 * Do not copy any page lock indications.
1746 1731 */
1747 1732 if (svd->vpage != NULL) {
1748 1733 uint_t i;
1749 1734 struct vpage *ovp = svd->vpage;
1750 1735 struct vpage *nvp;
1751 1736
1752 1737 nvp = newsvd->vpage =
1753 1738 kmem_alloc(vpgtob(npages), KM_SLEEP);
1754 1739 for (i = 0; i < npages; i++) {
1755 1740 *nvp = *ovp++;
1756 1741 VPP_CLRPPLOCK(nvp++);
1757 1742 }
1758 1743 } else
1759 1744 newsvd->vpage = NULL;
1760 1745
1761 1746 /* Inform the vnode of the new mapping */
1762 1747 if (newsvd->vp != NULL) {
1763 1748 error = VOP_ADDMAP(newsvd->vp, (offset_t)newsvd->offset,
1764 1749 newseg->s_as, newseg->s_base, newseg->s_size, newsvd->prot,
1765 1750 newsvd->maxprot, newsvd->type, newsvd->cred, NULL);
1766 1751 }
1767 1752 out:
1768 1753 if (error == 0 && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1769 1754 ASSERT(newsvd->amp == NULL);
1770 1755 ASSERT(newsvd->tr_state == SEGVN_TR_OFF);
1771 1756 newsvd->rcookie = svd->rcookie;
1772 1757 hat_dup_region(newseg->s_as->a_hat, newsvd->rcookie);
1773 1758 }
1774 1759 return (error);
1775 1760 }
1776 1761
1777 1762
1778 1763 /*
1779 1764 * callback function to invoke free_vp_pages() for only those pages actually
1780 1765 * processed by the HAT when a shared region is destroyed.
1781 1766 */
1782 1767 extern int free_pages;
1783 1768
1784 1769 static void
1785 1770 segvn_hat_rgn_unload_callback(caddr_t saddr, caddr_t eaddr, caddr_t r_saddr,
1786 1771 size_t r_size, void *r_obj, u_offset_t r_objoff)
1787 1772 {
1788 1773 u_offset_t off;
1789 1774 size_t len;
1790 1775 vnode_t *vp = (vnode_t *)r_obj;
1791 1776
1792 1777 ASSERT(eaddr > saddr);
1793 1778 ASSERT(saddr >= r_saddr);
1794 1779 ASSERT(saddr < r_saddr + r_size);
1795 1780 ASSERT(eaddr > r_saddr);
1796 1781 ASSERT(eaddr <= r_saddr + r_size);
1797 1782 ASSERT(vp != NULL);
1798 1783
1799 1784 if (!free_pages) {
1800 1785 return;
1801 1786 }
1802 1787
1803 1788 len = eaddr - saddr;
1804 1789 off = (saddr - r_saddr) + r_objoff;
1805 1790 free_vp_pages(vp, off, len);
1806 1791 }
1807 1792
1808 1793 /*
1809 1794 * callback function used by segvn_unmap to invoke free_vp_pages() for only
1810 1795 * those pages actually processed by the HAT
1811 1796 */
1812 1797 static void
1813 1798 segvn_hat_unload_callback(hat_callback_t *cb)
1814 1799 {
1815 1800 struct seg *seg = cb->hcb_data;
1816 1801 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1817 1802 size_t len;
1818 1803 u_offset_t off;
1819 1804
1820 1805 ASSERT(svd->vp != NULL);
1821 1806 ASSERT(cb->hcb_end_addr > cb->hcb_start_addr);
1822 1807 ASSERT(cb->hcb_start_addr >= seg->s_base);
1823 1808
1824 1809 len = cb->hcb_end_addr - cb->hcb_start_addr;
1825 1810 off = cb->hcb_start_addr - seg->s_base;
1826 1811 free_vp_pages(svd->vp, svd->offset + off, len);
1827 1812 }
1828 1813
1829 1814 /*
1830 1815 * This function determines the number of bytes of swap reserved by
1831 1816 * a segment for which per-page accounting is present. It is used to
1832 1817 * calculate the correct value of a segvn_data's swresv.
1833 1818 */
1834 1819 static size_t
1835 1820 segvn_count_swap_by_vpages(struct seg *seg)
1836 1821 {
1837 1822 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1838 1823 struct vpage *vp, *evp;
1839 1824 size_t nswappages = 0;
1840 1825
1841 1826 ASSERT(svd->pageswap);
1842 1827 ASSERT(svd->vpage != NULL);
1843 1828
1844 1829 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
1845 1830
1846 1831 for (vp = svd->vpage; vp < evp; vp++) {
1847 1832 if (VPP_ISSWAPRES(vp))
1848 1833 nswappages++;
1849 1834 }
1850 1835
1851 1836 return (nswappages << PAGESHIFT);
1852 1837 }
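
For reference, the value segvn_count_swap_by_vpages() returns is just the per-page count scaled to bytes: shifting by PAGESHIFT is equivalent to ptob(nswappages). A minimal userland check, assuming a 4 kB page size purely for illustration:

	#include <assert.h>
	#include <stddef.h>

	#define MY_PAGESHIFT	12			/* assumed 4 kB pages */
	#define MY_PAGESIZE	(1UL << MY_PAGESHIFT)

	int
	main(void)
	{
		size_t nswappages = 37;			/* arbitrary example count */

		/* nswappages << PAGESHIFT == nswappages * PAGESIZE, i.e. ptob(nswappages) */
		assert((nswappages << MY_PAGESHIFT) == nswappages * MY_PAGESIZE);
		return (0);
	}
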
1853 1838
1854 1839 static int
1855 1840 segvn_unmap(struct seg *seg, caddr_t addr, size_t len)
1856 1841 {
1857 1842 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
1858 1843 struct segvn_data *nsvd;
1859 1844 struct seg *nseg;
1860 1845 struct anon_map *amp;
1861 1846 pgcnt_t opages; /* old segment size in pages */
1862 1847 pgcnt_t npages; /* new segment size in pages */
1863 1848 pgcnt_t dpages; /* pages being deleted (unmapped) */
1864 1849 hat_callback_t callback; /* used for free_vp_pages() */
1865 1850 hat_callback_t *cbp = NULL;
1866 1851 caddr_t nbase;
1867 1852 size_t nsize;
1868 1853 size_t oswresv;
1869 1854 int reclaim = 1;
1870 1855
1871 1856 /*
1872 1857 * We don't need any segment level locks for "segvn" data
1873 1858 * since the address space is "write" locked.
1874 1859 */
1875 1860 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
1876 1861
1877 1862 /*
1878 1863 * Fail the unmap if pages are SOFTLOCKed through this mapping.
1879 1864 * softlockcnt is protected from change by the as write lock.
1880 1865 */
1881 1866 retry:
1882 1867 if (svd->softlockcnt > 0) {
1883 1868 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1884 1869
1885 1870 /*
1886 1871 * If this is a shared segment, a non-zero softlockcnt
1887 1872 * means locked pages are still in use.
1888 1873 */
1889 1874 if (svd->type == MAP_SHARED) {
1890 1875 return (EAGAIN);
1891 1876 }
1892 1877
1893 1878 /*
1894 1879 * since we do have the writers lock nobody can fill
1895 1880 * the cache during the purge. The flush either succeeds
1896 1881 * or we still have pending I/Os.
1897 1882 */
1898 1883 if (reclaim == 1) {
1899 1884 segvn_purge(seg);
1900 1885 reclaim = 0;
1901 1886 goto retry;
1902 1887 }
1903 1888 return (EAGAIN);
1904 1889 }
1905 1890
1906 1891 /*
1907 1892 * Check for bad sizes
1908 1893 */
1909 1894 if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
1910 1895 (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET)) {
1911 1896 panic("segvn_unmap");
1912 1897 /*NOTREACHED*/
1913 1898 }
1914 1899
1915 1900 if (seg->s_szc != 0) {
1916 1901 size_t pgsz = page_get_pagesize(seg->s_szc);
1917 1902 int err;
1918 1903 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
1919 1904 ASSERT(seg->s_base != addr || seg->s_size != len);
1920 1905 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1921 1906 ASSERT(svd->amp == NULL);
1922 1907 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1923 1908 hat_leave_region(seg->s_as->a_hat,
1924 1909 svd->rcookie, HAT_REGION_TEXT);
1925 1910 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1926 1911 /*
1927 1912 * could pass a flag to segvn_demote_range()
1928 1913 * below to tell it not to do any unloads but
1929 1914 * this case is rare enough to not bother for
1930 1915 * now.
1931 1916 */
1932 1917 } else if (svd->tr_state == SEGVN_TR_INIT) {
1933 1918 svd->tr_state = SEGVN_TR_OFF;
1934 1919 } else if (svd->tr_state == SEGVN_TR_ON) {
1935 1920 ASSERT(svd->amp != NULL);
1936 1921 segvn_textunrepl(seg, 1);
1937 1922 ASSERT(svd->amp == NULL);
1938 1923 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1939 1924 }
1940 1925 VM_STAT_ADD(segvnvmstats.demoterange[0]);
1941 1926 err = segvn_demote_range(seg, addr, len, SDR_END, 0);
1942 1927 if (err == 0) {
1943 1928 return (IE_RETRY);
1944 1929 }
1945 1930 return (err);
1946 1931 }
1947 1932 }
1948 1933
1949 1934 /* Inform the vnode of the unmapping. */
1950 1935 if (svd->vp) {
1951 1936 int error;
1952 1937
1953 1938 error = VOP_DELMAP(svd->vp,
1954 1939 (offset_t)svd->offset + (uintptr_t)(addr - seg->s_base),
1955 1940 seg->s_as, addr, len, svd->prot, svd->maxprot,
1956 1941 svd->type, svd->cred, NULL);
1957 1942
1958 1943 if (error == EAGAIN)
1959 1944 return (error);
1960 1945 }
1961 1946
1962 1947 /*
1963 1948 * Remove any page locks set through this mapping.
1964 1949 * If text replication is not off no page locks could have been
1965 1950 * established via this mapping.
1966 1951 */
1967 1952 if (svd->tr_state == SEGVN_TR_OFF) {
1968 1953 (void) segvn_lockop(seg, addr, len, 0, MC_UNLOCK, NULL, 0);
1969 1954 }
1970 1955
1971 1956 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
1972 1957 ASSERT(svd->amp == NULL);
1973 1958 ASSERT(svd->tr_state == SEGVN_TR_OFF);
1974 1959 ASSERT(svd->type == MAP_PRIVATE);
1975 1960 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
1976 1961 HAT_REGION_TEXT);
1977 1962 svd->rcookie = HAT_INVALID_REGION_COOKIE;
1978 1963 } else if (svd->tr_state == SEGVN_TR_ON) {
1979 1964 ASSERT(svd->amp != NULL);
1980 1965 ASSERT(svd->pageprot == 0 && !(svd->prot & PROT_WRITE));
1981 1966 segvn_textunrepl(seg, 1);
1982 1967 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
1983 1968 } else {
1984 1969 if (svd->tr_state != SEGVN_TR_OFF) {
1985 1970 ASSERT(svd->tr_state == SEGVN_TR_INIT);
1986 1971 svd->tr_state = SEGVN_TR_OFF;
1987 1972 }
1988 1973 /*
1989 1974 * Unload any hardware translations in the range to be taken
1990 1975 * out. Use a callback to invoke free_vp_pages() effectively.
1991 1976 */
1992 1977 if (svd->vp != NULL && free_pages != 0) {
1993 1978 callback.hcb_data = seg;
1994 1979 callback.hcb_function = segvn_hat_unload_callback;
1995 1980 cbp = &callback;
1996 1981 }
1997 1982 hat_unload_callback(seg->s_as->a_hat, addr, len,
1998 1983 HAT_UNLOAD_UNMAP, cbp);
1999 1984
2000 1985 if (svd->type == MAP_SHARED && svd->vp != NULL &&
2001 1986 (svd->vp->v_flag & VVMEXEC) &&
2002 1987 ((svd->prot & PROT_WRITE) || svd->pageprot)) {
2003 1988 segvn_inval_trcache(svd->vp);
2004 1989 }
2005 1990 }
2006 1991
2007 1992 /*
2008 1993 * Check for entire segment
2009 1994 */
2010 1995 if (addr == seg->s_base && len == seg->s_size) {
2011 1996 seg_free(seg);
2012 1997 return (0);
2013 1998 }
2014 1999
2015 2000 opages = seg_pages(seg);
2016 2001 dpages = btop(len);
2017 2002 npages = opages - dpages;
2018 2003 amp = svd->amp;
2019 2004 ASSERT(amp == NULL || amp->a_szc >= seg->s_szc);
2020 2005
2021 2006 /*
2022 2007 * Check for beginning of segment
2023 2008 */
2024 2009 if (addr == seg->s_base) {
2025 2010 if (svd->vpage != NULL) {
2026 2011 size_t nbytes;
2027 2012 struct vpage *ovpage;
2028 2013
2029 2014 ovpage = svd->vpage; /* keep pointer to vpage */
2030 2015
2031 2016 nbytes = vpgtob(npages);
2032 2017 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2033 2018 bcopy(&ovpage[dpages], svd->vpage, nbytes);
2034 2019
2035 2020 /* free up old vpage */
2036 2021 kmem_free(ovpage, vpgtob(opages));
2037 2022 }
2038 2023 if (amp != NULL) {
2039 2024 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2040 2025 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2041 2026 /*
2042 2027 * Shared anon map is no longer in use. Before
2043 2028 * freeing its pages purge all entries from
2044 2029 * pcache that belong to this amp.
2045 2030 */
2046 2031 if (svd->type == MAP_SHARED) {
2047 2032 ASSERT(amp->refcnt == 1);
2048 2033 ASSERT(svd->softlockcnt == 0);
2049 2034 anonmap_purge(amp);
2050 2035 }
2051 2036 /*
2052 2037 * Free up now unused parts of anon_map array.
2053 2038 */
2054 2039 if (amp->a_szc == seg->s_szc) {
2055 2040 if (seg->s_szc != 0) {
2056 2041 anon_free_pages(amp->ahp,
2057 2042 svd->anon_index, len,
2058 2043 seg->s_szc);
2059 2044 } else {
2060 2045 anon_free(amp->ahp,
2061 2046 svd->anon_index,
2062 2047 len);
2063 2048 }
2064 2049 } else {
2065 2050 ASSERT(svd->type == MAP_SHARED);
2066 2051 ASSERT(amp->a_szc > seg->s_szc);
2067 2052 anon_shmap_free_pages(amp,
2068 2053 svd->anon_index, len);
2069 2054 }
2070 2055
2071 2056 /*
2072 2057 * Unreserve swap space for the
2073 2058 * unmapped chunk of this segment in
2074 2059 * case it's MAP_SHARED
2075 2060 */
2076 2061 if (svd->type == MAP_SHARED) {
2077 2062 anon_unresv_zone(len,
2078 2063 seg->s_as->a_proc->p_zone);
2079 2064 amp->swresv -= len;
2080 2065 }
2081 2066 }
2082 2067 ANON_LOCK_EXIT(&amp->a_rwlock);
2083 2068 svd->anon_index += dpages;
2084 2069 }
2085 2070 if (svd->vp != NULL)
2086 2071 svd->offset += len;
2087 2072
2088 2073 seg->s_base += len;
2089 2074 seg->s_size -= len;
2090 2075
2091 2076 if (svd->swresv) {
2092 2077 if (svd->flags & MAP_NORESERVE) {
2093 2078 ASSERT(amp);
2094 2079 oswresv = svd->swresv;
2095 2080
2096 2081 svd->swresv = ptob(anon_pages(amp->ahp,
2097 2082 svd->anon_index, npages));
2098 2083 anon_unresv_zone(oswresv - svd->swresv,
2099 2084 seg->s_as->a_proc->p_zone);
2100 2085 if (SEG_IS_PARTIAL_RESV(seg))
2101 2086 seg->s_as->a_resvsize -= oswresv -
2102 2087 svd->swresv;
2103 2088 } else {
2104 2089 size_t unlen;
2105 2090
2106 2091 if (svd->pageswap) {
2107 2092 oswresv = svd->swresv;
2108 2093 svd->swresv =
2109 2094 segvn_count_swap_by_vpages(seg);
2110 2095 ASSERT(oswresv >= svd->swresv);
2111 2096 unlen = oswresv - svd->swresv;
2112 2097 } else {
2113 2098 svd->swresv -= len;
2114 2099 ASSERT(svd->swresv == seg->s_size);
2115 2100 unlen = len;
2116 2101 }
2117 2102 anon_unresv_zone(unlen,
2118 2103 seg->s_as->a_proc->p_zone);
2119 2104 }
2120 2105 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2121 2106 seg, len, 0);
2122 2107 }
2123 2108
2124 2109 return (0);
2125 2110 }
2126 2111
2127 2112 /*
2128 2113 * Check for end of segment
2129 2114 */
2130 2115 if (addr + len == seg->s_base + seg->s_size) {
2131 2116 if (svd->vpage != NULL) {
2132 2117 size_t nbytes;
2133 2118 struct vpage *ovpage;
2134 2119
2135 2120 ovpage = svd->vpage; /* keep pointer to vpage */
2136 2121
2137 2122 nbytes = vpgtob(npages);
2138 2123 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2139 2124 bcopy(ovpage, svd->vpage, nbytes);
2140 2125
2141 2126 /* free up old vpage */
2142 2127 kmem_free(ovpage, vpgtob(opages));
2143 2128
2144 2129 }
2145 2130 if (amp != NULL) {
2146 2131 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2147 2132 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2148 2133 /*
2149 2134 * Free up now unused parts of anon_map array.
2150 2135 */
2151 2136 ulong_t an_idx = svd->anon_index + npages;
2152 2137
2153 2138 /*
2154 2139 * Shared anon map is no longer in use. Before
2155 2140 * freeing its pages purge all entries from
2156 2141 * pcache that belong to this amp.
2157 2142 */
2158 2143 if (svd->type == MAP_SHARED) {
2159 2144 ASSERT(amp->refcnt == 1);
2160 2145 ASSERT(svd->softlockcnt == 0);
2161 2146 anonmap_purge(amp);
2162 2147 }
2163 2148
2164 2149 if (amp->a_szc == seg->s_szc) {
2165 2150 if (seg->s_szc != 0) {
2166 2151 anon_free_pages(amp->ahp,
2167 2152 an_idx, len,
2168 2153 seg->s_szc);
2169 2154 } else {
2170 2155 anon_free(amp->ahp, an_idx,
2171 2156 len);
2172 2157 }
2173 2158 } else {
2174 2159 ASSERT(svd->type == MAP_SHARED);
2175 2160 ASSERT(amp->a_szc > seg->s_szc);
2176 2161 anon_shmap_free_pages(amp,
2177 2162 an_idx, len);
2178 2163 }
2179 2164
2180 2165 /*
2181 2166 * Unreserve swap space for the
2182 2167 * unmapped chunk of this segment in
2183 2168 * case it's MAP_SHARED
2184 2169 */
2185 2170 if (svd->type == MAP_SHARED) {
2186 2171 anon_unresv_zone(len,
2187 2172 seg->s_as->a_proc->p_zone);
2188 2173 amp->swresv -= len;
2189 2174 }
2190 2175 }
2191 2176 ANON_LOCK_EXIT(&amp->a_rwlock);
2192 2177 }
2193 2178
2194 2179 seg->s_size -= len;
2195 2180
2196 2181 if (svd->swresv) {
2197 2182 if (svd->flags & MAP_NORESERVE) {
2198 2183 ASSERT(amp);
2199 2184 oswresv = svd->swresv;
2200 2185 svd->swresv = ptob(anon_pages(amp->ahp,
2201 2186 svd->anon_index, npages));
2202 2187 anon_unresv_zone(oswresv - svd->swresv,
2203 2188 seg->s_as->a_proc->p_zone);
2204 2189 if (SEG_IS_PARTIAL_RESV(seg))
2205 2190 seg->s_as->a_resvsize -= oswresv -
2206 2191 svd->swresv;
2207 2192 } else {
2208 2193 size_t unlen;
2209 2194
2210 2195 if (svd->pageswap) {
2211 2196 oswresv = svd->swresv;
2212 2197 svd->swresv =
2213 2198 segvn_count_swap_by_vpages(seg);
2214 2199 ASSERT(oswresv >= svd->swresv);
2215 2200 unlen = oswresv - svd->swresv;
2216 2201 } else {
2217 2202 svd->swresv -= len;
2218 2203 ASSERT(svd->swresv == seg->s_size);
2219 2204 unlen = len;
2220 2205 }
2221 2206 anon_unresv_zone(unlen,
2222 2207 seg->s_as->a_proc->p_zone);
2223 2208 }
2224 2209 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2225 2210 "anon proc:%p %lu %u", seg, len, 0);
2226 2211 }
2227 2212
2228 2213 return (0);
2229 2214 }
2230 2215
2231 2216 /*
2232 2217 * The section to go is in the middle of the segment,
2233 2218 * have to make it into two segments. nseg is made for
2234 2219 * the high end while seg is cut down at the low end.
2235 2220 */
2236 2221 nbase = addr + len; /* new seg base */
2237 2222 nsize = (seg->s_base + seg->s_size) - nbase; /* new seg size */
2238 2223 seg->s_size = addr - seg->s_base; /* shrink old seg */
2239 2224 nseg = seg_alloc(seg->s_as, nbase, nsize);
2240 2225 if (nseg == NULL) {
2241 2226 panic("segvn_unmap seg_alloc");
2242 2227 /*NOTREACHED*/
2243 2228 }
2244 2229 nseg->s_ops = seg->s_ops;
2245 2230 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
2246 2231 nseg->s_data = (void *)nsvd;
2247 2232 nseg->s_szc = seg->s_szc;
2248 2233 *nsvd = *svd;
2249 2234 nsvd->seg = nseg;
2250 2235 nsvd->offset = svd->offset + (uintptr_t)(nseg->s_base - seg->s_base);
2251 2236 nsvd->swresv = 0;
2252 2237 nsvd->softlockcnt = 0;
2253 2238 nsvd->softlockcnt_sbase = 0;
2254 2239 nsvd->softlockcnt_send = 0;
2255 2240 nsvd->svn_inz = svd->svn_inz;
2256 2241 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
2257 2242
2258 2243 if (svd->vp != NULL) {
2259 2244 VN_HOLD(nsvd->vp);
2260 2245 if (nsvd->type == MAP_SHARED)
2261 2246 lgrp_shm_policy_init(NULL, nsvd->vp);
2262 2247 }
2263 2248 crhold(svd->cred);
2264 2249
2265 2250 if (svd->vpage == NULL) {
2266 2251 nsvd->vpage = NULL;
2267 2252 } else {
2268 2253 /* need to split vpage into two arrays */
2269 2254 size_t nbytes;
2270 2255 struct vpage *ovpage;
2271 2256
2272 2257 ovpage = svd->vpage; /* keep pointer to vpage */
2273 2258
2274 2259 npages = seg_pages(seg); /* seg has shrunk */
2275 2260 nbytes = vpgtob(npages);
2276 2261 svd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2277 2262
2278 2263 bcopy(ovpage, svd->vpage, nbytes);
2279 2264
2280 2265 npages = seg_pages(nseg);
2281 2266 nbytes = vpgtob(npages);
2282 2267 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
2283 2268
2284 2269 bcopy(&ovpage[opages - npages], nsvd->vpage, nbytes);
2285 2270
2286 2271 /* free up old vpage */
2287 2272 kmem_free(ovpage, vpgtob(opages));
2288 2273 }
2289 2274
2290 2275 if (amp == NULL) {
2291 2276 nsvd->amp = NULL;
2292 2277 nsvd->anon_index = 0;
2293 2278 } else {
2294 2279 /*
2295 2280 * Need to create a new anon map for the new segment.
2296 2281 * We'll also allocate a new smaller array for the old
2297 2282 * smaller segment to save space.
2298 2283 */
2299 2284 opages = btop((uintptr_t)(addr - seg->s_base));
2300 2285 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2301 2286 if (amp->refcnt == 1 || svd->type == MAP_PRIVATE) {
2302 2287 /*
2303 2288 * Free up now unused parts of anon_map array.
2304 2289 */
2305 2290 ulong_t an_idx = svd->anon_index + opages;
2306 2291
2307 2292 /*
2308 2293 * Shared anon map is no longer in use. Before
2309 2294 * freeing its pages purge all entries from
2310 2295 * pcache that belong to this amp.
2311 2296 */
2312 2297 if (svd->type == MAP_SHARED) {
2313 2298 ASSERT(amp->refcnt == 1);
2314 2299 ASSERT(svd->softlockcnt == 0);
2315 2300 anonmap_purge(amp);
2316 2301 }
2317 2302
2318 2303 if (amp->a_szc == seg->s_szc) {
2319 2304 if (seg->s_szc != 0) {
2320 2305 anon_free_pages(amp->ahp, an_idx, len,
2321 2306 seg->s_szc);
2322 2307 } else {
2323 2308 anon_free(amp->ahp, an_idx,
2324 2309 len);
2325 2310 }
2326 2311 } else {
2327 2312 ASSERT(svd->type == MAP_SHARED);
2328 2313 ASSERT(amp->a_szc > seg->s_szc);
2329 2314 anon_shmap_free_pages(amp, an_idx, len);
2330 2315 }
2331 2316
2332 2317 /*
2333 2318 * Unreserve swap space for the
2334 2319 * unmapped chunk of this segment in
2335 2320 * case it's MAP_SHARED
2336 2321 */
2337 2322 if (svd->type == MAP_SHARED) {
2338 2323 anon_unresv_zone(len,
2339 2324 seg->s_as->a_proc->p_zone);
2340 2325 amp->swresv -= len;
2341 2326 }
2342 2327 }
2343 2328 nsvd->anon_index = svd->anon_index +
2344 2329 btop((uintptr_t)(nseg->s_base - seg->s_base));
2345 2330 if (svd->type == MAP_SHARED) {
2346 2331 amp->refcnt++;
2347 2332 nsvd->amp = amp;
2348 2333 } else {
2349 2334 struct anon_map *namp;
2350 2335 struct anon_hdr *nahp;
2351 2336
2352 2337 ASSERT(svd->type == MAP_PRIVATE);
2353 2338 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
2354 2339 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
2355 2340 namp->a_szc = seg->s_szc;
2356 2341 (void) anon_copy_ptr(amp->ahp, svd->anon_index, nahp,
2357 2342 0, btop(seg->s_size), ANON_SLEEP);
2358 2343 (void) anon_copy_ptr(amp->ahp, nsvd->anon_index,
2359 2344 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
2360 2345 anon_release(amp->ahp, btop(amp->size));
2361 2346 svd->anon_index = 0;
2362 2347 nsvd->anon_index = 0;
2363 2348 amp->ahp = nahp;
2364 2349 amp->size = seg->s_size;
2365 2350 nsvd->amp = namp;
2366 2351 }
2367 2352 ANON_LOCK_EXIT(&amp->a_rwlock);
2368 2353 }
2369 2354 if (svd->swresv) {
2370 2355 if (svd->flags & MAP_NORESERVE) {
2371 2356 ASSERT(amp);
2372 2357 oswresv = svd->swresv;
2373 2358 svd->swresv = ptob(anon_pages(amp->ahp,
2374 2359 svd->anon_index, btop(seg->s_size)));
2375 2360 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
2376 2361 nsvd->anon_index, btop(nseg->s_size)));
2377 2362 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2378 2363 anon_unresv_zone(oswresv - (svd->swresv + nsvd->swresv),
2379 2364 seg->s_as->a_proc->p_zone);
2380 2365 if (SEG_IS_PARTIAL_RESV(seg))
2381 2366 seg->s_as->a_resvsize -= oswresv -
2382 2367 (svd->swresv + nsvd->swresv);
2383 2368 } else {
2384 2369 size_t unlen;
2385 2370
2386 2371 if (svd->pageswap) {
2387 2372 oswresv = svd->swresv;
2388 2373 svd->swresv = segvn_count_swap_by_vpages(seg);
2389 2374 nsvd->swresv = segvn_count_swap_by_vpages(nseg);
2390 2375 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
2391 2376 unlen = oswresv - (svd->swresv + nsvd->swresv);
2392 2377 } else {
2393 2378 if (seg->s_size + nseg->s_size + len !=
2394 2379 svd->swresv) {
2395 2380 panic("segvn_unmap: cannot split "
2396 2381 "swap reservation");
2397 2382 /*NOTREACHED*/
2398 2383 }
2399 2384 svd->swresv = seg->s_size;
2400 2385 nsvd->swresv = nseg->s_size;
2401 2386 unlen = len;
2402 2387 }
2403 2388 anon_unresv_zone(unlen,
2404 2389 seg->s_as->a_proc->p_zone);
2405 2390 }
2406 2391 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2407 2392 seg, len, 0);
2408 2393 }
2409 2394
2410 2395 return (0); /* I'm glad that's all over with! */
2411 2396 }
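
A note on the swap accounting repeated in each of the three unmap cases of segvn_unmap() above: for a MAP_NORESERVE segment, the reservation is recomputed from the anon slots that remain and only the difference is handed back to the zone via anon_unresv_zone(). A toy sketch of that arithmetic, with made-up numbers and an assumed page size:

	#include <assert.h>
	#include <stddef.h>

	#define MY_PAGESIZE	4096UL			/* assumed page size */

	int
	main(void)
	{
		size_t oswresv = 50 * MY_PAGESIZE;	/* reservation before the unmap */
		size_t remaining_anon_pages = 30;	/* what anon_pages() reports for the kept range */
		size_t swresv = remaining_anon_pages * MY_PAGESIZE;	/* ptob(...) */

		/* only the difference is unreserved */
		size_t unreserved = oswresv - swresv;
		assert(unreserved == 20 * MY_PAGESIZE);
		return (0);
	}
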
2412 2397
2413 2398 static void
2414 2399 segvn_free(struct seg *seg)
2415 2400 {
2416 2401 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2417 2402 pgcnt_t npages = seg_pages(seg);
2418 2403 struct anon_map *amp;
2419 2404 size_t len;
2420 2405
2421 2406 /*
2422 2407 * We don't need any segment level locks for "segvn" data
2423 2408 * since the address space is "write" locked.
2424 2409 */
2425 2410 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
2426 2411 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2427 2412
2428 2413 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2429 2414
2430 2415 /*
2431 2416 * Be sure to unlock pages. XXX Why do things get free'ed instead
2432 2417 * of unmapped? XXX
2433 2418 */
2434 2419 (void) segvn_lockop(seg, seg->s_base, seg->s_size,
2435 2420 0, MC_UNLOCK, NULL, 0);
2436 2421
2437 2422 /*
2438 2423 * Deallocate the vpage and anon pointers if necessary and possible.
2439 2424 */
2440 2425 if (svd->vpage != NULL) {
2441 2426 kmem_free(svd->vpage, vpgtob(npages));
2442 2427 svd->vpage = NULL;
2443 2428 }
2444 2429 if ((amp = svd->amp) != NULL) {
2445 2430 /*
2446 2431 * If there are no more references to this anon_map
2447 2432 * structure, then deallocate the structure after freeing
2448 2433 * up all the anon slot pointers that we can.
2449 2434 */
2450 2435 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
2451 2436 ASSERT(amp->a_szc >= seg->s_szc);
2452 2437 if (--amp->refcnt == 0) {
2453 2438 if (svd->type == MAP_PRIVATE) {
2454 2439 /*
2455 2440 * Private - we only need to anon_free
2456 2441 * the part that this segment refers to.
2457 2442 */
2458 2443 if (seg->s_szc != 0) {
2459 2444 anon_free_pages(amp->ahp,
2460 2445 svd->anon_index, seg->s_size,
2461 2446 seg->s_szc);
2462 2447 } else {
2463 2448 anon_free(amp->ahp, svd->anon_index,
2464 2449 seg->s_size);
2465 2450 }
2466 2451 } else {
2467 2452
2468 2453 /*
2469 2454 * Shared anon map is no longer in use. Before
2470 2455 * freeing its pages purge all entries from
2471 2456 * pcache that belong to this amp.
2472 2457 */
2473 2458 ASSERT(svd->softlockcnt == 0);
2474 2459 anonmap_purge(amp);
2475 2460
2476 2461 /*
2477 2462 * Shared - anon_free the entire
2478 2463 * anon_map's worth of stuff and
2479 2464 * release any swap reservation.
2480 2465 */
2481 2466 if (amp->a_szc != 0) {
2482 2467 anon_shmap_free_pages(amp, 0,
2483 2468 amp->size);
2484 2469 } else {
2485 2470 anon_free(amp->ahp, 0, amp->size);
2486 2471 }
2487 2472 if ((len = amp->swresv) != 0) {
2488 2473 anon_unresv_zone(len,
2489 2474 seg->s_as->a_proc->p_zone);
2490 2475 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
2491 2476 "anon proc:%p %lu %u", seg, len, 0);
2492 2477 }
2493 2478 }
2494 2479 svd->amp = NULL;
2495 2480 ANON_LOCK_EXIT(&amp->a_rwlock);
2496 2481 anonmap_free(amp);
2497 2482 } else if (svd->type == MAP_PRIVATE) {
2498 2483 /*
2499 2484 * We had a private mapping which still has
2500 2485 * a held anon_map so just free up all the
2501 2486 * anon slot pointers that we were using.
2502 2487 */
2503 2488 if (seg->s_szc != 0) {
2504 2489 anon_free_pages(amp->ahp, svd->anon_index,
2505 2490 seg->s_size, seg->s_szc);
2506 2491 } else {
2507 2492 anon_free(amp->ahp, svd->anon_index,
2508 2493 seg->s_size);
2509 2494 }
2510 2495 ANON_LOCK_EXIT(&amp->a_rwlock);
2511 2496 } else {
2512 2497 ANON_LOCK_EXIT(&amp->a_rwlock);
2513 2498 }
2514 2499 }
2515 2500
2516 2501 /*
2517 2502 * Release swap reservation.
2518 2503 */
2519 2504 if ((len = svd->swresv) != 0) {
2520 2505 anon_unresv_zone(svd->swresv,
2521 2506 seg->s_as->a_proc->p_zone);
2522 2507 TRACE_3(TR_FAC_VM, TR_ANON_PROC, "anon proc:%p %lu %u",
2523 2508 seg, len, 0);
2524 2509 if (SEG_IS_PARTIAL_RESV(seg))
2525 2510 seg->s_as->a_resvsize -= svd->swresv;
2526 2511 svd->swresv = 0;
2527 2512 }
2528 2513 /*
2529 2514 * Release claim on vnode, credentials, and finally free the
2530 2515 * private data.
2531 2516 */
2532 2517 if (svd->vp != NULL) {
2533 2518 if (svd->type == MAP_SHARED)
2534 2519 lgrp_shm_policy_fini(NULL, svd->vp);
2535 2520 VN_RELE(svd->vp);
2536 2521 svd->vp = NULL;
2537 2522 }
2538 2523 crfree(svd->cred);
2539 2524 svd->pageprot = 0;
2540 2525 svd->pageadvice = 0;
2541 2526 svd->pageswap = 0;
2542 2527 svd->cred = NULL;
2543 2528
2544 2529 /*
2545 2530 * Take segfree_syncmtx lock to let segvn_reclaim() finish if it's
2546 2531 * still working with this segment without holding as lock (in case
2547 2532 * it's called by pcache async thread).
2548 2533 */
2549 2534 ASSERT(svd->softlockcnt == 0);
2550 2535 mutex_enter(&svd->segfree_syncmtx);
2551 2536 mutex_exit(&svd->segfree_syncmtx);
2552 2537
2553 2538 seg->s_data = NULL;
2554 2539 kmem_cache_free(segvn_cache, svd);
2555 2540 }
2556 2541
2557 2542 /*
2558 2543 * Do a F_SOFTUNLOCK call over the range requested. The range must have
2559 2544 * already been F_SOFTLOCK'ed.
2560 2545 * Caller must always match addr and len of a softunlock with a previous
2561 2546 * softlock with exactly the same addr and len.
2562 2547 */
2563 2548 static void
2564 2549 segvn_softunlock(struct seg *seg, caddr_t addr, size_t len, enum seg_rw rw)
2565 2550 {
2566 2551 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2567 2552 page_t *pp;
2568 2553 caddr_t adr;
2569 2554 struct vnode *vp;
2570 2555 u_offset_t offset;
2571 2556 ulong_t anon_index;
2572 2557 struct anon_map *amp;
2573 2558 struct anon *ap = NULL;
2574 2559
2575 2560 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
2576 2561 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
2577 2562
2578 2563 if ((amp = svd->amp) != NULL)
2579 2564 anon_index = svd->anon_index + seg_page(seg, addr);
2580 2565
2581 2566 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
2582 2567 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2583 2568 hat_unlock_region(seg->s_as->a_hat, addr, len, svd->rcookie);
2584 2569 } else {
2585 2570 hat_unlock(seg->s_as->a_hat, addr, len);
2586 2571 }
2587 2572 for (adr = addr; adr < addr + len; adr += PAGESIZE) {
2588 2573 if (amp != NULL) {
2589 2574 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
2590 2575 if ((ap = anon_get_ptr(amp->ahp, anon_index++))
2591 2576 != NULL) {
2592 2577 swap_xlate(ap, &vp, &offset);
2593 2578 } else {
2594 2579 vp = svd->vp;
2595 2580 offset = svd->offset +
2596 2581 (uintptr_t)(adr - seg->s_base);
2597 2582 }
2598 2583 ANON_LOCK_EXIT(&amp->a_rwlock);
2599 2584 } else {
2600 2585 vp = svd->vp;
2601 2586 offset = svd->offset +
2602 2587 (uintptr_t)(adr - seg->s_base);
2603 2588 }
2604 2589
2605 2590 /*
2606 2591 * Use page_find() instead of page_lookup() to
2607 2592 * find the page since we know that it is locked.
2608 2593 */
2609 2594 pp = page_find(vp, offset);
2610 2595 if (pp == NULL) {
2611 2596 panic(
2612 2597 "segvn_softunlock: addr %p, ap %p, vp %p, off %llx",
2613 2598 (void *)adr, (void *)ap, (void *)vp, offset);
2614 2599 /*NOTREACHED*/
2615 2600 }
2616 2601
2617 2602 if (rw == S_WRITE) {
2618 2603 hat_setrefmod(pp);
2619 2604 if (seg->s_as->a_vbits)
2620 2605 hat_setstat(seg->s_as, adr, PAGESIZE,
2621 2606 P_REF | P_MOD);
2622 2607 } else if (rw != S_OTHER) {
2623 2608 hat_setref(pp);
2624 2609 if (seg->s_as->a_vbits)
2625 2610 hat_setstat(seg->s_as, adr, PAGESIZE, P_REF);
2626 2611 }
2627 2612 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2628 2613 "segvn_fault:pp %p vp %p offset %llx", pp, vp, offset);
2629 2614 page_unlock(pp);
2630 2615 }
2631 2616 ASSERT(svd->softlockcnt >= btop(len));
2632 2617 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -btop(len))) {
2633 2618 /*
2634 2619 * All SOFTLOCKS are gone. Wakeup any waiting
2635 2620 * unmappers so they can try again to unmap.
2636 2621 * Check for waiters first without the mutex
2637 2622 * held so we don't always grab the mutex on
2638 2623 * softunlocks.
2639 2624 */
2640 2625 if (AS_ISUNMAPWAIT(seg->s_as)) {
2641 2626 mutex_enter(&seg->s_as->a_contents);
2642 2627 if (AS_ISUNMAPWAIT(seg->s_as)) {
2643 2628 AS_CLRUNMAPWAIT(seg->s_as);
2644 2629 cv_broadcast(&seg->s_as->a_cv);
2645 2630 }
2646 2631 mutex_exit(&seg->s_as->a_contents);
2647 2632 }
2648 2633 }
2649 2634 }
2650 2635
2651 2636 #define PAGE_HANDLED ((page_t *)-1)
2652 2637
2653 2638 /*
2654 2639 * Release all the pages in the NULL terminated ppp list
2655 2640 * which haven't already been converted to PAGE_HANDLED.
2656 2641 */
2657 2642 static void
2658 2643 segvn_pagelist_rele(page_t **ppp)
2659 2644 {
2660 2645 for (; *ppp != NULL; ppp++) {
2661 2646 if (*ppp != PAGE_HANDLED)
2662 2647 page_unlock(*ppp);
2663 2648 }
2664 2649 }
2665 2650
2666 2651 static int stealcow = 1;
2667 2652
2668 2653 /*
2669 2654 * Workaround for viking chip bug. See bug id 1220902.
2670 2655 * To fix this down in pagefault() would require importing so
2671 2656 * much as (address space) and segvn code as to be unmaintainable.
2672 2657 */
2673 2658 int enable_mbit_wa = 0;
2674 2659
2675 2660 /*
2676 2661 * Handles all the dirty work of getting the right
2677 2662 * anonymous pages and loading up the translations.
2678 2663 * This routine is called only from segvn_fault()
2679 2664 * when looping over the range of addresses requested.
2680 2665 *
2681 2666 * The basic algorithm here is:
2682 2667 * If this is an anon_zero case
2683 2668 * Call anon_zero to allocate page
2684 2669 * Load up translation
2685 2670 * Return
2686 2671 * endif
2687 2672 * If this is an anon page
2688 2673 * Use anon_getpage to get the page
2689 2674 * else
2690 2675 * Find page in pl[] list passed in
2691 2676 * endif
2692 2677 * If not a cow
2693 2678 * Load up the translation to the page
2694 2679 * return
2695 2680 * endif
2696 2681 * Call anon_private to handle cow
2697 2682 * Load up (writable) translation to new page
2698 2683 */
2699 2684 static faultcode_t
2700 2685 segvn_faultpage(
2701 2686 struct hat *hat, /* the hat to use for mapping */
2702 2687 struct seg *seg, /* seg_vn of interest */
2703 2688 caddr_t addr, /* address in as */
2704 2689 u_offset_t off, /* offset in vp */
2705 2690 struct vpage *vpage, /* pointer to vpage for vp, off */
2706 2691 page_t *pl[], /* object source page pointer */
2707 2692 uint_t vpprot, /* access allowed to object pages */
2708 2693 enum fault_type type, /* type of fault */
2709 2694 enum seg_rw rw, /* type of access at fault */
2710 2695 int brkcow) /* we may need to break cow */
2711 2696 {
2712 2697 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
2713 2698 page_t *pp, **ppp;
2714 2699 uint_t pageflags = 0;
2715 2700 page_t *anon_pl[1 + 1];
2716 2701 page_t *opp = NULL; /* original page */
2717 2702 uint_t prot;
2718 2703 int err;
2719 2704 int cow;
2720 2705 int claim;
2721 2706 int steal = 0;
2722 2707 ulong_t anon_index;
2723 2708 struct anon *ap, *oldap;
2724 2709 struct anon_map *amp;
2725 2710 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
2726 2711 int anon_lock = 0;
2727 2712 anon_sync_obj_t cookie;
2728 2713
2729 2714 if (svd->flags & MAP_TEXT) {
2730 2715 hat_flag |= HAT_LOAD_TEXT;
2731 2716 }
2732 2717
2733 2718 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
2734 2719 ASSERT(seg->s_szc == 0);
2735 2720 ASSERT(svd->tr_state != SEGVN_TR_INIT);
2736 2721
2737 2722 /*
2738 2723 * Initialize protection value for this page.
2739 2724 * If we have per page protection values check it now.
2740 2725 */
2741 2726 if (svd->pageprot) {
2742 2727 uint_t protchk;
2743 2728
2744 2729 switch (rw) {
2745 2730 case S_READ:
2746 2731 protchk = PROT_READ;
2747 2732 break;
2748 2733 case S_WRITE:
2749 2734 protchk = PROT_WRITE;
2750 2735 break;
2751 2736 case S_EXEC:
2752 2737 protchk = PROT_EXEC;
2753 2738 break;
2754 2739 case S_OTHER:
2755 2740 default:
2756 2741 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
2757 2742 break;
2758 2743 }
2759 2744
2760 2745 prot = VPP_PROT(vpage);
2761 2746 if ((prot & protchk) == 0)
2762 2747 return (FC_PROT); /* illegal access type */
2763 2748 } else {
2764 2749 prot = svd->prot;
2765 2750 }
2766 2751
2767 2752 if (type == F_SOFTLOCK) {
2768 2753 atomic_inc_ulong((ulong_t *)&svd->softlockcnt);
2769 2754 }
2770 2755
2771 2756 /*
2772 2757 * Always acquire the anon array lock to prevent 2 threads from
2773 2758 * allocating separate anon slots for the same "addr".
2774 2759 */
2775 2760
2776 2761 if ((amp = svd->amp) != NULL) {
2777 2762 ASSERT(RW_READ_HELD(&amp->a_rwlock));
2778 2763 anon_index = svd->anon_index + seg_page(seg, addr);
2779 2764 anon_array_enter(amp, anon_index, &cookie);
2780 2765 anon_lock = 1;
2781 2766 }
2782 2767
2783 2768 if (svd->vp == NULL && amp != NULL) {
2784 2769 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL) {
2785 2770 /*
2786 2771 * Allocate a (normally) writable anonymous page of
2787 2772 * zeroes. If no advance reservations, reserve now.
2788 2773 */
2789 2774 if (svd->flags & MAP_NORESERVE) {
2790 2775 if (anon_resv_zone(ptob(1),
2791 2776 seg->s_as->a_proc->p_zone)) {
2792 2777 atomic_add_long(&svd->swresv, ptob(1));
2793 2778 atomic_add_long(&seg->s_as->a_resvsize,
2794 2779 ptob(1));
2795 2780 } else {
2796 2781 err = ENOMEM;
2797 2782 goto out;
2798 2783 }
2799 2784 }
2800 2785 if ((pp = anon_zero(seg, addr, &ap,
2801 2786 svd->cred)) == NULL) {
2802 2787 err = ENOMEM;
2803 2788 goto out; /* out of swap space */
2804 2789 }
2805 2790 /*
2806 2791 * Re-acquire the anon_map lock and
2807 2792 * initialize the anon array entry.
2808 2793 */
2809 2794 (void) anon_set_ptr(amp->ahp, anon_index, ap,
2810 2795 ANON_SLEEP);
2811 2796
2812 2797 ASSERT(pp->p_szc == 0);
2813 2798
2814 2799 /*
2815 2800 * Handle pages that have been marked for migration
2816 2801 */
2817 2802 if (lgrp_optimizations())
2818 2803 page_migrate(seg, addr, &pp, 1);
2819 2804
2820 2805 if (enable_mbit_wa) {
2821 2806 if (rw == S_WRITE)
2822 2807 hat_setmod(pp);
2823 2808 else if (!hat_ismod(pp))
2824 2809 prot &= ~PROT_WRITE;
2825 2810 }
2826 2811 /*
2827 2812 * If AS_PAGLCK is set in a_flags (via memcntl(2)
2828 2813 * with MC_LOCKAS, MCL_FUTURE) and this is a
2829 2814 * MAP_NORESERVE segment, we may need to
2830 2815 * permanently lock the page as it is being faulted
2831 2816 * for the first time. The following text applies
2832 2817 * only to MAP_NORESERVE segments:
2833 2818 *
2834 2819 * As per memcntl(2), if this segment was created
2835 2820 * after MCL_FUTURE was applied (a "future"
2836 2821 * segment), its pages must be locked. If this
2837 2822 * segment existed at MCL_FUTURE application (a
2838 2823 * "past" segment), the interface is unclear.
2839 2824 *
2840 2825 * We decide to lock only if vpage is present:
2841 2826 *
2842 2827 * - "future" segments will have a vpage array (see
2843 2828 * as_map), and so will be locked as required
2844 2829 *
2845 2830 * - "past" segments may not have a vpage array,
2846 2831 * depending on whether events (such as
2847 2832 * mprotect) have occurred. Locking if vpage
2848 2833 * exists will preserve legacy behavior. Not
2849 2834 * locking if vpage is absent, will not break
2850 2835 * the interface or legacy behavior. Note that
2851 2836 * allocating vpage here if it's absent requires
2852 2837 * upgrading the segvn reader lock, the cost of
2853 2838 * which does not seem worthwhile.
2854 2839 *
2855 2840 * Usually testing and setting VPP_ISPPLOCK and
2856 2841 * VPP_SETPPLOCK requires holding the segvn lock as
2857 2842 * writer, but in this case all readers are
2858 2843 * serializing on the anon array lock.
2859 2844 */
2860 2845 if (AS_ISPGLCK(seg->s_as) && vpage != NULL &&
2861 2846 (svd->flags & MAP_NORESERVE) &&
2862 2847 !VPP_ISPPLOCK(vpage)) {
2863 2848 proc_t *p = seg->s_as->a_proc;
2864 2849 ASSERT(svd->type == MAP_PRIVATE);
2865 2850 mutex_enter(&p->p_lock);
2866 2851 if (rctl_incr_locked_mem(p, NULL, PAGESIZE,
2867 2852 1) == 0) {
2868 2853 claim = VPP_PROT(vpage) & PROT_WRITE;
2869 2854 if (page_pp_lock(pp, claim, 0)) {
2870 2855 VPP_SETPPLOCK(vpage);
2871 2856 } else {
2872 2857 rctl_decr_locked_mem(p, NULL,
2873 2858 PAGESIZE, 1);
2874 2859 }
2875 2860 }
2876 2861 mutex_exit(&p->p_lock);
2877 2862 }
2878 2863
2879 2864 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
2880 2865 hat_memload(hat, addr, pp, prot, hat_flag);
2881 2866
2882 2867 if (!(hat_flag & HAT_LOAD_LOCK))
2883 2868 page_unlock(pp);
2884 2869
2885 2870 anon_array_exit(&cookie);
2886 2871 return (0);
2887 2872 }
2888 2873 }
2889 2874
2890 2875 /*
2891 2876 * Obtain the page structure via anon_getpage() if it is
2892 2877 * a private copy of an object (the result of a previous
2893 2878 * copy-on-write).
2894 2879 */
2895 2880 if (amp != NULL) {
2896 2881 if ((ap = anon_get_ptr(amp->ahp, anon_index)) != NULL) {
2897 2882 err = anon_getpage(&ap, &vpprot, anon_pl, PAGESIZE,
2898 2883 seg, addr, rw, svd->cred);
2899 2884 if (err)
2900 2885 goto out;
2901 2886
2902 2887 if (svd->type == MAP_SHARED) {
2903 2888 /*
2904 2889 * If this is a shared mapping to an
2905 2890 * anon_map, then ignore the write
2906 2891 * permissions returned by anon_getpage().
2907 2892 * They apply to the private mappings
2908 2893 * of this anon_map.
2909 2894 */
2910 2895 vpprot |= PROT_WRITE;
2911 2896 }
2912 2897 opp = anon_pl[0];
2913 2898 }
2914 2899 }
2915 2900
2916 2901 /*
2917 2902 * Search the pl[] list passed in if it is from the
2918 2903 * original object (i.e., not a private copy).
2919 2904 */
2920 2905 if (opp == NULL) {
2921 2906 /*
2922 2907 * Find original page. We must be bringing it in
2923 2908 * from the list in pl[].
2924 2909 */
2925 2910 for (ppp = pl; (opp = *ppp) != NULL; ppp++) {
2926 2911 if (opp == PAGE_HANDLED)
2927 2912 continue;
2928 2913 ASSERT(opp->p_vnode == svd->vp); /* XXX */
2929 2914 if (opp->p_offset == off)
2930 2915 break;
2931 2916 }
2932 2917 if (opp == NULL) {
2933 2918 panic("segvn_faultpage not found");
2934 2919 /*NOTREACHED*/
2935 2920 }
2936 2921 *ppp = PAGE_HANDLED;
2937 2922
2938 2923 }
2939 2924
2940 2925 ASSERT(PAGE_LOCKED(opp));
2941 2926
2942 2927 TRACE_3(TR_FAC_VM, TR_SEGVN_FAULT,
2943 2928 "segvn_fault:pp %p vp %p offset %llx", opp, NULL, 0);
2944 2929
2945 2930 /*
2946 2931 * The fault is treated as a copy-on-write fault if a
2947 2932 * write occurs on a private segment and the object
2948 2933 * page (i.e., mapping) is write protected. We assume
2949 2934 * that fatal protection checks have already been made.
2950 2935 */
2951 2936
2952 2937 if (brkcow) {
2953 2938 ASSERT(svd->tr_state == SEGVN_TR_OFF);
2954 2939 cow = !(vpprot & PROT_WRITE);
2955 2940 } else if (svd->tr_state == SEGVN_TR_ON) {
2956 2941 /*
2957 2942 * If we are doing text replication, COW on first touch.
2958 2943 */
2959 2944 ASSERT(amp != NULL);
2960 2945 ASSERT(svd->vp != NULL);
2961 2946 ASSERT(rw != S_WRITE);
2962 2947 cow = (ap == NULL);
2963 2948 } else {
2964 2949 cow = 0;
2965 2950 }
2966 2951
2967 2952 /*
2968 2953 * If not a copy-on-write case load the translation
2969 2954 * and return.
2970 2955 */
2971 2956 if (cow == 0) {
2972 2957
2973 2958 /*
2974 2959 * Handle pages that have been marked for migration
2975 2960 */
2976 2961 if (lgrp_optimizations())
2977 2962 page_migrate(seg, addr, &opp, 1);
2978 2963
2979 2964 if (IS_VMODSORT(opp->p_vnode) || enable_mbit_wa) {
2980 2965 if (rw == S_WRITE)
2981 2966 hat_setmod(opp);
2982 2967 else if (rw != S_OTHER && !hat_ismod(opp))
2983 2968 prot &= ~PROT_WRITE;
2984 2969 }
2985 2970
2986 2971 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
2987 2972 (!svd->pageprot && svd->prot == (prot & vpprot)));
2988 2973 ASSERT(amp == NULL ||
2989 2974 svd->rcookie == HAT_INVALID_REGION_COOKIE);
2990 2975 hat_memload_region(hat, addr, opp, prot & vpprot, hat_flag,
2991 2976 svd->rcookie);
2992 2977
2993 2978 if (!(hat_flag & HAT_LOAD_LOCK))
2994 2979 page_unlock(opp);
2995 2980
2996 2981 if (anon_lock) {
2997 2982 anon_array_exit(&cookie);
2998 2983 }
2999 2984 return (0);
3000 2985 }
3001 2986
3002 2987 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3003 2988
3004 2989 hat_setref(opp);
3005 2990
3006 2991 ASSERT(amp != NULL && anon_lock);
3007 2992
3008 2993 /*
3009 2994 * Steal the page only if it isn't a private page
3010 2995 * since stealing a private page is not worth the effort.
3011 2996 */
3012 2997 if ((ap = anon_get_ptr(amp->ahp, anon_index)) == NULL)
3013 2998 steal = 1;
3014 2999
3015 3000 /*
3016 3001 * Steal the original page if the following conditions are true:
3017 3002 *
3018 3003 * We are low on memory, the page is not private, page is not large,
3019 3004 * not shared, not modified, not `locked' or if we have it `locked'
3020 3005 * (i.e., p_cowcnt == 1 and p_lckcnt == 0, which also implies
3021 3006 * that the page is not shared) and if it doesn't have any
3022 3007 * translations. page_struct_lock isn't needed to look at p_cowcnt
3023 3008 * and p_lckcnt because we first get exclusive lock on page.
3024 3009 */
3025 3010 (void) hat_pagesync(opp, HAT_SYNC_DONTZERO | HAT_SYNC_STOPON_MOD);
3026 3011
3027 3012 if (stealcow && freemem < minfree && steal && opp->p_szc == 0 &&
3028 3013 page_tryupgrade(opp) && !hat_ismod(opp) &&
3029 3014 ((opp->p_lckcnt == 0 && opp->p_cowcnt == 0) ||
3030 3015 (opp->p_lckcnt == 0 && opp->p_cowcnt == 1 &&
3031 3016 vpage != NULL && VPP_ISPPLOCK(vpage)))) {
3032 3017 /*
3033 3018 * Check if this page has other translations
3034 3019 * after unloading our translation.
3035 3020 */
3036 3021 if (hat_page_is_mapped(opp)) {
3037 3022 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3038 3023 hat_unload(seg->s_as->a_hat, addr, PAGESIZE,
3039 3024 HAT_UNLOAD);
3040 3025 }
3041 3026
3042 3027 /*
3043 3028 * hat_unload() might sync back someone else's recent
3044 3029 * modification, so check again.
3045 3030 */
3046 3031 if (!hat_ismod(opp) && !hat_page_is_mapped(opp))
3047 3032 pageflags |= STEAL_PAGE;
3048 3033 }
3049 3034
3050 3035 /*
3051 3036 * If we have a vpage pointer, see if it indicates that we have
3052 3037 * ``locked'' the page we map -- if so, tell anon_private to
3053 3038 * transfer the locking resource to the new page.
3054 3039 *
3055 3040 * See Statement at the beginning of segvn_lockop regarding
3056 3041 * the way lockcnts/cowcnts are handled during COW.
3057 3042 *
3058 3043 */
3059 3044 if (vpage != NULL && VPP_ISPPLOCK(vpage))
3060 3045 pageflags |= LOCK_PAGE;
3061 3046
3062 3047 /*
3063 3048 * Allocate a private page and perform the copy.
3064 3049 * For MAP_NORESERVE reserve swap space now, unless this
3065 3050 * is a cow fault on an existing anon page in which case
3066 3051 * MAP_NORESERVE will have made advance reservations.
3067 3052 */
3068 3053 if ((svd->flags & MAP_NORESERVE) && (ap == NULL)) {
3069 3054 if (anon_resv_zone(ptob(1), seg->s_as->a_proc->p_zone)) {
3070 3055 atomic_add_long(&svd->swresv, ptob(1));
3071 3056 atomic_add_long(&seg->s_as->a_resvsize, ptob(1));
3072 3057 } else {
3073 3058 page_unlock(opp);
3074 3059 err = ENOMEM;
3075 3060 goto out;
3076 3061 }
3077 3062 }
3078 3063 oldap = ap;
3079 3064 pp = anon_private(&ap, seg, addr, prot, opp, pageflags, svd->cred);
3080 3065 if (pp == NULL) {
3081 3066 err = ENOMEM; /* out of swap space */
3082 3067 goto out;
3083 3068 }
3084 3069
3085 3070 /*
3086 3071 * If we copied away from an anonymous page, then
3087 3072 * we are one step closer to freeing up an anon slot.
3088 3073 *
3089 3074 * NOTE: The original anon slot must be released while
3090 3075 * holding the "anon_map" lock. This is necessary to prevent
3091 3076 * other threads from obtaining a pointer to the anon slot
3092 3077 * which may be freed if its "refcnt" is 1.
3093 3078 */
3094 3079 if (oldap != NULL)
3095 3080 anon_decref(oldap);
3096 3081
3097 3082 (void) anon_set_ptr(amp->ahp, anon_index, ap, ANON_SLEEP);
3098 3083
3099 3084 /*
3100 3085 * Handle pages that have been marked for migration
3101 3086 */
3102 3087 if (lgrp_optimizations())
3103 3088 page_migrate(seg, addr, &pp, 1);
3104 3089
3105 3090 ASSERT(pp->p_szc == 0);
3106 3091
3107 3092 ASSERT(!IS_VMODSORT(pp->p_vnode));
3108 3093 if (enable_mbit_wa) {
3109 3094 if (rw == S_WRITE)
3110 3095 hat_setmod(pp);
3111 3096 else if (!hat_ismod(pp))
3112 3097 prot &= ~PROT_WRITE;
3113 3098 }
3114 3099
3115 3100 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
3116 3101 hat_memload(hat, addr, pp, prot, hat_flag);
3117 3102
3118 3103 if (!(hat_flag & HAT_LOAD_LOCK))
3119 3104 page_unlock(pp);
3120 3105
3121 3106 ASSERT(anon_lock);
3122 3107 anon_array_exit(&cookie);
3123 3108 return (0);
3124 3109 out:
3125 3110 if (anon_lock)
3126 3111 anon_array_exit(&cookie);
3127 3112
3128 3113 if (type == F_SOFTLOCK) {
3129 3114 atomic_dec_ulong((ulong_t *)&svd->softlockcnt);
3130 3115 }
3131 3116 return (FC_MAKE_ERR(err));
3132 3117 }
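
To summarize the copy-on-write decision segvn_faultpage() makes above: with brkcow set, the fault is treated as COW when the object page is not writable through this mapping; with text replication on, COW happens on first touch (no anon slot yet); otherwise the existing page is simply loaded. The sketch below restates that logic in plain userland C; the names and constants are illustrative, not the kernel's.

	#include <stdbool.h>
	#include <stdio.h>

	#define MY_PROT_WRITE	0x2			/* stand-in for PROT_WRITE */

	static bool
	fault_is_cow(bool brkcow, bool tr_on, unsigned vpprot, bool have_anon_slot)
	{
		if (brkcow)
			return ((vpprot & MY_PROT_WRITE) == 0);	/* object page write-protected */
		if (tr_on)
			return (!have_anon_slot);		/* replicate on first touch */
		return (false);
	}

	int
	main(void)
	{
		printf("%d\n", fault_is_cow(true, false, 0, true));		/* 1: break COW */
		printf("%d\n", fault_is_cow(false, true, MY_PROT_WRITE, false));	/* 1: first-touch copy */
		printf("%d\n", fault_is_cow(false, false, MY_PROT_WRITE, true));	/* 0: just map it */
		return (0);
	}
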
3133 3118
3134 3119 /*
3135 3120 * relocate a bunch of smaller targ pages into one large repl page. all targ
3136 3121 * pages must be complete pages smaller than replacement pages.
3137 3122 * it's assumed that no page's szc can change since they are all PAGESIZE or
3138 3123 * complete large pages locked SHARED.
3139 3124 */
3140 3125 static void
3141 3126 segvn_relocate_pages(page_t **targ, page_t *replacement)
3142 3127 {
3143 3128 page_t *pp;
3144 3129 pgcnt_t repl_npgs, curnpgs;
3145 3130 pgcnt_t i;
3146 3131 uint_t repl_szc = replacement->p_szc;
3147 3132 page_t *first_repl = replacement;
3148 3133 page_t *repl;
3149 3134 spgcnt_t npgs;
3150 3135
3151 3136 VM_STAT_ADD(segvnvmstats.relocatepages[0]);
3152 3137
3153 3138 ASSERT(repl_szc != 0);
3154 3139 npgs = repl_npgs = page_get_pagecnt(repl_szc);
3155 3140
3156 3141 i = 0;
3157 3142 while (repl_npgs) {
3158 3143 spgcnt_t nreloc;
3159 3144 int err;
3160 3145 ASSERT(replacement != NULL);
3161 3146 pp = targ[i];
3162 3147 ASSERT(pp->p_szc < repl_szc);
3163 3148 ASSERT(PAGE_EXCL(pp));
3164 3149 ASSERT(!PP_ISFREE(pp));
3165 3150 curnpgs = page_get_pagecnt(pp->p_szc);
3166 3151 if (curnpgs == 1) {
3167 3152 VM_STAT_ADD(segvnvmstats.relocatepages[1]);
3168 3153 repl = replacement;
3169 3154 page_sub(&replacement, repl);
3170 3155 ASSERT(PAGE_EXCL(repl));
3171 3156 ASSERT(!PP_ISFREE(repl));
3172 3157 ASSERT(repl->p_szc == repl_szc);
3173 3158 } else {
3174 3159 page_t *repl_savepp;
3175 3160 int j;
3176 3161 VM_STAT_ADD(segvnvmstats.relocatepages[2]);
3177 3162 repl_savepp = replacement;
3178 3163 for (j = 0; j < curnpgs; j++) {
3179 3164 repl = replacement;
3180 3165 page_sub(&replacement, repl);
3181 3166 ASSERT(PAGE_EXCL(repl));
3182 3167 ASSERT(!PP_ISFREE(repl));
3183 3168 ASSERT(repl->p_szc == repl_szc);
3184 3169 ASSERT(page_pptonum(targ[i + j]) ==
3185 3170 page_pptonum(targ[i]) + j);
3186 3171 }
3187 3172 repl = repl_savepp;
3188 3173 ASSERT(IS_P2ALIGNED(page_pptonum(repl), curnpgs));
3189 3174 }
3190 3175 err = page_relocate(&pp, &repl, 0, 1, &nreloc, NULL);
3191 3176 if (err || nreloc != curnpgs) {
3192 3177 panic("segvn_relocate_pages: "
3193 3178 "page_relocate failed err=%d curnpgs=%ld "
3194 3179 "nreloc=%ld", err, curnpgs, nreloc);
3195 3180 }
3196 3181 ASSERT(curnpgs <= repl_npgs);
3197 3182 repl_npgs -= curnpgs;
3198 3183 i += curnpgs;
3199 3184 }
3200 3185 ASSERT(replacement == NULL);
3201 3186
3202 3187 repl = first_repl;
3203 3188 repl_npgs = npgs;
3204 3189 for (i = 0; i < repl_npgs; i++) {
3205 3190 ASSERT(PAGE_EXCL(repl));
3206 3191 ASSERT(!PP_ISFREE(repl));
3207 3192 targ[i] = repl;
3208 3193 page_downgrade(targ[i]);
3209 3194 repl++;
3210 3195 }
3211 3196 }
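
The routine above and segvn_full_szcpages() below lean heavily on the power-of-two helpers IS_P2ALIGNED(), P2PHASE(), P2ALIGN() and P2ROUNDUP(). A small stand-alone illustration of what they compute, using local copies of the <sys/sysmacros.h> definitions and a made-up address:

#include <stdio.h>
#include <stdint.h>

/* local copies of the power-of-two helpers from <sys/sysmacros.h> */
#define	P2ALIGN(x, a)		((x) & -(a))
#define	P2PHASE(x, a)		((x) & ((a) - 1))
#define	P2ROUNDUP(x, a)		(-(-(x) & -(a)))
#define	IS_P2ALIGNED(v, a)	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)

int
main(void)
{
	uintptr_t addr = 0x12345;	/* hypothetical address or pfn */
	uintptr_t pgsz = 0x1000;	/* 4kB page size / page count */

	(void) printf("P2ALIGN   = 0x%lx\n", (unsigned long)P2ALIGN(addr, pgsz));	/* 0x12000 */
	(void) printf("P2ROUNDUP = 0x%lx\n", (unsigned long)P2ROUNDUP(addr, pgsz));	/* 0x13000 */
	(void) printf("P2PHASE   = 0x%lx\n", (unsigned long)P2PHASE(addr, pgsz));	/* 0x345 */
	(void) printf("aligned   = %d\n", IS_P2ALIGNED(addr, pgsz));			/* 0 */
	return (0);
}
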
3212 3197
3213 3198 /*
3214 3199 * Check if all pages in ppa array are complete smaller than szc pages and
3215 3200 * their roots will still be aligned relative to their current size if the
3216 3201 * entire ppa array is relocated into one szc page. If these conditions are
3217 3202 * not met return 0.
3218 3203 *
3219 3204 * If all pages are properly aligned attempt to upgrade their locks
3220 3205 * to exclusive mode. If it fails set *upgrdfail to 1 and return 0.
3221 3206 * upgrdfail was set to 0 by caller.
3222 3207 *
3223 3208 * Return 1 if all pages are aligned and locked exclusively.
3224 3209 *
3225 3210 * If all pages in ppa array happen to be physically contiguous to make one
3226 3211 * szc page and all exclusive locks are successfully obtained promote the page
3227 3212 * size to szc and set *pszc to szc. Return 1 with pages locked shared.
3228 3213 */
3229 3214 static int
3230 3215 segvn_full_szcpages(page_t **ppa, uint_t szc, int *upgrdfail, uint_t *pszc)
3231 3216 {
3232 3217 page_t *pp;
3233 3218 pfn_t pfn;
3234 3219 pgcnt_t totnpgs = page_get_pagecnt(szc);
3235 3220 pfn_t first_pfn;
3236 3221 int contig = 1;
3237 3222 pgcnt_t i;
3238 3223 pgcnt_t j;
3239 3224 uint_t curszc;
3240 3225 pgcnt_t curnpgs;
3241 3226 int root = 0;
3242 3227
3243 3228 ASSERT(szc > 0);
3244 3229
3245 3230 VM_STAT_ADD(segvnvmstats.fullszcpages[0]);
3246 3231
3247 3232 for (i = 0; i < totnpgs; i++) {
3248 3233 pp = ppa[i];
3249 3234 ASSERT(PAGE_SHARED(pp));
3250 3235 ASSERT(!PP_ISFREE(pp));
3251 3236 pfn = page_pptonum(pp);
3252 3237 if (i == 0) {
3253 3238 if (!IS_P2ALIGNED(pfn, totnpgs)) {
3254 3239 contig = 0;
3255 3240 } else {
3256 3241 first_pfn = pfn;
3257 3242 }
3258 3243 } else if (contig && pfn != first_pfn + i) {
3259 3244 contig = 0;
3260 3245 }
3261 3246 if (pp->p_szc == 0) {
3262 3247 if (root) {
3263 3248 VM_STAT_ADD(segvnvmstats.fullszcpages[1]);
3264 3249 return (0);
3265 3250 }
3266 3251 } else if (!root) {
3267 3252 if ((curszc = pp->p_szc) >= szc) {
3268 3253 VM_STAT_ADD(segvnvmstats.fullszcpages[2]);
3269 3254 return (0);
3270 3255 }
3271 3256 if (curszc == 0) {
3272 3257 /*
3273 3258 * p_szc changed means we don't have all pages
3274 3259 * locked. return failure.
3275 3260 */
3276 3261 VM_STAT_ADD(segvnvmstats.fullszcpages[3]);
3277 3262 return (0);
3278 3263 }
3279 3264 curnpgs = page_get_pagecnt(curszc);
3280 3265 if (!IS_P2ALIGNED(pfn, curnpgs) ||
3281 3266 !IS_P2ALIGNED(i, curnpgs)) {
3282 3267 VM_STAT_ADD(segvnvmstats.fullszcpages[4]);
3283 3268 return (0);
3284 3269 }
3285 3270 root = 1;
3286 3271 } else {
3287 3272 ASSERT(i > 0);
3288 3273 VM_STAT_ADD(segvnvmstats.fullszcpages[5]);
3289 3274 if (pp->p_szc != curszc) {
3290 3275 VM_STAT_ADD(segvnvmstats.fullszcpages[6]);
3291 3276 return (0);
3292 3277 }
3293 3278 if (pfn - 1 != page_pptonum(ppa[i - 1])) {
3294 3279 panic("segvn_full_szcpages: "
3295 3280 "large page not physically contiguous");
3296 3281 }
3297 3282 if (P2PHASE(pfn, curnpgs) == curnpgs - 1) {
3298 3283 root = 0;
3299 3284 }
3300 3285 }
3301 3286 }
3302 3287
3303 3288 for (i = 0; i < totnpgs; i++) {
3304 3289 ASSERT(ppa[i]->p_szc < szc);
3305 3290 if (!page_tryupgrade(ppa[i])) {
3306 3291 for (j = 0; j < i; j++) {
3307 3292 page_downgrade(ppa[j]);
3308 3293 }
3309 3294 *pszc = ppa[i]->p_szc;
3310 3295 *upgrdfail = 1;
3311 3296 VM_STAT_ADD(segvnvmstats.fullszcpages[7]);
3312 3297 return (0);
3313 3298 }
3314 3299 }
3315 3300
3316 3301 /*
3317 3302 	 * When a page is put on a free cachelist its szc is set to 0. if file
3318 3303 * system reclaimed pages from cachelist targ pages will be physically
3319 3304 * contiguous with 0 p_szc. in this case just upgrade szc of targ
3320 3305 * pages without any relocations.
3321 3306 * To avoid any hat issues with previous small mappings
3322 3307 * hat_pageunload() the target pages first.
3323 3308 */
3324 3309 if (contig) {
3325 3310 VM_STAT_ADD(segvnvmstats.fullszcpages[8]);
3326 3311 for (i = 0; i < totnpgs; i++) {
3327 3312 (void) hat_pageunload(ppa[i], HAT_FORCE_PGUNLOAD);
3328 3313 }
3329 3314 for (i = 0; i < totnpgs; i++) {
3330 3315 ppa[i]->p_szc = szc;
3331 3316 }
3332 3317 for (i = 0; i < totnpgs; i++) {
3333 3318 ASSERT(PAGE_EXCL(ppa[i]));
3334 3319 page_downgrade(ppa[i]);
3335 3320 }
3336 3321 if (pszc != NULL) {
3337 3322 *pszc = szc;
3338 3323 }
3339 3324 }
3340 3325 VM_STAT_ADD(segvnvmstats.fullszcpages[9]);
3341 3326 return (1);
3342 3327 }
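
Both segvn_relocate_pages() and segvn_full_szcpages() consume page_get_pagecnt(szc) constituent pages per root they encounter. A toy user-space model of just that counting, with a hypothetical pagecnt() that assumes an 8kB base page and a factor of eight per size code (sparc-like); it is only an illustration of the index arithmetic, not kernel code:

#include <assert.h>
#include <stdio.h>

/* hypothetical stand-in for page_get_pagecnt(): 8kB base, 8x per szc */
static unsigned long
pagecnt(unsigned int szc)
{
	return (1UL << (3 * szc));
}

int
main(void)
{
	unsigned int repl_szc = 2;		/* replacement page: 64 base pages */
	unsigned int targ_szc[50];		/* size codes of the roots found */
	unsigned long repl_npgs = pagecnt(repl_szc);
	unsigned long i = 0;			/* plays the role of "i" above */
	unsigned long t, ntarg = 0;

	targ_szc[ntarg++] = 1;			/* one 8-page group */
	targ_szc[ntarg++] = 1;			/* a second 8-page group */
	while (ntarg < 50)
		targ_szc[ntarg++] = 0;		/* 48 single pages */

	for (t = 0; t < ntarg; t++) {
		unsigned long curnpgs = pagecnt(targ_szc[t]);

		assert(curnpgs <= repl_npgs);
		i += curnpgs;			/* "i += curnpgs" */
		repl_npgs -= curnpgs;		/* "repl_npgs -= curnpgs" */
	}
	assert(repl_npgs == 0);			/* the roots exactly cover one repl page */
	(void) printf("covered %lu constituent pages\n", i);
	return (0);
}
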
3343 3328
3344 3329 /*
3345 3330 * Create physically contiguous pages for [vp, off] - [vp, off +
3346 3331 * page_size(szc)) range and for private segment return them in ppa array.
3347 3332 * Pages are created either via IO or relocations.
3348 3333 *
3349 3334 * Return 1 on success and 0 on failure.
3350 3335 *
3351 3336 * If physically contiguous pages already exist for this range return 1 without
3352 3337 * filling ppa array. Caller initializes ppa[0] as NULL to detect that ppa
3353 3338 * array wasn't filled. In this case caller fills ppa array via VOP_GETPAGE().
3354 3339 */
3355 3340
3356 3341 static int
3357 3342 segvn_fill_vp_pages(struct segvn_data *svd, vnode_t *vp, u_offset_t off,
3358 3343 uint_t szc, page_t **ppa, page_t **ppplist, uint_t *ret_pszc,
3359 3344 int *downsize)
3360 3345
3361 3346 {
3362 3347 page_t *pplist = *ppplist;
3363 3348 size_t pgsz = page_get_pagesize(szc);
3364 3349 pgcnt_t pages = btop(pgsz);
3365 3350 ulong_t start_off = off;
3366 3351 u_offset_t eoff = off + pgsz;
3367 3352 spgcnt_t nreloc;
3368 3353 u_offset_t io_off = off;
3369 3354 size_t io_len;
3370 3355 page_t *io_pplist = NULL;
3371 3356 page_t *done_pplist = NULL;
3372 3357 pgcnt_t pgidx = 0;
3373 3358 page_t *pp;
3374 3359 page_t *newpp;
3375 3360 page_t *targpp;
3376 3361 int io_err = 0;
3377 3362 int i;
3378 3363 pfn_t pfn;
3379 3364 ulong_t ppages;
3380 3365 page_t *targ_pplist = NULL;
3381 3366 page_t *repl_pplist = NULL;
3382 3367 page_t *tmp_pplist;
3383 3368 int nios = 0;
3384 3369 uint_t pszc;
3385 3370 struct vattr va;
3386 3371
3387 3372 VM_STAT_ADD(segvnvmstats.fill_vp_pages[0]);
3388 3373
3389 3374 ASSERT(szc != 0);
3390 3375 ASSERT(pplist->p_szc == szc);
3391 3376
3392 3377 /*
3393 3378 * downsize will be set to 1 only if we fail to lock pages. this will
3394 3379 * allow subsequent faults to try to relocate the page again. If we
3395 3380 * fail due to misalignment don't downsize and let the caller map the
3396 3381 * whole region with small mappings to avoid more faults into the area
3397 3382 * where we can't get large pages anyway.
3398 3383 */
3399 3384 *downsize = 0;
3400 3385
3401 3386 while (off < eoff) {
3402 3387 newpp = pplist;
3403 3388 ASSERT(newpp != NULL);
3404 3389 ASSERT(PAGE_EXCL(newpp));
3405 3390 ASSERT(!PP_ISFREE(newpp));
3406 3391 /*
3407 3392 * we pass NULL for nrelocp to page_lookup_create()
3408 3393 * so that it doesn't relocate. We relocate here
3409 3394 * later only after we make sure we can lock all
3410 3395 * pages in the range we handle and they are all
3411 3396 * aligned.
3412 3397 */
3413 3398 pp = page_lookup_create(vp, off, SE_SHARED, newpp, NULL, 0);
3414 3399 ASSERT(pp != NULL);
3415 3400 ASSERT(!PP_ISFREE(pp));
3416 3401 ASSERT(pp->p_vnode == vp);
3417 3402 ASSERT(pp->p_offset == off);
3418 3403 if (pp == newpp) {
3419 3404 VM_STAT_ADD(segvnvmstats.fill_vp_pages[1]);
3420 3405 page_sub(&pplist, pp);
3421 3406 ASSERT(PAGE_EXCL(pp));
3422 3407 ASSERT(page_iolock_assert(pp));
3423 3408 page_list_concat(&io_pplist, &pp);
3424 3409 off += PAGESIZE;
3425 3410 continue;
3426 3411 }
3427 3412 VM_STAT_ADD(segvnvmstats.fill_vp_pages[2]);
3428 3413 pfn = page_pptonum(pp);
3429 3414 pszc = pp->p_szc;
3430 3415 if (pszc >= szc && targ_pplist == NULL && io_pplist == NULL &&
3431 3416 IS_P2ALIGNED(pfn, pages)) {
3432 3417 ASSERT(repl_pplist == NULL);
3433 3418 ASSERT(done_pplist == NULL);
3434 3419 ASSERT(pplist == *ppplist);
3435 3420 page_unlock(pp);
3436 3421 page_free_replacement_page(pplist);
3437 3422 page_create_putback(pages);
3438 3423 *ppplist = NULL;
3439 3424 VM_STAT_ADD(segvnvmstats.fill_vp_pages[3]);
3440 3425 return (1);
3441 3426 }
3442 3427 if (pszc >= szc) {
3443 3428 page_unlock(pp);
3444 3429 segvn_faultvnmpss_align_err1++;
3445 3430 goto out;
3446 3431 }
3447 3432 ppages = page_get_pagecnt(pszc);
3448 3433 if (!IS_P2ALIGNED(pfn, ppages)) {
3449 3434 ASSERT(pszc > 0);
3450 3435 /*
3451 3436 * sizing down to pszc won't help.
3452 3437 */
3453 3438 page_unlock(pp);
3454 3439 segvn_faultvnmpss_align_err2++;
3455 3440 goto out;
3456 3441 }
3457 3442 pfn = page_pptonum(newpp);
3458 3443 if (!IS_P2ALIGNED(pfn, ppages)) {
3459 3444 ASSERT(pszc > 0);
3460 3445 /*
3461 3446 * sizing down to pszc won't help.
3462 3447 */
3463 3448 page_unlock(pp);
3464 3449 segvn_faultvnmpss_align_err3++;
3465 3450 goto out;
3466 3451 }
3467 3452 if (!PAGE_EXCL(pp)) {
3468 3453 VM_STAT_ADD(segvnvmstats.fill_vp_pages[4]);
3469 3454 page_unlock(pp);
3470 3455 *downsize = 1;
3471 3456 *ret_pszc = pp->p_szc;
3472 3457 goto out;
3473 3458 }
3474 3459 targpp = pp;
3475 3460 if (io_pplist != NULL) {
3476 3461 VM_STAT_ADD(segvnvmstats.fill_vp_pages[5]);
3477 3462 io_len = off - io_off;
3478 3463 /*
3479 3464 * Some file systems like NFS don't check EOF
3480 3465 * conditions in VOP_PAGEIO(). Check it here
3481 3466 * now that pages are locked SE_EXCL. Any file
3482 3467 * truncation will wait until the pages are
3483 3468 * unlocked so no need to worry that file will
3484 3469 * be truncated after we check its size here.
3485 3470 * XXX fix NFS to remove this check.
3486 3471 */
3487 3472 va.va_mask = AT_SIZE;
3488 3473 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL)) {
3489 3474 VM_STAT_ADD(segvnvmstats.fill_vp_pages[6]);
3490 3475 page_unlock(targpp);
3491 3476 goto out;
3492 3477 }
3493 3478 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3494 3479 VM_STAT_ADD(segvnvmstats.fill_vp_pages[7]);
3495 3480 *downsize = 1;
3496 3481 *ret_pszc = 0;
3497 3482 page_unlock(targpp);
3498 3483 goto out;
3499 3484 }
3500 3485 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3501 3486 B_READ, svd->cred, NULL);
3502 3487 if (io_err) {
3503 3488 VM_STAT_ADD(segvnvmstats.fill_vp_pages[8]);
3504 3489 page_unlock(targpp);
3505 3490 if (io_err == EDEADLK) {
3506 3491 segvn_vmpss_pageio_deadlk_err++;
3507 3492 }
3508 3493 goto out;
3509 3494 }
3510 3495 nios++;
3511 3496 VM_STAT_ADD(segvnvmstats.fill_vp_pages[9]);
3512 3497 while (io_pplist != NULL) {
3513 3498 pp = io_pplist;
3514 3499 page_sub(&io_pplist, pp);
3515 3500 ASSERT(page_iolock_assert(pp));
3516 3501 page_io_unlock(pp);
3517 3502 pgidx = (pp->p_offset - start_off) >>
3518 3503 PAGESHIFT;
3519 3504 ASSERT(pgidx < pages);
3520 3505 ppa[pgidx] = pp;
3521 3506 page_list_concat(&done_pplist, &pp);
3522 3507 }
3523 3508 }
3524 3509 pp = targpp;
3525 3510 ASSERT(PAGE_EXCL(pp));
3526 3511 ASSERT(pp->p_szc <= pszc);
3527 3512 if (pszc != 0 && !group_page_trylock(pp, SE_EXCL)) {
3528 3513 VM_STAT_ADD(segvnvmstats.fill_vp_pages[10]);
3529 3514 page_unlock(pp);
3530 3515 *downsize = 1;
3531 3516 *ret_pszc = pp->p_szc;
3532 3517 goto out;
3533 3518 }
3534 3519 VM_STAT_ADD(segvnvmstats.fill_vp_pages[11]);
3535 3520 /*
3536 3521 		 * page szc could have changed before the entire group was
3537 3522 * locked. reread page szc.
3538 3523 */
3539 3524 pszc = pp->p_szc;
3540 3525 ppages = page_get_pagecnt(pszc);
3541 3526
3542 3527 /* link just the roots */
3543 3528 page_list_concat(&targ_pplist, &pp);
3544 3529 page_sub(&pplist, newpp);
3545 3530 page_list_concat(&repl_pplist, &newpp);
3546 3531 off += PAGESIZE;
3547 3532 while (--ppages != 0) {
3548 3533 newpp = pplist;
3549 3534 page_sub(&pplist, newpp);
3550 3535 off += PAGESIZE;
3551 3536 }
3552 3537 io_off = off;
3553 3538 }
3554 3539 if (io_pplist != NULL) {
3555 3540 VM_STAT_ADD(segvnvmstats.fill_vp_pages[12]);
3556 3541 io_len = eoff - io_off;
3557 3542 va.va_mask = AT_SIZE;
3558 3543 if (VOP_GETATTR(vp, &va, ATTR_HINT, svd->cred, NULL) != 0) {
3559 3544 VM_STAT_ADD(segvnvmstats.fill_vp_pages[13]);
3560 3545 goto out;
3561 3546 }
3562 3547 if (btopr(va.va_size) < btopr(io_off + io_len)) {
3563 3548 VM_STAT_ADD(segvnvmstats.fill_vp_pages[14]);
3564 3549 *downsize = 1;
3565 3550 *ret_pszc = 0;
3566 3551 goto out;
3567 3552 }
3568 3553 io_err = VOP_PAGEIO(vp, io_pplist, io_off, io_len,
3569 3554 B_READ, svd->cred, NULL);
3570 3555 if (io_err) {
3571 3556 VM_STAT_ADD(segvnvmstats.fill_vp_pages[15]);
3572 3557 if (io_err == EDEADLK) {
3573 3558 segvn_vmpss_pageio_deadlk_err++;
3574 3559 }
3575 3560 goto out;
3576 3561 }
3577 3562 nios++;
3578 3563 while (io_pplist != NULL) {
3579 3564 pp = io_pplist;
3580 3565 page_sub(&io_pplist, pp);
3581 3566 ASSERT(page_iolock_assert(pp));
3582 3567 page_io_unlock(pp);
3583 3568 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3584 3569 ASSERT(pgidx < pages);
3585 3570 ppa[pgidx] = pp;
3586 3571 }
3587 3572 }
3588 3573 /*
3589 3574 * we're now bound to succeed or panic.
3590 3575 * remove pages from done_pplist. it's not needed anymore.
3591 3576 */
3592 3577 while (done_pplist != NULL) {
3593 3578 pp = done_pplist;
3594 3579 page_sub(&done_pplist, pp);
3595 3580 }
3596 3581 VM_STAT_ADD(segvnvmstats.fill_vp_pages[16]);
3597 3582 ASSERT(pplist == NULL);
3598 3583 *ppplist = NULL;
3599 3584 while (targ_pplist != NULL) {
3600 3585 int ret;
3601 3586 VM_STAT_ADD(segvnvmstats.fill_vp_pages[17]);
3602 3587 ASSERT(repl_pplist);
3603 3588 pp = targ_pplist;
3604 3589 page_sub(&targ_pplist, pp);
3605 3590 pgidx = (pp->p_offset - start_off) >> PAGESHIFT;
3606 3591 newpp = repl_pplist;
3607 3592 page_sub(&repl_pplist, newpp);
3608 3593 #ifdef DEBUG
3609 3594 pfn = page_pptonum(pp);
3610 3595 pszc = pp->p_szc;
3611 3596 ppages = page_get_pagecnt(pszc);
3612 3597 ASSERT(IS_P2ALIGNED(pfn, ppages));
3613 3598 pfn = page_pptonum(newpp);
3614 3599 ASSERT(IS_P2ALIGNED(pfn, ppages));
3615 3600 ASSERT(P2PHASE(pfn, pages) == pgidx);
3616 3601 #endif
3617 3602 nreloc = 0;
3618 3603 ret = page_relocate(&pp, &newpp, 0, 1, &nreloc, NULL);
3619 3604 if (ret != 0 || nreloc == 0) {
3620 3605 panic("segvn_fill_vp_pages: "
3621 3606 "page_relocate failed");
3622 3607 }
3623 3608 pp = newpp;
3624 3609 while (nreloc-- != 0) {
3625 3610 ASSERT(PAGE_EXCL(pp));
3626 3611 ASSERT(pp->p_vnode == vp);
3627 3612 ASSERT(pgidx ==
3628 3613 ((pp->p_offset - start_off) >> PAGESHIFT));
3629 3614 ppa[pgidx++] = pp;
3630 3615 pp++;
3631 3616 }
3632 3617 }
3633 3618
3634 3619 if (svd->type == MAP_PRIVATE) {
3635 3620 VM_STAT_ADD(segvnvmstats.fill_vp_pages[18]);
3636 3621 for (i = 0; i < pages; i++) {
3637 3622 ASSERT(ppa[i] != NULL);
3638 3623 ASSERT(PAGE_EXCL(ppa[i]));
3639 3624 ASSERT(ppa[i]->p_vnode == vp);
3640 3625 ASSERT(ppa[i]->p_offset ==
3641 3626 start_off + (i << PAGESHIFT));
3642 3627 page_downgrade(ppa[i]);
3643 3628 }
3644 3629 ppa[pages] = NULL;
3645 3630 } else {
3646 3631 VM_STAT_ADD(segvnvmstats.fill_vp_pages[19]);
3647 3632 /*
3648 3633 * the caller will still call VOP_GETPAGE() for shared segments
3649 3634 * to check FS write permissions. For private segments we map
3650 3635 * file read only anyway. so no VOP_GETPAGE is needed.
3651 3636 */
3652 3637 for (i = 0; i < pages; i++) {
3653 3638 ASSERT(ppa[i] != NULL);
3654 3639 ASSERT(PAGE_EXCL(ppa[i]));
3655 3640 ASSERT(ppa[i]->p_vnode == vp);
3656 3641 ASSERT(ppa[i]->p_offset ==
3657 3642 start_off + (i << PAGESHIFT));
3658 3643 page_unlock(ppa[i]);
3659 3644 }
3660 3645 ppa[0] = NULL;
3661 3646 }
3662 3647
3663 3648 return (1);
3664 3649 out:
3665 3650 /*
3666 3651 * Do the cleanup. Unlock target pages we didn't relocate. They are
3667 3652 * linked on targ_pplist by root pages. reassemble unused replacement
3668 3653 * and io pages back to pplist.
3669 3654 */
3670 3655 if (io_pplist != NULL) {
3671 3656 VM_STAT_ADD(segvnvmstats.fill_vp_pages[20]);
3672 3657 pp = io_pplist;
3673 3658 do {
3674 3659 ASSERT(pp->p_vnode == vp);
3675 3660 ASSERT(pp->p_offset == io_off);
3676 3661 ASSERT(page_iolock_assert(pp));
3677 3662 page_io_unlock(pp);
3678 3663 page_hashout(pp, NULL);
3679 3664 io_off += PAGESIZE;
3680 3665 } while ((pp = pp->p_next) != io_pplist);
3681 3666 page_list_concat(&io_pplist, &pplist);
3682 3667 pplist = io_pplist;
3683 3668 }
3684 3669 tmp_pplist = NULL;
3685 3670 while (targ_pplist != NULL) {
3686 3671 VM_STAT_ADD(segvnvmstats.fill_vp_pages[21]);
3687 3672 pp = targ_pplist;
3688 3673 ASSERT(PAGE_EXCL(pp));
3689 3674 page_sub(&targ_pplist, pp);
3690 3675
3691 3676 pszc = pp->p_szc;
3692 3677 ppages = page_get_pagecnt(pszc);
3693 3678 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3694 3679
3695 3680 if (pszc != 0) {
3696 3681 group_page_unlock(pp);
3697 3682 }
3698 3683 page_unlock(pp);
3699 3684
3700 3685 pp = repl_pplist;
3701 3686 ASSERT(pp != NULL);
3702 3687 ASSERT(PAGE_EXCL(pp));
3703 3688 ASSERT(pp->p_szc == szc);
3704 3689 page_sub(&repl_pplist, pp);
3705 3690
3706 3691 ASSERT(IS_P2ALIGNED(page_pptonum(pp), ppages));
3707 3692
3708 3693 /* relink replacement page */
3709 3694 page_list_concat(&tmp_pplist, &pp);
3710 3695 while (--ppages != 0) {
3711 3696 VM_STAT_ADD(segvnvmstats.fill_vp_pages[22]);
3712 3697 pp++;
3713 3698 ASSERT(PAGE_EXCL(pp));
3714 3699 ASSERT(pp->p_szc == szc);
3715 3700 page_list_concat(&tmp_pplist, &pp);
3716 3701 }
3717 3702 }
3718 3703 if (tmp_pplist != NULL) {
3719 3704 VM_STAT_ADD(segvnvmstats.fill_vp_pages[23]);
3720 3705 page_list_concat(&tmp_pplist, &pplist);
3721 3706 pplist = tmp_pplist;
3722 3707 }
3723 3708 /*
3724 3709 * at this point all pages are either on done_pplist or
3725 3710 * pplist. They can't be all on done_pplist otherwise
3726 3711 * we'd've been done.
3727 3712 */
3728 3713 ASSERT(pplist != NULL);
3729 3714 if (nios != 0) {
3730 3715 VM_STAT_ADD(segvnvmstats.fill_vp_pages[24]);
3731 3716 pp = pplist;
3732 3717 do {
3733 3718 VM_STAT_ADD(segvnvmstats.fill_vp_pages[25]);
3734 3719 ASSERT(pp->p_szc == szc);
3735 3720 ASSERT(PAGE_EXCL(pp));
3736 3721 ASSERT(pp->p_vnode != vp);
3737 3722 pp->p_szc = 0;
3738 3723 } while ((pp = pp->p_next) != pplist);
3739 3724
3740 3725 pp = done_pplist;
3741 3726 do {
3742 3727 VM_STAT_ADD(segvnvmstats.fill_vp_pages[26]);
3743 3728 ASSERT(pp->p_szc == szc);
3744 3729 ASSERT(PAGE_EXCL(pp));
3745 3730 ASSERT(pp->p_vnode == vp);
3746 3731 pp->p_szc = 0;
3747 3732 } while ((pp = pp->p_next) != done_pplist);
3748 3733
3749 3734 while (pplist != NULL) {
3750 3735 VM_STAT_ADD(segvnvmstats.fill_vp_pages[27]);
3751 3736 pp = pplist;
3752 3737 page_sub(&pplist, pp);
3753 3738 page_free(pp, 0);
3754 3739 }
3755 3740
3756 3741 while (done_pplist != NULL) {
3757 3742 VM_STAT_ADD(segvnvmstats.fill_vp_pages[28]);
3758 3743 pp = done_pplist;
3759 3744 page_sub(&done_pplist, pp);
3760 3745 page_unlock(pp);
3761 3746 }
3762 3747 *ppplist = NULL;
3763 3748 return (0);
3764 3749 }
3765 3750 ASSERT(pplist == *ppplist);
3766 3751 if (io_err) {
3767 3752 VM_STAT_ADD(segvnvmstats.fill_vp_pages[29]);
3768 3753 /*
3769 3754 * don't downsize on io error.
3770 3755 * see if vop_getpage succeeds.
3771 3756 * pplist may still be used in this case
3772 3757 * for relocations.
3773 3758 */
3774 3759 return (0);
3775 3760 }
3776 3761 VM_STAT_ADD(segvnvmstats.fill_vp_pages[30]);
3777 3762 page_free_replacement_page(pplist);
3778 3763 page_create_putback(pages);
3779 3764 *ppplist = NULL;
3780 3765 return (0);
3781 3766 }
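
The ppa[] slot for every page filled in above is derived purely from its file offset: pgidx = (p_offset - start_off) >> PAGESHIFT. With made-up numbers and 4kB base pages:

#include <stdio.h>

#define	PAGESHIFT	12	/* assumes 4kB base pages */

int
main(void)
{
	unsigned long long start_off = 0x40000;	/* offset of the large page */
	unsigned long long p_offset = 0x46000;		/* offset of one constituent */
	unsigned long pgidx =
	    (unsigned long)((p_offset - start_off) >> PAGESHIFT);

	(void) printf("pgidx = %lu\n", pgidx);		/* 6: this page lands in ppa[6] */
	return (0);
}
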
3782 3767
3783 3768 int segvn_anypgsz = 0;
3784 3769
3785 3770 #define SEGVN_RESTORE_SOFTLOCK_VP(type, pages) \
3786 3771 if ((type) == F_SOFTLOCK) { \
3787 3772 atomic_add_long((ulong_t *)&(svd)->softlockcnt, \
3788 3773 -(pages)); \
3789 3774 }
3790 3775
3791 3776 #define SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot) \
3792 3777 if (IS_VMODSORT((ppa)[0]->p_vnode)) { \
3793 3778 if ((rw) == S_WRITE) { \
3794 3779 for (i = 0; i < (pages); i++) { \
3795 3780 ASSERT((ppa)[i]->p_vnode == \
3796 3781 (ppa)[0]->p_vnode); \
3797 3782 hat_setmod((ppa)[i]); \
3798 3783 } \
3799 3784 } else if ((rw) != S_OTHER && \
3800 3785 ((prot) & (vpprot) & PROT_WRITE)) { \
3801 3786 for (i = 0; i < (pages); i++) { \
3802 3787 ASSERT((ppa)[i]->p_vnode == \
3803 3788 (ppa)[0]->p_vnode); \
3804 3789 if (!hat_ismod((ppa)[i])) { \
3805 3790 prot &= ~PROT_WRITE; \
3806 3791 break; \
3807 3792 } \
3808 3793 } \
3809 3794 } \
3810 3795 }
3811 3796
3812 3797 #ifdef VM_STATS
3813 3798
3814 3799 #define SEGVN_VMSTAT_FLTVNPAGES(idx) \
3815 3800 VM_STAT_ADD(segvnvmstats.fltvnpages[(idx)]);
3816 3801
3817 3802 #else /* VM_STATS */
3818 3803
3819 3804 #define SEGVN_VMSTAT_FLTVNPAGES(idx)
3820 3805
3821 3806 #endif
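
The fltvnpages counters bumped by SEGVN_VMSTAT_FLTVNPAGES() live in the file-static segvnvmstats structure, next to the fill_vp_pages and relocatepages counters used earlier. On a kernel built with VM_STATS they can be inspected from mdb; a possible invocation, assuming CTF data is available for the symbol:

# echo 'segvnvmstats::print' | mdb -k
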
3822 3807
3823 3808 static faultcode_t
3824 3809 segvn_fault_vnodepages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
3825 3810 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
3826 3811 caddr_t eaddr, int brkcow)
3827 3812 {
3828 3813 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
3829 3814 struct anon_map *amp = svd->amp;
3830 3815 uchar_t segtype = svd->type;
3831 3816 uint_t szc = seg->s_szc;
3832 3817 size_t pgsz = page_get_pagesize(szc);
3833 3818 size_t maxpgsz = pgsz;
3834 3819 pgcnt_t pages = btop(pgsz);
3835 3820 pgcnt_t maxpages = pages;
3836 3821 size_t ppasize = (pages + 1) * sizeof (page_t *);
3837 3822 caddr_t a = lpgaddr;
3838 3823 caddr_t maxlpgeaddr = lpgeaddr;
3839 3824 u_offset_t off = svd->offset + (uintptr_t)(a - seg->s_base);
3840 3825 ulong_t aindx = svd->anon_index + seg_page(seg, a);
3841 3826 struct vpage *vpage = (svd->vpage != NULL) ?
3842 3827 &svd->vpage[seg_page(seg, a)] : NULL;
3843 3828 vnode_t *vp = svd->vp;
3844 3829 page_t **ppa;
3845 3830 uint_t pszc;
3846 3831 size_t ppgsz;
3847 3832 pgcnt_t ppages;
3848 3833 faultcode_t err = 0;
3849 3834 int ierr;
3850 3835 int vop_size_err = 0;
3851 3836 uint_t protchk, prot, vpprot;
3852 3837 ulong_t i;
3853 3838 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
3854 3839 anon_sync_obj_t an_cookie;
3855 3840 enum seg_rw arw;
3856 3841 int alloc_failed = 0;
3857 3842 int adjszc_chk;
3858 3843 struct vattr va;
3859 - int xhat = 0;
3860 3844 page_t *pplist;
3861 3845 pfn_t pfn;
3862 3846 int physcontig;
3863 3847 int upgrdfail;
3864 3848 int segvn_anypgsz_vnode = 0; /* for now map vnode with 2 page sizes */
3865 3849 int tron = (svd->tr_state == SEGVN_TR_ON);
3866 3850
3867 3851 ASSERT(szc != 0);
3868 3852 ASSERT(vp != NULL);
3869 3853 ASSERT(brkcow == 0 || amp != NULL);
3870 3854 ASSERT(tron == 0 || amp != NULL);
3871 3855 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
3872 3856 ASSERT(!(svd->flags & MAP_NORESERVE));
3873 3857 ASSERT(type != F_SOFTUNLOCK);
3874 3858 ASSERT(IS_P2ALIGNED(a, maxpgsz));
3875 3859 ASSERT(amp == NULL || IS_P2ALIGNED(aindx, maxpages));
3876 3860 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
3877 3861 ASSERT(seg->s_szc < NBBY * sizeof (int));
3878 3862 ASSERT(type != F_SOFTLOCK || lpgeaddr - a == maxpgsz);
3879 3863 ASSERT(svd->tr_state != SEGVN_TR_INIT);
3880 3864
3881 3865 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltvnpages[0]);
3882 3866 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltvnpages[1]);
3883 3867
3884 3868 if (svd->flags & MAP_TEXT) {
3885 3869 hat_flag |= HAT_LOAD_TEXT;
3886 3870 }
3887 3871
3888 3872 if (svd->pageprot) {
3889 3873 switch (rw) {
3890 3874 case S_READ:
3891 3875 protchk = PROT_READ;
3892 3876 break;
3893 3877 case S_WRITE:
3894 3878 protchk = PROT_WRITE;
3895 3879 break;
3896 3880 case S_EXEC:
3897 3881 protchk = PROT_EXEC;
3898 3882 break;
3899 3883 case S_OTHER:
3900 3884 default:
3901 3885 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
3902 3886 break;
3903 3887 }
3904 3888 } else {
3905 3889 prot = svd->prot;
3906 3890 /* caller has already done segment level protection check. */
3907 3891 }
3908 3892
3909 - if (seg->s_as->a_hat != hat) {
3910 - xhat = 1;
3911 - }
3912 -
3913 3893 if (rw == S_WRITE && segtype == MAP_PRIVATE) {
3914 3894 SEGVN_VMSTAT_FLTVNPAGES(2);
3915 3895 arw = S_READ;
3916 3896 } else {
3917 3897 arw = rw;
3918 3898 }
3919 3899
3920 3900 ppa = kmem_alloc(ppasize, KM_SLEEP);
3921 3901
3922 3902 VM_STAT_COND_ADD(amp != NULL, segvnvmstats.fltvnpages[3]);
3923 3903
3924 3904 for (;;) {
3925 3905 adjszc_chk = 0;
3926 3906 for (; a < lpgeaddr; a += pgsz, off += pgsz, aindx += pages) {
3927 3907 if (adjszc_chk) {
3928 3908 while (szc < seg->s_szc) {
3929 3909 uintptr_t e;
3930 3910 uint_t tszc;
3931 3911 tszc = segvn_anypgsz_vnode ? szc + 1 :
3932 3912 seg->s_szc;
3933 3913 ppgsz = page_get_pagesize(tszc);
3934 3914 if (!IS_P2ALIGNED(a, ppgsz) ||
3935 3915 ((alloc_failed >> tszc) & 0x1)) {
3936 3916 break;
3937 3917 }
3938 3918 SEGVN_VMSTAT_FLTVNPAGES(4);
3939 3919 szc = tszc;
3940 3920 pgsz = ppgsz;
3941 3921 pages = btop(pgsz);
3942 3922 e = P2ROUNDUP((uintptr_t)eaddr, pgsz);
3943 3923 lpgeaddr = (caddr_t)e;
3944 3924 }
3945 3925 }
3946 3926
3947 3927 again:
3948 3928 if (IS_P2ALIGNED(a, maxpgsz) && amp != NULL) {
3949 3929 ASSERT(IS_P2ALIGNED(aindx, maxpages));
3950 3930 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
3951 3931 anon_array_enter(amp, aindx, &an_cookie);
3952 3932 if (anon_get_ptr(amp->ahp, aindx) != NULL) {
3953 3933 SEGVN_VMSTAT_FLTVNPAGES(5);
3954 3934 ASSERT(anon_pages(amp->ahp, aindx,
3955 3935 maxpages) == maxpages);
3956 3936 anon_array_exit(&an_cookie);
3957 3937 ANON_LOCK_EXIT(&->a_rwlock);
3958 3938 err = segvn_fault_anonpages(hat, seg,
3959 3939 a, a + maxpgsz, type, rw,
3960 3940 MAX(a, addr),
3961 3941 MIN(a + maxpgsz, eaddr), brkcow);
3962 3942 if (err != 0) {
3963 3943 SEGVN_VMSTAT_FLTVNPAGES(6);
3964 3944 goto out;
3965 3945 }
3966 3946 if (szc < seg->s_szc) {
3967 3947 szc = seg->s_szc;
3968 3948 pgsz = maxpgsz;
3969 3949 pages = maxpages;
3970 3950 lpgeaddr = maxlpgeaddr;
3971 3951 }
3972 3952 goto next;
3973 3953 } else {
3974 3954 ASSERT(anon_pages(amp->ahp, aindx,
3975 3955 maxpages) == 0);
3976 3956 SEGVN_VMSTAT_FLTVNPAGES(7);
3977 3957 anon_array_exit(&an_cookie);
3978 3958 ANON_LOCK_EXIT(&->a_rwlock);
3979 3959 }
3980 3960 }
3981 3961 ASSERT(!brkcow || IS_P2ALIGNED(a, maxpgsz));
3982 3962 ASSERT(!tron || IS_P2ALIGNED(a, maxpgsz));
3983 3963
3984 3964 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
3985 3965 ASSERT(vpage != NULL);
3986 3966 prot = VPP_PROT(vpage);
3987 3967 ASSERT(sameprot(seg, a, maxpgsz));
3988 3968 if ((prot & protchk) == 0) {
3989 3969 SEGVN_VMSTAT_FLTVNPAGES(8);
3990 3970 err = FC_PROT;
3991 3971 goto out;
3992 3972 }
3993 3973 }
3994 3974 if (type == F_SOFTLOCK) {
3995 3975 atomic_add_long((ulong_t *)&svd->softlockcnt,
3996 3976 pages);
3997 3977 }
3998 3978
3999 3979 pplist = NULL;
4000 3980 physcontig = 0;
4001 3981 ppa[0] = NULL;
4002 3982 if (!brkcow && !tron && szc &&
4003 3983 !page_exists_physcontig(vp, off, szc,
4004 3984 segtype == MAP_PRIVATE ? ppa : NULL)) {
4005 3985 SEGVN_VMSTAT_FLTVNPAGES(9);
4006 3986 if (page_alloc_pages(vp, seg, a, &pplist, NULL,
4007 3987 szc, 0, 0) && type != F_SOFTLOCK) {
4008 3988 SEGVN_VMSTAT_FLTVNPAGES(10);
4009 3989 pszc = 0;
4010 3990 ierr = -1;
4011 3991 alloc_failed |= (1 << szc);
4012 3992 break;
4013 3993 }
4014 3994 if (pplist != NULL &&
4015 3995 vp->v_mpssdata == SEGVN_PAGEIO) {
4016 3996 int downsize;
4017 3997 SEGVN_VMSTAT_FLTVNPAGES(11);
4018 3998 physcontig = segvn_fill_vp_pages(svd,
4019 3999 vp, off, szc, ppa, &pplist,
4020 4000 &pszc, &downsize);
4021 4001 ASSERT(!physcontig || pplist == NULL);
4022 4002 if (!physcontig && downsize &&
4023 4003 type != F_SOFTLOCK) {
4024 4004 ASSERT(pplist == NULL);
4025 4005 SEGVN_VMSTAT_FLTVNPAGES(12);
4026 4006 ierr = -1;
4027 4007 break;
4028 4008 }
4029 4009 ASSERT(!physcontig ||
4030 4010 segtype == MAP_PRIVATE ||
4031 4011 ppa[0] == NULL);
4032 4012 if (physcontig && ppa[0] == NULL) {
4033 4013 physcontig = 0;
4034 4014 }
4035 4015 }
4036 4016 } else if (!brkcow && !tron && szc && ppa[0] != NULL) {
4037 4017 SEGVN_VMSTAT_FLTVNPAGES(13);
4038 4018 ASSERT(segtype == MAP_PRIVATE);
4039 4019 physcontig = 1;
4040 4020 }
4041 4021
4042 4022 if (!physcontig) {
4043 4023 SEGVN_VMSTAT_FLTVNPAGES(14);
4044 4024 ppa[0] = NULL;
4045 4025 ierr = VOP_GETPAGE(vp, (offset_t)off, pgsz,
4046 4026 &vpprot, ppa, pgsz, seg, a, arw,
4047 4027 svd->cred, NULL);
4048 4028 #ifdef DEBUG
4049 4029 if (ierr == 0) {
4050 4030 for (i = 0; i < pages; i++) {
4051 4031 ASSERT(PAGE_LOCKED(ppa[i]));
4052 4032 ASSERT(!PP_ISFREE(ppa[i]));
4053 4033 ASSERT(ppa[i]->p_vnode == vp);
4054 4034 ASSERT(ppa[i]->p_offset ==
4055 4035 off + (i << PAGESHIFT));
4056 4036 }
4057 4037 }
4058 4038 #endif /* DEBUG */
4059 4039 if (segtype == MAP_PRIVATE) {
4060 4040 SEGVN_VMSTAT_FLTVNPAGES(15);
4061 4041 vpprot &= ~PROT_WRITE;
4062 4042 }
4063 4043 } else {
4064 4044 ASSERT(segtype == MAP_PRIVATE);
4065 4045 SEGVN_VMSTAT_FLTVNPAGES(16);
4066 4046 vpprot = PROT_ALL & ~PROT_WRITE;
4067 4047 ierr = 0;
4068 4048 }
4069 4049
4070 4050 if (ierr != 0) {
4071 4051 SEGVN_VMSTAT_FLTVNPAGES(17);
4072 4052 if (pplist != NULL) {
4073 4053 SEGVN_VMSTAT_FLTVNPAGES(18);
4074 4054 page_free_replacement_page(pplist);
4075 4055 page_create_putback(pages);
4076 4056 }
4077 4057 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4078 4058 if (a + pgsz <= eaddr) {
4079 4059 SEGVN_VMSTAT_FLTVNPAGES(19);
4080 4060 err = FC_MAKE_ERR(ierr);
4081 4061 goto out;
4082 4062 }
4083 4063 va.va_mask = AT_SIZE;
4084 4064 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL)) {
4085 4065 SEGVN_VMSTAT_FLTVNPAGES(20);
4086 4066 err = FC_MAKE_ERR(EIO);
4087 4067 goto out;
4088 4068 }
4089 4069 if (btopr(va.va_size) >= btopr(off + pgsz)) {
4090 4070 SEGVN_VMSTAT_FLTVNPAGES(21);
4091 4071 err = FC_MAKE_ERR(ierr);
4092 4072 goto out;
4093 4073 }
4094 4074 if (btopr(va.va_size) <
4095 4075 btopr(off + (eaddr - a))) {
4096 4076 SEGVN_VMSTAT_FLTVNPAGES(22);
4097 4077 err = FC_MAKE_ERR(ierr);
4098 4078 goto out;
4099 4079 }
4100 4080 if (brkcow || tron || type == F_SOFTLOCK) {
4101 4081 /* can't reduce map area */
4102 4082 SEGVN_VMSTAT_FLTVNPAGES(23);
4103 4083 vop_size_err = 1;
4104 4084 goto out;
4105 4085 }
4106 4086 SEGVN_VMSTAT_FLTVNPAGES(24);
4107 4087 ASSERT(szc != 0);
4108 4088 pszc = 0;
4109 4089 ierr = -1;
4110 4090 break;
4111 4091 }
4112 4092
4113 4093 if (amp != NULL) {
4114 4094 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4115 4095 anon_array_enter(amp, aindx, &an_cookie);
4116 4096 }
4117 4097 if (amp != NULL &&
4118 4098 anon_get_ptr(amp->ahp, aindx) != NULL) {
4119 4099 ulong_t taindx = P2ALIGN(aindx, maxpages);
4120 4100
4121 4101 SEGVN_VMSTAT_FLTVNPAGES(25);
4122 4102 ASSERT(anon_pages(amp->ahp, taindx,
4123 4103 maxpages) == maxpages);
4124 4104 for (i = 0; i < pages; i++) {
4125 4105 page_unlock(ppa[i]);
4126 4106 }
4127 4107 anon_array_exit(&an_cookie);
4128 4108 ANON_LOCK_EXIT(&->a_rwlock);
4129 4109 if (pplist != NULL) {
4130 4110 page_free_replacement_page(pplist);
4131 4111 page_create_putback(pages);
4132 4112 }
4133 4113 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4134 4114 if (szc < seg->s_szc) {
4135 4115 SEGVN_VMSTAT_FLTVNPAGES(26);
4136 4116 /*
4137 4117 * For private segments SOFTLOCK
4138 4118 * either always breaks cow (any rw
4139 4119 * type except S_READ_NOCOW) or
4140 4120 * address space is locked as writer
4141 4121 * (S_READ_NOCOW case) and anon slots
4142 4122 * can't show up on second check.
4143 4123 * Therefore if we are here for
4144 4124 * SOFTLOCK case it must be a cow
4145 4125 * break but cow break never reduces
4146 4126 * szc. text replication (tron) in
4147 4127 * this case works as cow break.
4148 4128 * Thus the assert below.
4149 4129 */
4150 4130 ASSERT(!brkcow && !tron &&
4151 4131 type != F_SOFTLOCK);
4152 4132 pszc = seg->s_szc;
4153 4133 ierr = -2;
4154 4134 break;
4155 4135 }
4156 4136 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4157 4137 goto again;
4158 4138 }
4159 4139 #ifdef DEBUG
4160 4140 if (amp != NULL) {
4161 4141 ulong_t taindx = P2ALIGN(aindx, maxpages);
4162 4142 ASSERT(!anon_pages(amp->ahp, taindx, maxpages));
4163 4143 }
4164 4144 #endif /* DEBUG */
4165 4145
4166 4146 if (brkcow || tron) {
4167 4147 ASSERT(amp != NULL);
4168 4148 ASSERT(pplist == NULL);
4169 4149 ASSERT(szc == seg->s_szc);
4170 4150 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4171 4151 ASSERT(IS_P2ALIGNED(aindx, maxpages));
4172 4152 SEGVN_VMSTAT_FLTVNPAGES(27);
4173 4153 ierr = anon_map_privatepages(amp, aindx, szc,
4174 4154 seg, a, prot, ppa, vpage, segvn_anypgsz,
4175 4155 tron ? PG_LOCAL : 0, svd->cred);
4176 4156 if (ierr != 0) {
4177 4157 SEGVN_VMSTAT_FLTVNPAGES(28);
4178 4158 anon_array_exit(&an_cookie);
4179 4159 ANON_LOCK_EXIT(&->a_rwlock);
4180 4160 SEGVN_RESTORE_SOFTLOCK_VP(type, pages);
4181 4161 err = FC_MAKE_ERR(ierr);
4182 4162 goto out;
4183 4163 }
4184 4164
4185 4165 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4186 4166 /*
4187 4167 * p_szc can't be changed for locked
4188 4168 * swapfs pages.
4189 4169 */
4190 4170 ASSERT(svd->rcookie ==
4191 4171 HAT_INVALID_REGION_COOKIE);
4192 4172 hat_memload_array(hat, a, pgsz, ppa, prot,
4193 4173 hat_flag);
4194 4174
4195 4175 if (!(hat_flag & HAT_LOAD_LOCK)) {
4196 4176 SEGVN_VMSTAT_FLTVNPAGES(29);
4197 4177 for (i = 0; i < pages; i++) {
4198 4178 page_unlock(ppa[i]);
4199 4179 }
4200 4180 }
4201 4181 anon_array_exit(&an_cookie);
4202 4182 ANON_LOCK_EXIT(&->a_rwlock);
4203 4183 goto next;
4204 4184 }
4205 4185
4206 4186 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE ||
4207 4187 (!svd->pageprot && svd->prot == (prot & vpprot)));
4208 4188
4209 4189 pfn = page_pptonum(ppa[0]);
4210 4190 /*
4211 4191 * hat_page_demote() needs an SE_EXCL lock on one of
4212 4192 * constituent page_t's and it decreases root's p_szc
4213 4193 * last. This means if root's p_szc is equal szc and
4214 4194 * all its constituent pages are locked
4215 4195 * hat_page_demote() that could have changed p_szc to
4216 4196 * szc is already done and no new have page_demote()
4217 4197 * can start for this large page.
4218 4198 */
4219 4199
4220 4200 /*
4221 4201 * we need to make sure same mapping size is used for
4222 4202 * the same address range if there's a possibility the
4223 4203 			 * address is already mapped because hat layer panics
4224 4204 * when translation is loaded for the range already
4225 4205 * mapped with a different page size. We achieve it
4226 4206 * by always using largest page size possible subject
4227 4207 * to the constraints of page size, segment page size
4228 4208 * and page alignment. Since mappings are invalidated
4229 4209 * when those constraints change and make it
4230 4210 * impossible to use previously used mapping size no
4231 4211 * mapping size conflicts should happen.
4232 4212 */
4233 4213
4234 4214 chkszc:
4235 4215 if ((pszc = ppa[0]->p_szc) == szc &&
4236 4216 IS_P2ALIGNED(pfn, pages)) {
4237 4217
4238 4218 SEGVN_VMSTAT_FLTVNPAGES(30);
4239 4219 #ifdef DEBUG
4240 4220 for (i = 0; i < pages; i++) {
4241 4221 ASSERT(PAGE_LOCKED(ppa[i]));
4242 4222 ASSERT(!PP_ISFREE(ppa[i]));
4243 4223 ASSERT(page_pptonum(ppa[i]) ==
4244 4224 pfn + i);
4245 4225 ASSERT(ppa[i]->p_szc == szc);
4246 4226 ASSERT(ppa[i]->p_vnode == vp);
4247 4227 ASSERT(ppa[i]->p_offset ==
4248 4228 off + (i << PAGESHIFT));
4249 4229 }
4250 4230 #endif /* DEBUG */
4251 4231 /*
4252 4232 * All pages are of szc we need and they are
4253 4233 * all locked so they can't change szc. load
4254 4234 * translations.
4255 4235 *
4256 4236 * if page got promoted since last check
4257 4237 * we don't need pplist.
4258 4238 */
4259 4239 if (pplist != NULL) {
4260 4240 page_free_replacement_page(pplist);
4261 4241 page_create_putback(pages);
4262 4242 }
4263 4243 if (PP_ISMIGRATE(ppa[0])) {
4264 4244 page_migrate(seg, a, ppa, pages);
4265 4245 }
4266 4246 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4267 4247 prot, vpprot);
4268 - if (!xhat) {
4269 - hat_memload_array_region(hat, a, pgsz,
4270 - ppa, prot & vpprot, hat_flag,
4271 - svd->rcookie);
4272 - } else {
4273 - /*
4274 - * avoid large xhat mappings to FS
4275 - * pages so that hat_page_demote()
4276 - * doesn't need to check for xhat
4277 - * large mappings.
4278 - * Don't use regions with xhats.
4279 - */
4280 - for (i = 0; i < pages; i++) {
4281 - hat_memload(hat,
4282 - a + (i << PAGESHIFT),
4283 - ppa[i], prot & vpprot,
4284 - hat_flag);
4285 - }
4286 - }
4248 + hat_memload_array_region(hat, a, pgsz,
4249 + ppa, prot & vpprot, hat_flag,
4250 + svd->rcookie);
4287 4251
4288 4252 if (!(hat_flag & HAT_LOAD_LOCK)) {
4289 4253 for (i = 0; i < pages; i++) {
4290 4254 page_unlock(ppa[i]);
4291 4255 }
4292 4256 }
4293 4257 if (amp != NULL) {
4294 4258 anon_array_exit(&an_cookie);
4295 4259 ANON_LOCK_EXIT(&->a_rwlock);
4296 4260 }
4297 4261 goto next;
4298 4262 }
4299 4263
4300 4264 /*
4301 4265 * See if upsize is possible.
4302 4266 */
4303 4267 if (pszc > szc && szc < seg->s_szc &&
4304 4268 (segvn_anypgsz_vnode || pszc >= seg->s_szc)) {
4305 4269 pgcnt_t aphase;
4306 4270 uint_t pszc1 = MIN(pszc, seg->s_szc);
4307 4271 ppgsz = page_get_pagesize(pszc1);
4308 4272 ppages = btop(ppgsz);
4309 4273 aphase = btop(P2PHASE((uintptr_t)a, ppgsz));
4310 4274
4311 4275 ASSERT(type != F_SOFTLOCK);
4312 4276
4313 4277 SEGVN_VMSTAT_FLTVNPAGES(31);
4314 4278 if (aphase != P2PHASE(pfn, ppages)) {
4315 4279 segvn_faultvnmpss_align_err4++;
4316 4280 } else {
4317 4281 SEGVN_VMSTAT_FLTVNPAGES(32);
4318 4282 if (pplist != NULL) {
4319 4283 page_t *pl = pplist;
4320 4284 page_free_replacement_page(pl);
4321 4285 page_create_putback(pages);
4322 4286 }
4323 4287 for (i = 0; i < pages; i++) {
4324 4288 page_unlock(ppa[i]);
4325 4289 }
4326 4290 if (amp != NULL) {
4327 4291 anon_array_exit(&an_cookie);
4328 4292 ANON_LOCK_EXIT(&->a_rwlock);
4329 4293 }
4330 4294 pszc = pszc1;
4331 4295 ierr = -2;
4332 4296 break;
4333 4297 }
4334 4298 }
4335 4299
4336 4300 /*
4337 4301 * check if we should use smallest mapping size.
4338 4302 */
4339 4303 upgrdfail = 0;
4340 - if (szc == 0 || xhat ||
4304 + if (szc == 0 ||
4341 4305 (pszc >= szc &&
4342 4306 !IS_P2ALIGNED(pfn, pages)) ||
4343 4307 (pszc < szc &&
4344 4308 !segvn_full_szcpages(ppa, szc, &upgrdfail,
4345 4309 &pszc))) {
4346 4310
4347 4311 if (upgrdfail && type != F_SOFTLOCK) {
4348 4312 /*
4349 4313 * segvn_full_szcpages failed to lock
4350 4314 * all pages EXCL. Size down.
4351 4315 */
4352 4316 ASSERT(pszc < szc);
4353 4317
4354 4318 SEGVN_VMSTAT_FLTVNPAGES(33);
4355 4319
4356 4320 if (pplist != NULL) {
4357 4321 page_t *pl = pplist;
4358 4322 page_free_replacement_page(pl);
4359 4323 page_create_putback(pages);
4360 4324 }
4361 4325
4362 4326 for (i = 0; i < pages; i++) {
4363 4327 page_unlock(ppa[i]);
4364 4328 }
4365 4329 if (amp != NULL) {
4366 4330 anon_array_exit(&an_cookie);
4367 4331 ANON_LOCK_EXIT(&->a_rwlock);
4368 4332 }
4369 4333 ierr = -1;
4370 4334 break;
4371 4335 }
4372 - if (szc != 0 && !xhat && !upgrdfail) {
4336 + if (szc != 0 && !upgrdfail) {
4373 4337 segvn_faultvnmpss_align_err5++;
4374 4338 }
4375 4339 SEGVN_VMSTAT_FLTVNPAGES(34);
4376 4340 if (pplist != NULL) {
4377 4341 page_free_replacement_page(pplist);
4378 4342 page_create_putback(pages);
4379 4343 }
4380 4344 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4381 4345 prot, vpprot);
4382 4346 if (upgrdfail && segvn_anypgsz_vnode) {
4383 4347 /* SOFTLOCK case */
4384 4348 hat_memload_array_region(hat, a, pgsz,
4385 4349 ppa, prot & vpprot, hat_flag,
4386 4350 svd->rcookie);
4387 4351 } else {
4388 4352 for (i = 0; i < pages; i++) {
4389 4353 hat_memload_region(hat,
4390 4354 a + (i << PAGESHIFT),
4391 4355 ppa[i], prot & vpprot,
4392 4356 hat_flag, svd->rcookie);
4393 4357 }
4394 4358 }
4395 4359 if (!(hat_flag & HAT_LOAD_LOCK)) {
4396 4360 for (i = 0; i < pages; i++) {
4397 4361 page_unlock(ppa[i]);
4398 4362 }
4399 4363 }
4400 4364 if (amp != NULL) {
4401 4365 anon_array_exit(&an_cookie);
4402 4366 ANON_LOCK_EXIT(&->a_rwlock);
4403 4367 }
4404 4368 goto next;
4405 4369 }
4406 4370
4407 4371 if (pszc == szc) {
4408 4372 /*
4409 4373 * segvn_full_szcpages() upgraded pages szc.
4410 4374 */
4411 4375 ASSERT(pszc == ppa[0]->p_szc);
4412 4376 ASSERT(IS_P2ALIGNED(pfn, pages));
4413 4377 goto chkszc;
4414 4378 }
4415 4379
4416 4380 if (pszc > szc) {
4417 4381 kmutex_t *szcmtx;
4418 4382 SEGVN_VMSTAT_FLTVNPAGES(35);
4419 4383 /*
4420 4384 * p_szc of ppa[0] can change since we haven't
4421 4385 * locked all constituent pages. Call
4422 4386 * page_lock_szc() to prevent szc changes.
4423 4387 * This should be a rare case that happens when
4424 4388 * multiple segments use a different page size
4425 4389 * to map the same file offsets.
4426 4390 */
4427 4391 szcmtx = page_szc_lock(ppa[0]);
4428 4392 pszc = ppa[0]->p_szc;
4429 4393 ASSERT(szcmtx != NULL || pszc == 0);
4430 4394 ASSERT(ppa[0]->p_szc <= pszc);
4431 4395 if (pszc <= szc) {
4432 4396 SEGVN_VMSTAT_FLTVNPAGES(36);
4433 4397 if (szcmtx != NULL) {
4434 4398 mutex_exit(szcmtx);
4435 4399 }
4436 4400 goto chkszc;
4437 4401 }
4438 4402 if (pplist != NULL) {
4439 4403 /*
4440 4404 * page got promoted since last check.
4441 4405 				 * we don't need preallocated large
4442 4406 * page.
4443 4407 */
4444 4408 SEGVN_VMSTAT_FLTVNPAGES(37);
4445 4409 page_free_replacement_page(pplist);
4446 4410 page_create_putback(pages);
4447 4411 }
4448 4412 SEGVN_UPDATE_MODBITS(ppa, pages, rw,
4449 4413 prot, vpprot);
4450 4414 hat_memload_array_region(hat, a, pgsz, ppa,
4451 4415 prot & vpprot, hat_flag, svd->rcookie);
4452 4416 mutex_exit(szcmtx);
4453 4417 if (!(hat_flag & HAT_LOAD_LOCK)) {
4454 4418 for (i = 0; i < pages; i++) {
4455 4419 page_unlock(ppa[i]);
4456 4420 }
4457 4421 }
4458 4422 if (amp != NULL) {
4459 4423 anon_array_exit(&an_cookie);
4460 4424 ANON_LOCK_EXIT(&->a_rwlock);
4461 4425 }
4462 4426 goto next;
4463 4427 }
4464 4428
4465 4429 /*
4466 4430 * if page got demoted since last check
4467 4431 * we could have not allocated larger page.
4468 4432 * allocate now.
4469 4433 */
4470 4434 if (pplist == NULL &&
4471 4435 page_alloc_pages(vp, seg, a, &pplist, NULL,
4472 4436 szc, 0, 0) && type != F_SOFTLOCK) {
4473 4437 SEGVN_VMSTAT_FLTVNPAGES(38);
4474 4438 for (i = 0; i < pages; i++) {
4475 4439 page_unlock(ppa[i]);
4476 4440 }
4477 4441 if (amp != NULL) {
4478 4442 anon_array_exit(&an_cookie);
4479 4443 ANON_LOCK_EXIT(&->a_rwlock);
4480 4444 }
4481 4445 ierr = -1;
4482 4446 alloc_failed |= (1 << szc);
4483 4447 break;
4484 4448 }
4485 4449
4486 4450 SEGVN_VMSTAT_FLTVNPAGES(39);
4487 4451
4488 4452 if (pplist != NULL) {
4489 4453 segvn_relocate_pages(ppa, pplist);
4490 4454 #ifdef DEBUG
4491 4455 } else {
4492 4456 ASSERT(type == F_SOFTLOCK);
4493 4457 SEGVN_VMSTAT_FLTVNPAGES(40);
4494 4458 #endif /* DEBUG */
4495 4459 }
4496 4460
4497 4461 SEGVN_UPDATE_MODBITS(ppa, pages, rw, prot, vpprot);
4498 4462
4499 4463 if (pplist == NULL && segvn_anypgsz_vnode == 0) {
4500 4464 ASSERT(type == F_SOFTLOCK);
4501 4465 for (i = 0; i < pages; i++) {
4502 4466 ASSERT(ppa[i]->p_szc < szc);
4503 4467 hat_memload_region(hat,
4504 4468 a + (i << PAGESHIFT),
4505 4469 ppa[i], prot & vpprot, hat_flag,
4506 4470 svd->rcookie);
4507 4471 }
4508 4472 } else {
4509 4473 ASSERT(pplist != NULL || type == F_SOFTLOCK);
4510 4474 hat_memload_array_region(hat, a, pgsz, ppa,
4511 4475 prot & vpprot, hat_flag, svd->rcookie);
4512 4476 }
4513 4477 if (!(hat_flag & HAT_LOAD_LOCK)) {
4514 4478 for (i = 0; i < pages; i++) {
4515 4479 ASSERT(PAGE_SHARED(ppa[i]));
4516 4480 page_unlock(ppa[i]);
4517 4481 }
4518 4482 }
4519 4483 if (amp != NULL) {
4520 4484 anon_array_exit(&an_cookie);
4521 4485 ANON_LOCK_EXIT(&->a_rwlock);
4522 4486 }
4523 4487
4524 4488 next:
4525 4489 if (vpage != NULL) {
4526 4490 vpage += pages;
4527 4491 }
4528 4492 adjszc_chk = 1;
4529 4493 }
4530 4494 if (a == lpgeaddr)
4531 4495 break;
4532 4496 ASSERT(a < lpgeaddr);
4533 4497
4534 4498 ASSERT(!brkcow && !tron && type != F_SOFTLOCK);
4535 4499
4536 4500 /*
4537 4501 * ierr == -1 means we failed to map with a large page.
4538 4502 * (either due to allocation/relocation failures or
4539 4503 * misalignment with other mappings to this file.
4540 4504 		 * misalignment with other mappings to this file.)
4541 4505 		 *
4542 4506 		 * ierr == -2 means some other thread allocated a large page
4543 4507 		 * after we gave up trying to map with a large page. retry with
4544 4508 */
4545 4509 ASSERT(ierr == -1 || ierr == -2);
4546 4510 ASSERT(ierr == -2 || szc != 0);
4547 4511 ASSERT(ierr == -1 || szc < seg->s_szc);
4548 4512 if (ierr == -2) {
4549 4513 SEGVN_VMSTAT_FLTVNPAGES(41);
4550 4514 ASSERT(pszc > szc && pszc <= seg->s_szc);
4551 4515 szc = pszc;
4552 4516 } else if (segvn_anypgsz_vnode) {
4553 4517 SEGVN_VMSTAT_FLTVNPAGES(42);
4554 4518 szc--;
4555 4519 } else {
4556 4520 SEGVN_VMSTAT_FLTVNPAGES(43);
4557 4521 ASSERT(pszc < szc);
4558 4522 /*
4559 4523 * other process created pszc large page.
4560 4524 * but we still have to drop to 0 szc.
4561 4525 */
4562 4526 szc = 0;
4563 4527 }
4564 4528
4565 4529 pgsz = page_get_pagesize(szc);
4566 4530 pages = btop(pgsz);
4567 4531 if (ierr == -2) {
4568 4532 /*
4569 4533 * Size up case. Note lpgaddr may only be needed for
4570 4534 * softlock case so we don't adjust it here.
4571 4535 */
4572 4536 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4573 4537 ASSERT(a >= lpgaddr);
4574 4538 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4575 4539 off = svd->offset + (uintptr_t)(a - seg->s_base);
4576 4540 aindx = svd->anon_index + seg_page(seg, a);
4577 4541 vpage = (svd->vpage != NULL) ?
4578 4542 &svd->vpage[seg_page(seg, a)] : NULL;
4579 4543 } else {
4580 4544 /*
4581 4545 * Size down case. Note lpgaddr may only be needed for
4582 4546 * softlock case so we don't adjust it here.
4583 4547 */
4584 4548 ASSERT(IS_P2ALIGNED(a, pgsz));
4585 4549 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4586 4550 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4587 4551 ASSERT(a < lpgeaddr);
4588 4552 if (a < addr) {
4589 4553 SEGVN_VMSTAT_FLTVNPAGES(44);
4590 4554 /*
4591 4555 * The beginning of the large page region can
4592 4556 * be pulled to the right to make a smaller
4593 4557 * region. We haven't yet faulted a single
4594 4558 * page.
4595 4559 */
4596 4560 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4597 4561 ASSERT(a >= lpgaddr);
4598 4562 off = svd->offset +
4599 4563 (uintptr_t)(a - seg->s_base);
4600 4564 aindx = svd->anon_index + seg_page(seg, a);
4601 4565 vpage = (svd->vpage != NULL) ?
4602 4566 &svd->vpage[seg_page(seg, a)] : NULL;
4603 4567 }
4604 4568 }
4605 4569 }
4606 4570 out:
4607 4571 kmem_free(ppa, ppasize);
4608 4572 if (!err && !vop_size_err) {
4609 4573 SEGVN_VMSTAT_FLTVNPAGES(45);
4610 4574 return (0);
4611 4575 }
4612 4576 if (type == F_SOFTLOCK && a > lpgaddr) {
4613 4577 SEGVN_VMSTAT_FLTVNPAGES(46);
4614 4578 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4615 4579 }
4616 4580 if (!vop_size_err) {
4617 4581 SEGVN_VMSTAT_FLTVNPAGES(47);
4618 4582 return (err);
4619 4583 }
4620 4584 ASSERT(brkcow || tron || type == F_SOFTLOCK);
4621 4585 /*
4622 4586 * Large page end is mapped beyond the end of file and it's a cow
4623 4587 * fault (can be a text replication induced cow) or softlock so we can't
4624 4588 * reduce the map area. For now just demote the segment. This should
4625 4589 * really only happen if the end of the file changed after the mapping
4626 4590 * was established since when large page segments are created we make
4627 4591 * sure they don't extend beyond the end of the file.
4628 4592 */
4629 4593 SEGVN_VMSTAT_FLTVNPAGES(48);
4630 4594
4631 4595 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4632 4596 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4633 4597 err = 0;
4634 4598 if (seg->s_szc != 0) {
4635 4599 segvn_fltvnpages_clrszc_cnt++;
4636 4600 ASSERT(svd->softlockcnt == 0);
4637 4601 err = segvn_clrszc(seg);
4638 4602 if (err != 0) {
4639 4603 segvn_fltvnpages_clrszc_err++;
4640 4604 }
4641 4605 }
4642 4606 ASSERT(err || seg->s_szc == 0);
4643 4607 SEGVN_LOCK_DOWNGRADE(seg->s_as, &svd->lock);
4644 4608 /* segvn_fault will do its job as if szc had been zero to begin with */
4645 4609 return (err == 0 ? IE_RETRY : FC_MAKE_ERR(err));
4646 4610 }
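
segvn_fault_vnodepages() above and segvn_fault_anonpages() below share the same retry skeleton: the inner loop maps one large page at a time, and a break with ierr == -1 sizes the next attempt down while ierr == -2 sizes it up. A stripped-down, user-space sketch of only that control flow; the helpers are hypothetical and the alignment and softlock details are deliberately omitted:

#include <stdio.h>
#include <stdint.h>

#define	BASE_SHIFT	12		/* assumes 4kB base pages, 512x per szc */

static size_t
pagesize_of(unsigned int szc)		/* hypothetical page_get_pagesize() */
{
	return ((size_t)1 << (BASE_SHIFT + 9 * szc));
}

static int
map_one(uintptr_t a, unsigned int szc)	/* one inner-loop step; pretend szc > 1 fails */
{
	(void) a;
	return (szc > 1 ? -1 : 0);
}

int
main(void)
{
	uintptr_t a = 0, eaddr = 4UL * 1024 * 1024;	/* map [0, 4MB) */
	unsigned int szc = 2;				/* start with the biggest size */
	int ierr = 0;

	for (;;) {
		size_t pgsz = pagesize_of(szc);
		uintptr_t lpgeaddr = (eaddr + pgsz - 1) & ~((uintptr_t)pgsz - 1);

		for (; a < lpgeaddr; a += pgsz) {
			if ((ierr = map_one(a, szc)) != 0)
				break;		/* -1: size down, -2: size up */
		}
		if (a == lpgeaddr) {
			(void) printf("mapped everything with szc %u\n", szc);
			return (0);
		}
		if (ierr == -2)
			szc++;			/* someone else built a larger page; retry bigger */
		else if (szc > 0)
			szc--;			/* allocation/alignment failed; retry smaller */
		else
			return (1);		/* already at base pages; give up */
	}
}
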
4647 4611
4648 4612 /*
4649 4613 * This routine will attempt to fault in one large page.
4650 4614 * it will use smaller pages if that fails.
4651 4615 * It should only be called for pure anonymous segments.
4652 4616 */
4653 4617 static faultcode_t
4654 4618 segvn_fault_anonpages(struct hat *hat, struct seg *seg, caddr_t lpgaddr,
4655 4619 caddr_t lpgeaddr, enum fault_type type, enum seg_rw rw, caddr_t addr,
4656 4620 caddr_t eaddr, int brkcow)
4657 4621 {
4658 4622 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4659 4623 struct anon_map *amp = svd->amp;
4660 4624 uchar_t segtype = svd->type;
4661 4625 uint_t szc = seg->s_szc;
4662 4626 size_t pgsz = page_get_pagesize(szc);
4663 4627 size_t maxpgsz = pgsz;
4664 4628 pgcnt_t pages = btop(pgsz);
4665 4629 uint_t ppaszc = szc;
4666 4630 caddr_t a = lpgaddr;
4667 4631 ulong_t aindx = svd->anon_index + seg_page(seg, a);
4668 4632 struct vpage *vpage = (svd->vpage != NULL) ?
4669 4633 &svd->vpage[seg_page(seg, a)] : NULL;
4670 4634 page_t **ppa;
4671 4635 uint_t ppa_szc;
4672 4636 faultcode_t err;
4673 4637 int ierr;
4674 4638 uint_t protchk, prot, vpprot;
4675 4639 ulong_t i;
4676 4640 int hat_flag = (type == F_SOFTLOCK) ? HAT_LOAD_LOCK : HAT_LOAD;
4677 4641 anon_sync_obj_t cookie;
4678 4642 int adjszc_chk;
4679 4643 int pgflags = (svd->tr_state == SEGVN_TR_ON) ? PG_LOCAL : 0;
4680 4644
4681 4645 ASSERT(szc != 0);
4682 4646 ASSERT(amp != NULL);
4683 4647 ASSERT(enable_mbit_wa == 0); /* no mbit simulations with large pages */
4684 4648 ASSERT(!(svd->flags & MAP_NORESERVE));
4685 4649 ASSERT(type != F_SOFTUNLOCK);
4686 4650 ASSERT(IS_P2ALIGNED(a, maxpgsz));
4687 4651 ASSERT(!brkcow || svd->tr_state == SEGVN_TR_OFF);
4688 4652 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4689 4653
4690 4654 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
4691 4655
4692 4656 VM_STAT_COND_ADD(type == F_SOFTLOCK, segvnvmstats.fltanpages[0]);
4693 4657 VM_STAT_COND_ADD(type != F_SOFTLOCK, segvnvmstats.fltanpages[1]);
4694 4658
4695 4659 if (svd->flags & MAP_TEXT) {
4696 4660 hat_flag |= HAT_LOAD_TEXT;
4697 4661 }
4698 4662
4699 4663 if (svd->pageprot) {
4700 4664 switch (rw) {
4701 4665 case S_READ:
4702 4666 protchk = PROT_READ;
4703 4667 break;
4704 4668 case S_WRITE:
4705 4669 protchk = PROT_WRITE;
4706 4670 break;
4707 4671 case S_EXEC:
4708 4672 protchk = PROT_EXEC;
4709 4673 break;
4710 4674 case S_OTHER:
4711 4675 default:
4712 4676 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
4713 4677 break;
4714 4678 }
4715 4679 VM_STAT_ADD(segvnvmstats.fltanpages[2]);
4716 4680 } else {
4717 4681 prot = svd->prot;
4718 4682 /* caller has already done segment level protection check. */
4719 4683 }
4720 4684
4721 4685 ppa = kmem_cache_alloc(segvn_szc_cache[ppaszc], KM_SLEEP);
4722 4686 ANON_LOCK_ENTER(&->a_rwlock, RW_READER);
4723 4687 for (;;) {
4724 4688 adjszc_chk = 0;
4725 4689 for (; a < lpgeaddr; a += pgsz, aindx += pages) {
4726 4690 if (svd->pageprot != 0 && IS_P2ALIGNED(a, maxpgsz)) {
4727 4691 VM_STAT_ADD(segvnvmstats.fltanpages[3]);
4728 4692 ASSERT(vpage != NULL);
4729 4693 prot = VPP_PROT(vpage);
4730 4694 ASSERT(sameprot(seg, a, maxpgsz));
4731 4695 if ((prot & protchk) == 0) {
4732 4696 err = FC_PROT;
4733 4697 goto error;
4734 4698 }
4735 4699 }
4736 4700 if (adjszc_chk && IS_P2ALIGNED(a, maxpgsz) &&
4737 4701 pgsz < maxpgsz) {
4738 4702 ASSERT(a > lpgaddr);
4739 4703 szc = seg->s_szc;
4740 4704 pgsz = maxpgsz;
4741 4705 pages = btop(pgsz);
4742 4706 ASSERT(IS_P2ALIGNED(aindx, pages));
4743 4707 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr,
4744 4708 pgsz);
4745 4709 }
4746 4710 if (type == F_SOFTLOCK) {
4747 4711 atomic_add_long((ulong_t *)&svd->softlockcnt,
4748 4712 pages);
4749 4713 }
4750 4714 anon_array_enter(amp, aindx, &cookie);
4751 4715 ppa_szc = (uint_t)-1;
4752 4716 ierr = anon_map_getpages(amp, aindx, szc, seg, a,
4753 4717 prot, &vpprot, ppa, &ppa_szc, vpage, rw, brkcow,
4754 4718 segvn_anypgsz, pgflags, svd->cred);
4755 4719 if (ierr != 0) {
4756 4720 anon_array_exit(&cookie);
4757 4721 VM_STAT_ADD(segvnvmstats.fltanpages[4]);
4758 4722 if (type == F_SOFTLOCK) {
4759 4723 atomic_add_long(
4760 4724 (ulong_t *)&svd->softlockcnt,
4761 4725 -pages);
4762 4726 }
4763 4727 if (ierr > 0) {
4764 4728 VM_STAT_ADD(segvnvmstats.fltanpages[6]);
4765 4729 err = FC_MAKE_ERR(ierr);
4766 4730 goto error;
4767 4731 }
4768 4732 break;
4769 4733 }
4770 4734
4771 4735 ASSERT(!IS_VMODSORT(ppa[0]->p_vnode));
4772 4736
4773 4737 ASSERT(segtype == MAP_SHARED ||
4774 4738 ppa[0]->p_szc <= szc);
4775 4739 ASSERT(segtype == MAP_PRIVATE ||
4776 4740 ppa[0]->p_szc >= szc);
4777 4741
4778 4742 /*
4779 4743 * Handle pages that have been marked for migration
4780 4744 */
4781 4745 if (lgrp_optimizations())
4782 4746 page_migrate(seg, a, ppa, pages);
4783 4747
4784 4748 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
4785 4749
4786 4750 if (segtype == MAP_SHARED) {
4787 4751 vpprot |= PROT_WRITE;
4788 4752 }
4789 4753
4790 4754 hat_memload_array(hat, a, pgsz, ppa,
4791 4755 prot & vpprot, hat_flag);
4792 4756
4793 4757 if (hat_flag & HAT_LOAD_LOCK) {
4794 4758 VM_STAT_ADD(segvnvmstats.fltanpages[7]);
4795 4759 } else {
4796 4760 VM_STAT_ADD(segvnvmstats.fltanpages[8]);
4797 4761 for (i = 0; i < pages; i++)
4798 4762 page_unlock(ppa[i]);
4799 4763 }
4800 4764 if (vpage != NULL)
4801 4765 vpage += pages;
4802 4766
4803 4767 anon_array_exit(&cookie);
4804 4768 adjszc_chk = 1;
4805 4769 }
4806 4770 if (a == lpgeaddr)
4807 4771 break;
4808 4772 ASSERT(a < lpgeaddr);
4809 4773 /*
4810 4774 * ierr == -1 means we failed to allocate a large page.
4811 4775 * so do a size down operation.
4812 4776 *
4813 4777 * ierr == -2 means some other process that privately shares
4814 4778 * pages with this process has allocated a larger page and we
4815 4779 * need to retry with larger pages. So do a size up
4816 4780 * operation. This relies on the fact that large pages are
4817 4781 * never partially shared i.e. if we share any constituent
4818 4782 * page of a large page with another process we must share the
4819 4783 * entire large page. Note this cannot happen for SOFTLOCK
4820 4784 * case, unless current address (a) is at the beginning of the
4821 4785 * next page size boundary because the other process couldn't
4822 4786 * have relocated locked pages.
4823 4787 */
4824 4788 ASSERT(ierr == -1 || ierr == -2);
4825 4789
4826 4790 if (segvn_anypgsz) {
4827 4791 ASSERT(ierr == -2 || szc != 0);
4828 4792 ASSERT(ierr == -1 || szc < seg->s_szc);
4829 4793 szc = (ierr == -1) ? szc - 1 : szc + 1;
4830 4794 } else {
4831 4795 /*
4832 4796 * For non COW faults and segvn_anypgsz == 0
4833 4797 * we need to be careful not to loop forever
4834 4798 * if existing page is found with szc other
4835 4799 * than 0 or seg->s_szc. This could be due
4836 4800 * to page relocations on behalf of DR or
4837 4801 * more likely large page creation. For this
4838 4802 * case simply re-size to existing page's szc
4839 4803 * if returned by anon_map_getpages().
4840 4804 */
4841 4805 if (ppa_szc == (uint_t)-1) {
4842 4806 szc = (ierr == -1) ? 0 : seg->s_szc;
4843 4807 } else {
4844 4808 ASSERT(ppa_szc <= seg->s_szc);
4845 4809 ASSERT(ierr == -2 || ppa_szc < szc);
4846 4810 ASSERT(ierr == -1 || ppa_szc > szc);
4847 4811 szc = ppa_szc;
4848 4812 }
4849 4813 }
4850 4814
4851 4815 pgsz = page_get_pagesize(szc);
4852 4816 pages = btop(pgsz);
4853 4817 ASSERT(type != F_SOFTLOCK || ierr == -1 ||
4854 4818 (IS_P2ALIGNED(a, pgsz) && IS_P2ALIGNED(lpgeaddr, pgsz)));
4855 4819 if (type == F_SOFTLOCK) {
4856 4820 /*
4857 4821 * For softlocks we cannot reduce the fault area
4858 4822 * (calculated based on the largest page size for this
4859 4823 * segment) for size down and a is already next
4860 4824 * page size aligned as asserted above for size
4861 4825 * ups. Therefore just continue in case of softlock.
4862 4826 */
4863 4827 VM_STAT_ADD(segvnvmstats.fltanpages[9]);
4864 4828 continue; /* keep lint happy */
4865 4829 } else if (ierr == -2) {
4866 4830
4867 4831 /*
4868 4832 * Size up case. Note lpgaddr may only be needed for
4869 4833 * softlock case so we don't adjust it here.
4870 4834 */
4871 4835 VM_STAT_ADD(segvnvmstats.fltanpages[10]);
4872 4836 a = (caddr_t)P2ALIGN((uintptr_t)a, pgsz);
4873 4837 ASSERT(a >= lpgaddr);
4874 4838 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4875 4839 aindx = svd->anon_index + seg_page(seg, a);
4876 4840 vpage = (svd->vpage != NULL) ?
4877 4841 &svd->vpage[seg_page(seg, a)] : NULL;
4878 4842 } else {
4879 4843 /*
4880 4844 * Size down case. Note lpgaddr may only be needed for
4881 4845 * softlock case so we don't adjust it here.
4882 4846 */
4883 4847 VM_STAT_ADD(segvnvmstats.fltanpages[11]);
4884 4848 ASSERT(IS_P2ALIGNED(a, pgsz));
4885 4849 ASSERT(IS_P2ALIGNED(lpgeaddr, pgsz));
4886 4850 lpgeaddr = (caddr_t)P2ROUNDUP((uintptr_t)eaddr, pgsz);
4887 4851 ASSERT(a < lpgeaddr);
4888 4852 if (a < addr) {
4889 4853 /*
4890 4854 * The beginning of the large page region can
4891 4855 * be pulled to the right to make a smaller
4892 4856 * region. We haven't yet faulted a single
4893 4857 * page.
4894 4858 */
4895 4859 VM_STAT_ADD(segvnvmstats.fltanpages[12]);
4896 4860 a = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
4897 4861 ASSERT(a >= lpgaddr);
4898 4862 aindx = svd->anon_index + seg_page(seg, a);
4899 4863 vpage = (svd->vpage != NULL) ?
4900 4864 &svd->vpage[seg_page(seg, a)] : NULL;
4901 4865 }
4902 4866 }
4903 4867 }
4904 4868 VM_STAT_ADD(segvnvmstats.fltanpages[13]);
4905 4869 ANON_LOCK_EXIT(&amp->a_rwlock);
4906 4870 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4907 4871 return (0);
4908 4872 error:
4909 4873 VM_STAT_ADD(segvnvmstats.fltanpages[14]);
4910 4874 ANON_LOCK_EXIT(&amp->a_rwlock);
4911 4875 kmem_cache_free(segvn_szc_cache[ppaszc], ppa);
4912 4876 if (type == F_SOFTLOCK && a > lpgaddr) {
4913 4877 VM_STAT_ADD(segvnvmstats.fltanpages[15]);
4914 4878 segvn_softunlock(seg, lpgaddr, a - lpgaddr, S_OTHER);
4915 4879 }
4916 4880 return (err);
4917 4881 }
4918 4882
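As a side note on the retry loop above: the size-down (ierr == -1) and size-up (ierr == -2) policy is easier to see in isolation. The stand-alone sketch below restates only that decision; the function name, its parameters, and the use of plain C types are illustrative assumptions and not part of seg_vn.c.

/*
 * Sketch only: pick the next page size code (szc) after
 * anon_map_getpages() requests a retry.  ierr == -1 means the large
 * page could not be allocated (size down); ierr == -2 means another
 * process already maps a larger page (size up).  existing_szc is the
 * size reported for an existing page, or (unsigned int)-1 if unknown.
 */
static unsigned int
next_szc_sketch(int ierr, unsigned int szc, unsigned int seg_szc,
    unsigned int existing_szc, int anypgsz)
{
	if (anypgsz) {
		/* any intermediate size is acceptable: step by one */
		return ((ierr == -1) ? szc - 1 : szc + 1);
	}
	if (existing_szc == (unsigned int)-1) {
		/* only size 0 and the segment's own size may be used */
		return ((ierr == -1) ? 0 : seg_szc);
	}
	/* re-size to the existing page's size to avoid looping forever */
	return (existing_szc);
}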
4919 4883 int fltadvice = 1; /* set to free behind pages for sequential access */
4920 4884
4921 4885 /*
4922 4886 * This routine is called via a machine specific fault handling routine.
4923 4887 * It is also called by software routines wishing to lock or unlock
4924 4888 * a range of addresses.
4925 4889 *
4926 4890 * Here is the basic algorithm:
4927 4891 * If unlocking
4928 4892 * Call segvn_softunlock
4929 4893 * Return
4930 4894 * endif
4931 4895 * Checking and set up work
4932 4896 * If we will need some non-anonymous pages
4933 4897 * Call VOP_GETPAGE over the range of non-anonymous pages
4934 4898 * endif
4935 4899 * Loop over all addresses requested
4936 4900 * Call segvn_faultpage passing in page list
4937 4901 * to load up translations and handle anonymous pages
4938 4902 * endloop
4939 4903 * Load up translation to any additional pages in page list not
4940 4904 * already handled that fit into this segment
4941 4905 */
4942 4906 static faultcode_t
4943 4907 segvn_fault(struct hat *hat, struct seg *seg, caddr_t addr, size_t len,
4944 4908 enum fault_type type, enum seg_rw rw)
4945 4909 {
4946 4910 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
4947 4911 page_t **plp, **ppp, *pp;
4948 4912 u_offset_t off;
4949 4913 caddr_t a;
4950 4914 struct vpage *vpage;
4951 4915 uint_t vpprot, prot;
4952 4916 int err;
4953 - page_t *pl[PVN_GETPAGE_NUM + 1];
4917 + page_t *pl[FAULT_TMP_PAGES_NUM + 1];
4954 4918 size_t plsz, pl_alloc_sz;
4955 4919 size_t page;
4956 4920 ulong_t anon_index;
4957 4921 struct anon_map *amp;
4958 4922 int dogetpage = 0;
4959 4923 caddr_t lpgaddr, lpgeaddr;
4960 4924 size_t pgsz;
4961 4925 anon_sync_obj_t cookie;
4962 4926 int brkcow = BREAK_COW_SHARE(rw, type, svd->type);
4963 4927
4964 4928 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
4965 4929 ASSERT(svd->amp == NULL || svd->rcookie == HAT_INVALID_REGION_COOKIE);
4966 4930
4967 4931 /*
4968 4932 * First handle the easy stuff
4969 4933 */
4970 4934 if (type == F_SOFTUNLOCK) {
4971 4935 if (rw == S_READ_NOCOW) {
4972 4936 rw = S_READ;
4973 4937 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
4974 4938 }
4975 4939 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
4976 4940 pgsz = (seg->s_szc == 0) ? PAGESIZE :
4977 4941 page_get_pagesize(seg->s_szc);
4978 4942 VM_STAT_COND_ADD(pgsz > PAGESIZE, segvnvmstats.fltanpages[16]);
4979 4943 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
4980 4944 segvn_softunlock(seg, lpgaddr, lpgeaddr - lpgaddr, rw);
4981 4945 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
4982 4946 return (0);
4983 4947 }
4984 4948
4985 4949 ASSERT(svd->tr_state == SEGVN_TR_OFF ||
4986 4950 !HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
4987 4951 if (brkcow == 0) {
4988 4952 if (svd->tr_state == SEGVN_TR_INIT) {
4989 4953 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
4990 4954 if (svd->tr_state == SEGVN_TR_INIT) {
4991 4955 ASSERT(svd->vp != NULL && svd->amp == NULL);
4992 4956 ASSERT(svd->flags & MAP_TEXT);
4993 4957 ASSERT(svd->type == MAP_PRIVATE);
4994 4958 segvn_textrepl(seg);
4995 4959 ASSERT(svd->tr_state != SEGVN_TR_INIT);
4996 4960 ASSERT(svd->tr_state != SEGVN_TR_ON ||
4997 4961 svd->amp != NULL);
4998 4962 }
4999 4963 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5000 4964 }
5001 4965 } else if (svd->tr_state != SEGVN_TR_OFF) {
5002 4966 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5003 4967
5004 4968 if (rw == S_WRITE && svd->tr_state != SEGVN_TR_OFF) {
5005 4969 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
5006 4970 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5007 4971 return (FC_PROT);
5008 4972 }
5009 4973
5010 4974 if (svd->tr_state == SEGVN_TR_ON) {
5011 4975 ASSERT(svd->vp != NULL && svd->amp != NULL);
5012 4976 segvn_textunrepl(seg, 0);
5013 4977 ASSERT(svd->amp == NULL &&
5014 4978 svd->tr_state == SEGVN_TR_OFF);
5015 4979 } else if (svd->tr_state != SEGVN_TR_OFF) {
5016 4980 svd->tr_state = SEGVN_TR_OFF;
5017 4981 }
5018 4982 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5019 4983 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5020 4984 }
5021 4985
5022 4986 top:
5023 4987 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5024 4988
5025 4989 /*
5026 4990 * If we have the same protections for the entire segment,
5027 4991 * insure that the access being attempted is legitimate.
5028 4992 */
5029 4993
5030 4994 if (svd->pageprot == 0) {
5031 4995 uint_t protchk;
5032 4996
5033 4997 switch (rw) {
5034 4998 case S_READ:
5035 4999 case S_READ_NOCOW:
5036 5000 protchk = PROT_READ;
5037 5001 break;
5038 5002 case S_WRITE:
5039 5003 protchk = PROT_WRITE;
5040 5004 break;
5041 5005 case S_EXEC:
5042 5006 protchk = PROT_EXEC;
5043 5007 break;
5044 5008 case S_OTHER:
5045 5009 default:
5046 5010 protchk = PROT_READ | PROT_WRITE | PROT_EXEC;
5047 5011 break;
5048 5012 }
5049 5013
5050 5014 if ((svd->prot & protchk) == 0) {
5051 5015 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5052 5016 return (FC_PROT); /* illegal access type */
5053 5017 }
5054 5018 }
5055 5019
5056 5020 if (brkcow && HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5057 5021 /* this must be SOFTLOCK S_READ fault */
5058 5022 ASSERT(svd->amp == NULL);
5059 5023 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5060 5024 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5061 5025 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5062 5026 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5063 5027 /*
5064 5028 * this must be the first ever non S_READ_NOCOW
5065 5029 * softlock for this segment.
5066 5030 */
5067 5031 ASSERT(svd->softlockcnt == 0);
5068 5032 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5069 5033 HAT_REGION_TEXT);
5070 5034 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5071 5035 }
5072 5036 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5073 5037 goto top;
5074 5038 }
5075 5039
5076 5040 /*
5077 5041 * We can't allow the long term use of softlocks for vmpss segments,
5078 5042 * because in some file truncation cases we should be able to demote
5079 5043 * the segment, which requires that there are no softlocks. The
5080 5044 * only case where it's ok to allow a SOFTLOCK fault against a vmpss
5081 5045 * segment is S_READ_NOCOW, where the caller holds the address space
5082 5046 * locked as writer and calls softunlock before dropping the as lock.
5083 5047 * S_READ_NOCOW is used by /proc to read memory from another user.
5084 5048 *
5085 5049 * Another deadlock between SOFTLOCK and file truncation can happen
5086 5050 * because segvn_fault_vnodepages() calls the FS one pagesize at
5087 5051 * a time. A second VOP_GETPAGE() call by segvn_fault_vnodepages()
5088 5052 * can cause a deadlock because the first set of page_t's remain
5089 5053 * locked SE_SHARED. To avoid this, we demote segments on a first
5090 5054 * SOFTLOCK if they have a length greater than the segment's
5091 5055 * page size.
5092 5056 *
5093 5057 * So for now, we only avoid demoting a segment on a SOFTLOCK when
5094 5058 * the access type is S_READ_NOCOW and the fault length is less than
5095 5059 * or equal to the segment's page size. While this is quite restrictive,
5096 5060 * it should be the most common case of SOFTLOCK against a vmpss
5097 5061 * segment.
5098 5062 *
5099 5063 * For S_READ_NOCOW, it's safe not to do a copy on write because the
5100 5064 * caller makes sure no COW will be caused by another thread for a
5101 5065 * softlocked page.
5102 5066 */
5103 5067 if (type == F_SOFTLOCK && svd->vp != NULL && seg->s_szc != 0) {
5104 5068 int demote = 0;
5105 5069
5106 5070 if (rw != S_READ_NOCOW) {
5107 5071 demote = 1;
5108 5072 }
5109 5073 if (!demote && len > PAGESIZE) {
5110 5074 pgsz = page_get_pagesize(seg->s_szc);
5111 5075 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr,
5112 5076 lpgeaddr);
5113 5077 if (lpgeaddr - lpgaddr > pgsz) {
5114 5078 demote = 1;
5115 5079 }
5116 5080 }
5117 5081
5118 5082 ASSERT(demote || AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5119 5083
5120 5084 if (demote) {
5121 5085 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5122 5086 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5123 5087 if (seg->s_szc != 0) {
5124 5088 segvn_vmpss_clrszc_cnt++;
5125 5089 ASSERT(svd->softlockcnt == 0);
5126 5090 err = segvn_clrszc(seg);
5127 5091 if (err) {
5128 5092 segvn_vmpss_clrszc_err++;
5129 5093 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5130 5094 return (FC_MAKE_ERR(err));
5131 5095 }
5132 5096 }
5133 5097 ASSERT(seg->s_szc == 0);
5134 5098 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5135 5099 goto top;
5136 5100 }
5137 5101 }
5138 5102
5139 5103 /*
5140 5104 * Check to see if we need to allocate an anon_map structure.
5141 5105 */
5142 5106 if (svd->amp == NULL && (svd->vp == NULL || brkcow)) {
5143 5107 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5144 5108 /*
5145 5109 * Drop the "read" lock on the segment and acquire
5146 5110 * the "write" version since we have to allocate the
5147 5111 * anon_map.
5148 5112 */
5149 5113 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5150 5114 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5151 5115
5152 5116 if (svd->amp == NULL) {
5153 5117 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
5154 5118 svd->amp->a_szc = seg->s_szc;
5155 5119 }
5156 5120 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5157 5121
5158 5122 /*
5159 5123 * Start all over again since segment protections
5160 5124 * may have changed after we dropped the "read" lock.
5161 5125 */
5162 5126 goto top;
5163 5127 }
5164 5128
5165 5129 /*
5166 5130 * S_READ_NOCOW vs S_READ distinction was
5167 5131 * only needed for the code above. After
5168 5132 * that we treat it as S_READ.
5169 5133 */
5170 5134 if (rw == S_READ_NOCOW) {
5171 5135 ASSERT(type == F_SOFTLOCK);
5172 5136 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
5173 5137 rw = S_READ;
5174 5138 }
5175 5139
5176 5140 amp = svd->amp;
5177 5141
5178 5142 /*
5179 5143 * MADV_SEQUENTIAL work is ignored for large page segments.
5180 5144 */
5181 5145 if (seg->s_szc != 0) {
5182 5146 pgsz = page_get_pagesize(seg->s_szc);
5183 5147 ASSERT(SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
5184 5148 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
5185 5149 if (svd->vp == NULL) {
5186 5150 err = segvn_fault_anonpages(hat, seg, lpgaddr,
5187 5151 lpgeaddr, type, rw, addr, addr + len, brkcow);
5188 5152 } else {
5189 5153 err = segvn_fault_vnodepages(hat, seg, lpgaddr,
5190 5154 lpgeaddr, type, rw, addr, addr + len, brkcow);
5191 5155 if (err == IE_RETRY) {
5192 5156 ASSERT(seg->s_szc == 0);
5193 5157 ASSERT(SEGVN_READ_HELD(seg->s_as, &svd->lock));
5194 5158 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5195 5159 goto top;
5196 5160 }
5197 5161 }
5198 5162 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5199 5163 return (err);
5200 5164 }
5201 5165
5202 5166 page = seg_page(seg, addr);
5203 5167 if (amp != NULL) {
5204 5168 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
5205 5169 anon_index = svd->anon_index + page;
5206 5170
5207 5171 if (type == F_PROT && rw == S_READ &&
5208 5172 svd->tr_state == SEGVN_TR_OFF &&
5209 5173 svd->type == MAP_PRIVATE && svd->pageprot == 0) {
5210 5174 size_t index = anon_index;
5211 5175 struct anon *ap;
5212 5176
5213 5177 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5214 5178 /*
5215 5179 * The fast path could apply to S_WRITE also, except
5216 5180 * that the protection fault could be caused by lazy
5217 5181 * tlb flush when ro->rw. In this case, the pte is
5218 5182 * RW already. But RO in the other cpu's tlb causes
5219 5183 * the fault. Since hat_chgprot won't do anything if
5220 5184 * pte doesn't change, we may end up faulting
5221 5185 * indefinitely until the RO tlb entry gets replaced.
5222 5186 */
5223 5187 for (a = addr; a < addr + len; a += PAGESIZE, index++) {
5224 5188 anon_array_enter(amp, index, &cookie);
5225 5189 ap = anon_get_ptr(amp->ahp, index);
5226 5190 anon_array_exit(&cookie);
5227 5191 if ((ap == NULL) || (ap->an_refcnt != 1)) {
5228 5192 ANON_LOCK_EXIT(&amp->a_rwlock);
5229 5193 goto slow;
5230 5194 }
5231 5195 }
5232 5196 hat_chgprot(seg->s_as->a_hat, addr, len, svd->prot);
5233 5197 ANON_LOCK_EXIT(&amp->a_rwlock);
5234 5198 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5235 5199 return (0);
5236 5200 }
5237 5201 }
5238 5202 slow:
5239 5203
5240 5204 if (svd->vpage == NULL)
5241 5205 vpage = NULL;
5242 5206 else
5243 5207 vpage = &svd->vpage[page];
5244 5208
5245 5209 off = svd->offset + (uintptr_t)(addr - seg->s_base);
5246 5210
5247 5211 /*
5248 5212 * If MADV_SEQUENTIAL has been set for the particular page we
5249 5213 * are faulting on, free behind all pages in the segment and put
5250 5214 * them on the free list.
5251 5215 */
5252 5216
5253 5217 if ((page != 0) && fltadvice && svd->tr_state != SEGVN_TR_ON) {
5254 5218 struct vpage *vpp;
5255 5219 ulong_t fanon_index;
5256 5220 size_t fpage;
5257 5221 u_offset_t pgoff, fpgoff;
5258 5222 struct vnode *fvp;
5259 5223 struct anon *fap = NULL;
5260 5224
5261 5225 if (svd->advice == MADV_SEQUENTIAL ||
5262 5226 (svd->pageadvice &&
5263 5227 VPP_ADVICE(vpage) == MADV_SEQUENTIAL)) {
5264 5228 pgoff = off - PAGESIZE;
5265 5229 fpage = page - 1;
5266 5230 if (vpage != NULL)
5267 5231 vpp = &svd->vpage[fpage];
5268 5232 if (amp != NULL)
5269 5233 fanon_index = svd->anon_index + fpage;
5270 5234
5271 5235 while (pgoff > svd->offset) {
5272 5236 if (svd->advice != MADV_SEQUENTIAL &&
5273 5237 (!svd->pageadvice || (vpage &&
5274 5238 VPP_ADVICE(vpp) != MADV_SEQUENTIAL)))
5275 5239 break;
5276 5240
5277 5241 /*
5278 5242 * If this is an anon page, we must find the
5279 5243 * correct <vp, offset> for it
5280 5244 */
5281 5245 fap = NULL;
5282 5246 if (amp != NULL) {
5283 5247 ANON_LOCK_ENTER(&amp->a_rwlock,
5284 5248 RW_READER);
5285 5249 anon_array_enter(amp, fanon_index,
5286 5250 &cookie);
5287 5251 fap = anon_get_ptr(amp->ahp,
5288 5252 fanon_index);
5289 5253 if (fap != NULL) {
5290 5254 swap_xlate(fap, &fvp, &fpgoff);
5291 5255 } else {
5292 5256 fpgoff = pgoff;
5293 5257 fvp = svd->vp;
5294 5258 }
5295 5259 anon_array_exit(&cookie);
5296 5260 ANON_LOCK_EXIT(&amp->a_rwlock);
5297 5261 } else {
5298 5262 fpgoff = pgoff;
5299 5263 fvp = svd->vp;
5300 5264 }
5301 5265 if (fvp == NULL)
5302 5266 break; /* XXX */
5303 5267 /*
5304 5268 * Skip pages that are free or have an
5305 5269 * "exclusive" lock.
5306 5270 */
5307 5271 pp = page_lookup_nowait(fvp, fpgoff, SE_SHARED);
5308 5272 if (pp == NULL)
5309 5273 break;
5310 5274 /*
5311 5275 * We don't need the page_struct_lock to test
5312 5276 * as this is only advisory; even if we
5313 5277 * acquire it someone might race in and lock
5314 5278 * the page after we unlock and before the
5315 5279 * PUTPAGE, then VOP_PUTPAGE will do nothing.
5316 5280 */
5317 5281 if (pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
5318 5282 /*
5319 5283 * Hold the vnode before releasing
5320 5284 * the page lock to prevent it from
5321 5285 * being freed and re-used by some
5322 5286 * other thread.
5323 5287 */
5324 5288 VN_HOLD(fvp);
5325 5289 page_unlock(pp);
5326 5290 /*
5327 5291 * We should build a page list
5328 5292 * to kluster putpages XXX
5329 5293 */
5330 5294 (void) VOP_PUTPAGE(fvp,
5331 5295 (offset_t)fpgoff, PAGESIZE,
5332 5296 (B_DONTNEED|B_FREE|B_ASYNC),
5333 5297 svd->cred, NULL);
5334 5298 VN_RELE(fvp);
5335 5299 } else {
5336 5300 /*
5337 5301 * XXX - Should the loop terminate if
5338 5302 * the page is `locked'?
5339 5303 */
5340 5304 page_unlock(pp);
5341 5305 }
5342 5306 --vpp;
5343 5307 --fanon_index;
5344 5308 pgoff -= PAGESIZE;
5345 5309 }
5346 5310 }
5347 5311 }
5348 5312
5349 5313 plp = pl;
5350 5314 *plp = NULL;
5351 5315 pl_alloc_sz = 0;
5352 5316
5353 5317 /*
5354 5318 * See if we need to call VOP_GETPAGE for
5355 5319 * *any* of the range being faulted on.
5356 5320 * We can skip all of this work if there
5357 5321 * was no original vnode.
5358 5322 */
5359 5323 if (svd->vp != NULL) {
5360 5324 u_offset_t vp_off;
5361 5325 size_t vp_len;
5362 5326 struct anon *ap;
5363 5327 vnode_t *vp;
5364 5328
5365 5329 vp_off = off;
5366 5330 vp_len = len;
5367 5331
5368 5332 if (amp == NULL)
5369 5333 dogetpage = 1;
5370 5334 else {
5371 5335 /*
5372 5336 * Only acquire reader lock to prevent amp->ahp
5373 5337 * from being changed. It's ok to miss pages,
5374 5338 * hence we don't do anon_array_enter
5375 5339 */
5376 5340 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5377 5341 ap = anon_get_ptr(amp->ahp, anon_index);
5378 5342
5379 5343 if (len <= PAGESIZE)
5380 5344 /* inline non_anon() */
5381 5345 dogetpage = (ap == NULL);
5382 5346 else
5383 5347 dogetpage = non_anon(amp->ahp, anon_index,
5384 5348 &vp_off, &vp_len);
5385 5349 ANON_LOCK_EXIT(&amp->a_rwlock);
5386 5350 }
5387 5351
5388 5352 if (dogetpage) {
5389 5353 enum seg_rw arw;
5390 5354 struct as *as = seg->s_as;
5391 5355
5392 - if (len > ptob((sizeof (pl) / sizeof (pl[0])) - 1)) {
5356 + if (len > FAULT_TMP_PAGES_SZ) {
5393 5357 /*
5394 5358 * Page list won't fit in local array,
5395 5359 * allocate one of the needed size.
5396 5360 */
5397 5361 pl_alloc_sz =
5398 5362 (btop(len) + 1) * sizeof (page_t *);
5399 5363 plp = kmem_alloc(pl_alloc_sz, KM_SLEEP);
5400 5364 plp[0] = NULL;
5401 5365 plsz = len;
5402 5366 } else if (rw == S_WRITE && svd->type == MAP_PRIVATE ||
5403 5367 svd->tr_state == SEGVN_TR_ON || rw == S_OTHER ||
5404 5368 (((size_t)(addr + PAGESIZE) <
5405 5369 (size_t)(seg->s_base + seg->s_size)) &&
5406 5370 hat_probe(as->a_hat, addr + PAGESIZE))) {
5407 5371 /*
5408 5372 * Ask VOP_GETPAGE to return the exact number
5409 5373 * of pages if
5410 5374 * (a) this is a COW fault, or
5411 5375 * (b) this is a software fault, or
5412 5376 * (c) next page is already mapped.
5413 5377 */
5414 5378 plsz = len;
5415 5379 } else {
5416 5380 /*
5417 5381 * Ask VOP_GETPAGE to return adjacent pages
5418 5382 * within the segment.
5419 5383 */
5420 - plsz = MIN((size_t)PVN_GETPAGE_SZ, (size_t)
5384 + plsz = MIN((size_t)FAULT_TMP_PAGES_SZ, (size_t)
5421 5385 ((seg->s_base + seg->s_size) - addr));
5422 5386 ASSERT((addr + plsz) <=
5423 5387 (seg->s_base + seg->s_size));
5424 5388 }
5425 5389
5426 5390 /*
5427 5391 * Need to get some non-anonymous pages.
5428 5392 * We need to make only one call to GETPAGE to do
5429 5393 * this to prevent certain deadlocking conditions
5430 5394 * when we are doing locking. In this case
5431 5395 * non_anon() should have picked up the smallest
5432 5396 * range which includes all the non-anonymous
5433 5397 * pages in the requested range. We have to
5434 5398 * be careful regarding which rw flag to pass in
5435 5399 * because on a private mapping, the underlying
5436 5400 * object is never allowed to be written.
5437 5401 */
5438 5402 if (rw == S_WRITE && svd->type == MAP_PRIVATE) {
5439 5403 arw = S_READ;
5440 5404 } else {
5441 5405 arw = rw;
5442 5406 }
5443 5407 vp = svd->vp;
5444 5408 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5445 5409 "segvn_getpage:seg %p addr %p vp %p",
5446 5410 seg, addr, vp);
5447 5411 err = VOP_GETPAGE(vp, (offset_t)vp_off, vp_len,
5448 5412 &vpprot, plp, plsz, seg, addr + (vp_off - off), arw,
5449 5413 svd->cred, NULL);
5450 5414 if (err) {
5451 5415 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5452 5416 segvn_pagelist_rele(plp);
5453 5417 if (pl_alloc_sz)
5454 5418 kmem_free(plp, pl_alloc_sz);
5455 5419 return (FC_MAKE_ERR(err));
5456 5420 }
5457 5421 if (svd->type == MAP_PRIVATE)
5458 5422 vpprot &= ~PROT_WRITE;
5459 5423 }
5460 5424 }
5461 5425
5462 5426 /*
5463 5427 * N.B. at this time the plp array has all the needed non-anon
5464 5428 * pages in addition to (possibly) having some adjacent pages.
5465 5429 */
5466 5430
5467 5431 /*
5468 5432 * Always acquire the anon_array_lock to prevent
5469 5433 * 2 threads from allocating separate anon slots for
5470 5434 * the same "addr".
5471 5435 *
5472 5436 * If this is a copy-on-write fault and we don't already
5473 5437 * have the anon_array_lock, acquire it to prevent the
5474 5438 * fault routine from handling multiple copy-on-write faults
5475 5439 * on the same "addr" in the same address space.
5476 5440 *
5477 5441 * Only one thread should deal with the fault since after
5478 5442 * it is handled, the other threads can acquire a translation
5479 5443 * to the newly created private page. This prevents two or
5480 5444 * more threads from creating different private pages for the
5481 5445 * same fault.
5482 5446 *
5483 5447 * We grab "serialization" lock here if this is a MAP_PRIVATE segment
5484 5448 * to prevent deadlock between this thread and another thread
5485 5449 * which has soft-locked this page and wants to acquire serial_lock.
5486 5450 * ( bug 4026339 )
5487 5451 *
5488 5452 * The fix for bug 4026339 becomes unnecessary when using the
5489 5453 * locking scheme with per amp rwlock and a global set of hash
5490 5454 * lock, anon_array_lock. If we steal a vnode page when low
5491 5455 * on memory and upgrade the page lock through page_rename,
5492 5456 * then the page is PAGE_HANDLED, nothing needs to be done
5493 5457 * for this page after returning from segvn_faultpage.
5494 5458 *
5495 5459 * But really, the page lock should be downgraded after
5496 5460 * the stolen page is page_rename'd.
5497 5461 */
5498 5462
5499 5463 if (amp != NULL)
5500 5464 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5501 5465
5502 5466 /*
5503 5467 * Ok, now loop over the address range and handle faults
5504 5468 */
5505 5469 for (a = addr; a < addr + len; a += PAGESIZE, off += PAGESIZE) {
5506 5470 err = segvn_faultpage(hat, seg, a, off, vpage, plp, vpprot,
5507 5471 type, rw, brkcow);
5508 5472 if (err) {
5509 5473 if (amp != NULL)
5510 5474 ANON_LOCK_EXIT(&amp->a_rwlock);
5511 5475 if (type == F_SOFTLOCK && a > addr) {
5512 5476 segvn_softunlock(seg, addr, (a - addr),
5513 5477 S_OTHER);
5514 5478 }
5515 5479 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5516 5480 segvn_pagelist_rele(plp);
5517 5481 if (pl_alloc_sz)
5518 5482 kmem_free(plp, pl_alloc_sz);
5519 5483 return (err);
5520 5484 }
5521 5485 if (vpage) {
5522 5486 vpage++;
5523 5487 } else if (svd->vpage) {
5524 5488 page = seg_page(seg, addr);
5525 5489 vpage = &svd->vpage[++page];
5526 5490 }
5527 5491 }
5528 5492
5529 5493 /* Didn't get pages from the underlying fs so we're done */
5530 5494 if (!dogetpage)
5531 5495 goto done;
5532 5496
5533 5497 /*
5534 5498 * Now handle any other pages in the list returned.
5535 5499 * If the page can be used, load up the translations now.
5536 5500 * Note that the for loop will only be entered if "plp"
5537 5501 * is pointing to a non-NULL page pointer which means that
5538 5502 * VOP_GETPAGE() was called and vpprot has been initialized.
5539 5503 */
5540 5504 if (svd->pageprot == 0)
5541 5505 prot = svd->prot & vpprot;
5542 5506
5543 5507
5544 5508 /*
5545 5509 * Large Files: diff should be unsigned value because we started
5546 5510 * supporting > 2GB segment sizes from 2.5.1 and when a
5547 5511 * large file of size > 2GB gets mapped to address space
5548 5512 * the diff value can be > 2GB.
5549 5513 */
5550 5514
5551 5515 for (ppp = plp; (pp = *ppp) != NULL; ppp++) {
5552 5516 size_t diff;
5553 5517 struct anon *ap;
5554 5518 int anon_index;
5555 5519 anon_sync_obj_t cookie;
5556 5520 int hat_flag = HAT_LOAD_ADV;
5557 5521
5558 5522 if (svd->flags & MAP_TEXT) {
5559 5523 hat_flag |= HAT_LOAD_TEXT;
5560 5524 }
5561 5525
5562 5526 if (pp == PAGE_HANDLED)
5563 5527 continue;
5564 5528
5565 5529 if (svd->tr_state != SEGVN_TR_ON &&
5566 5530 pp->p_offset >= svd->offset &&
5567 5531 pp->p_offset < svd->offset + seg->s_size) {
5568 5532
5569 5533 diff = pp->p_offset - svd->offset;
5570 5534
5571 5535 /*
5572 5536 * Large Files: Following is the assertion
5573 5537 * validating the above cast.
5574 5538 */
5575 5539 ASSERT(svd->vp == pp->p_vnode);
5576 5540
5577 5541 page = btop(diff);
5578 5542 if (svd->pageprot)
5579 5543 prot = VPP_PROT(&svd->vpage[page]) & vpprot;
5580 5544
5581 5545 /*
5582 5546 * Prevent other threads in the address space from
5583 5547 * creating private pages (i.e., allocating anon slots)
5584 5548 * while we are in the process of loading translations
5585 5549 * to additional pages returned by the underlying
5586 5550 * object.
5587 5551 */
5588 5552 if (amp != NULL) {
5589 5553 anon_index = svd->anon_index + page;
5590 5554 anon_array_enter(amp, anon_index, &cookie);
5591 5555 ap = anon_get_ptr(amp->ahp, anon_index);
5592 5556 }
5593 5557 if ((amp == NULL) || (ap == NULL)) {
5594 5558 if (IS_VMODSORT(pp->p_vnode) ||
5595 5559 enable_mbit_wa) {
5596 5560 if (rw == S_WRITE)
5597 5561 hat_setmod(pp);
5598 5562 else if (rw != S_OTHER &&
5599 5563 !hat_ismod(pp))
5600 5564 prot &= ~PROT_WRITE;
5601 5565 }
5602 5566 /*
5603 5567 * Skip mapping read ahead pages marked
5604 5568 * for migration, so they will get migrated
5605 5569 * properly on fault
5606 5570 */
5607 5571 ASSERT(amp == NULL ||
5608 5572 svd->rcookie == HAT_INVALID_REGION_COOKIE);
5609 5573 if ((prot & PROT_READ) && !PP_ISMIGRATE(pp)) {
5610 5574 hat_memload_region(hat,
5611 5575 seg->s_base + diff,
5612 5576 pp, prot, hat_flag,
5613 5577 svd->rcookie);
5614 5578 }
5615 5579 }
5616 5580 if (amp != NULL)
5617 5581 anon_array_exit(&cookie);
5618 5582 }
5619 5583 page_unlock(pp);
5620 5584 }
5621 5585 done:
5622 5586 if (amp != NULL)
5623 5587 ANON_LOCK_EXIT(&amp->a_rwlock);
5624 5588 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5625 5589 if (pl_alloc_sz)
5626 5590 kmem_free(plp, pl_alloc_sz);
5627 5591 return (0);
5628 5592 }
5629 5593
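The page-list sizing in segvn_fault() above has three outcomes: fall back to a kmem-allocated list when the fault is larger than the on-stack array, ask VOP_GETPAGE() for the exact length for COW and software faults (or when the next page is already mapped), and otherwise allow read-ahead bounded by the segment end. A minimal sketch of just that choice follows; the helper, its arguments, and the tmp_pages_sz parameter (standing in for FAULT_TMP_PAGES_SZ) are assumptions for illustration.

/*
 * Sketch only: size of the page list handed to VOP_GETPAGE().
 * want_exact collapses the three "exact length" conditions used in
 * segvn_fault(): COW fault, software fault, or next page already mapped.
 */
static unsigned long
choose_plsz_sketch(unsigned long len, unsigned long seg_left,
    unsigned long tmp_pages_sz, int want_exact)
{
	if (len > tmp_pages_sz)
		return (len);	/* caller allocates a list of btop(len) + 1 */
	if (want_exact)
		return (len);	/* ask for exactly the faulting range */
	/* otherwise permit read-ahead, but never past the segment end */
	return (seg_left < tmp_pages_sz ? seg_left : tmp_pages_sz);
}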
5630 5594 /*
5631 5595 * This routine is used to start I/O on pages asynchronously. XXX it will
5632 5596 * only create PAGESIZE pages. At fault time they will be relocated into
5633 5597 * larger pages.
5634 5598 */
5635 5599 static faultcode_t
5636 5600 segvn_faulta(struct seg *seg, caddr_t addr)
5637 5601 {
5638 5602 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5639 5603 int err;
5640 5604 struct anon_map *amp;
5641 5605 vnode_t *vp;
5642 5606
5643 5607 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5644 5608
5645 5609 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
5646 5610 if ((amp = svd->amp) != NULL) {
5647 5611 struct anon *ap;
5648 5612
5649 5613 /*
5650 5614 * Reader lock to prevent amp->ahp from being changed.
5651 5615 * This is advisory, it's ok to miss a page, so
5652 5616 * we don't do anon_array_enter lock.
5653 5617 */
5654 5618 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5655 5619 if ((ap = anon_get_ptr(amp->ahp,
5656 5620 svd->anon_index + seg_page(seg, addr))) != NULL) {
5657 5621
5658 5622 err = anon_getpage(&ap, NULL, NULL,
5659 5623 0, seg, addr, S_READ, svd->cred);
5660 5624
5661 5625 ANON_LOCK_EXIT(&amp->a_rwlock);
5662 5626 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5663 5627 if (err)
5664 5628 return (FC_MAKE_ERR(err));
5665 5629 return (0);
5666 5630 }
5667 5631 ANON_LOCK_EXIT(&amp->a_rwlock);
5668 5632 }
5669 5633
5670 5634 if (svd->vp == NULL) {
5671 5635 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5672 5636 return (0); /* zfod page - do nothing now */
5673 5637 }
5674 5638
5675 5639 vp = svd->vp;
5676 5640 TRACE_3(TR_FAC_VM, TR_SEGVN_GETPAGE,
5677 5641 "segvn_getpage:seg %p addr %p vp %p", seg, addr, vp);
5678 5642 err = VOP_GETPAGE(vp,
5679 5643 (offset_t)(svd->offset + (uintptr_t)(addr - seg->s_base)),
5680 5644 PAGESIZE, NULL, NULL, 0, seg, addr,
5681 5645 S_OTHER, svd->cred, NULL);
5682 5646
5683 5647 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5684 5648 if (err)
5685 5649 return (FC_MAKE_ERR(err));
5686 5650 return (0);
5687 5651 }
5688 5652
5689 5653 static int
5690 5654 segvn_setprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
5691 5655 {
5692 5656 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
5693 5657 struct vpage *cvp, *svp, *evp;
5694 5658 struct vnode *vp;
5695 5659 size_t pgsz;
5696 5660 pgcnt_t pgcnt;
5697 5661 anon_sync_obj_t cookie;
5698 5662 int unload_done = 0;
5699 5663
5700 5664 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
5701 5665
5702 5666 if ((svd->maxprot & prot) != prot)
5703 5667 return (EACCES); /* violated maxprot */
5704 5668
5705 5669 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
5706 5670
5707 5671 /* return if prot is the same */
5708 5672 if (!svd->pageprot && svd->prot == prot) {
5709 5673 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5710 5674 return (0);
5711 5675 }
5712 5676
5713 5677 /*
5714 5678 * Since we change protections we first have to flush the cache.
5715 5679 * This makes sure all the pagelock calls have to recheck
5716 5680 * protections.
5717 5681 */
5718 5682 if (svd->softlockcnt > 0) {
5719 5683 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5720 5684
5721 5685 /*
5722 5686 * If this is shared segment non 0 softlockcnt
5723 5687 * means locked pages are still in use.
5724 5688 */
5725 5689 if (svd->type == MAP_SHARED) {
5726 5690 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5727 5691 return (EAGAIN);
5728 5692 }
5729 5693
5730 5694 /*
5731 5695 * Since we do have the segvn writers lock nobody can fill
5732 5696 * the cache with entries belonging to this seg during
5733 5697 * the purge. The flush either succeeds or we still have
5734 5698 * pending I/Os.
5735 5699 */
5736 5700 segvn_purge(seg);
5737 5701 if (svd->softlockcnt > 0) {
5738 5702 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5739 5703 return (EAGAIN);
5740 5704 }
5741 5705 }
5742 5706
5743 5707 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
5744 5708 ASSERT(svd->amp == NULL);
5745 5709 ASSERT(svd->tr_state == SEGVN_TR_OFF);
5746 5710 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
5747 5711 HAT_REGION_TEXT);
5748 5712 svd->rcookie = HAT_INVALID_REGION_COOKIE;
5749 5713 unload_done = 1;
5750 5714 } else if (svd->tr_state == SEGVN_TR_INIT) {
5751 5715 svd->tr_state = SEGVN_TR_OFF;
5752 5716 } else if (svd->tr_state == SEGVN_TR_ON) {
5753 5717 ASSERT(svd->amp != NULL);
5754 5718 segvn_textunrepl(seg, 0);
5755 5719 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
5756 5720 unload_done = 1;
5757 5721 }
5758 5722
5759 5723 if ((prot & PROT_WRITE) && svd->type == MAP_SHARED &&
5760 5724 svd->vp != NULL && (svd->vp->v_flag & VVMEXEC)) {
5761 5725 ASSERT(vn_is_mapped(svd->vp, V_WRITE));
5762 5726 segvn_inval_trcache(svd->vp);
5763 5727 }
5764 5728 if (seg->s_szc != 0) {
5765 5729 int err;
5766 5730 pgsz = page_get_pagesize(seg->s_szc);
5767 5731 pgcnt = pgsz >> PAGESHIFT;
5768 5732 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
5769 5733 if (!IS_P2ALIGNED(addr, pgsz) || !IS_P2ALIGNED(len, pgsz)) {
5770 5734 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5771 5735 ASSERT(seg->s_base != addr || seg->s_size != len);
5772 5736 /*
5773 5737 * If we are holding the as lock as a reader then
5774 5738 * we need to return IE_RETRY and let the as
5775 5739 * layer drop and re-acquire the lock as a writer.
5776 5740 */
5777 5741 if (AS_READ_HELD(seg->s_as, &seg->s_as->a_lock))
5778 5742 return (IE_RETRY);
5779 5743 VM_STAT_ADD(segvnvmstats.demoterange[1]);
5780 5744 if (svd->type == MAP_PRIVATE || svd->vp != NULL) {
5781 5745 err = segvn_demote_range(seg, addr, len,
5782 5746 SDR_END, 0);
5783 5747 } else {
5784 5748 uint_t szcvec = map_pgszcvec(seg->s_base,
5785 5749 pgsz, (uintptr_t)seg->s_base,
5786 5750 (svd->flags & MAP_TEXT), MAPPGSZC_SHM, 0);
5787 5751 err = segvn_demote_range(seg, addr, len,
5788 5752 SDR_END, szcvec);
5789 5753 }
5790 5754 if (err == 0)
5791 5755 return (IE_RETRY);
5792 5756 if (err == ENOMEM)
5793 5757 return (IE_NOMEM);
5794 5758 return (err);
5795 5759 }
5796 5760 }
5797 5761
5798 5762
5799 5763 /*
5800 5764 * If it's a private mapping and we're making it writable then we
5801 5765 * may have to reserve the additional swap space now. If we are
5802 5766 * making writable only a part of the segment then we use its vpage
5803 5767 * array to keep a record of the pages for which we have reserved
5804 5768 * swap. In this case we set the pageswap field in the segment's
5805 5769 * segvn structure to record this.
5806 5770 *
5807 5771 * If it's a private mapping to a file (i.e., vp != NULL) and we're
5808 5772 * removing write permission on the entire segment and we haven't
5809 5773 * modified any pages, we can release the swap space.
5810 5774 */
5811 5775 if (svd->type == MAP_PRIVATE) {
5812 5776 if (prot & PROT_WRITE) {
5813 5777 if (!(svd->flags & MAP_NORESERVE) &&
5814 5778 !(svd->swresv && svd->pageswap == 0)) {
5815 5779 size_t sz = 0;
5816 5780
5817 5781 /*
5818 5782 * Start by determining how much swap
5819 5783 * space is required.
5820 5784 */
5821 5785 if (addr == seg->s_base &&
5822 5786 len == seg->s_size &&
5823 5787 svd->pageswap == 0) {
5824 5788 /* The whole segment */
5825 5789 sz = seg->s_size;
5826 5790 } else {
5827 5791 /*
5828 5792 * Make sure that the vpage array
5829 5793 * exists, and make a note of the
5830 5794 * range of elements corresponding
5831 5795 * to len.
5832 5796 */
5833 5797 segvn_vpage(seg);
5834 5798 if (svd->vpage == NULL) {
5835 5799 SEGVN_LOCK_EXIT(seg->s_as,
5836 5800 &svd->lock);
5837 5801 return (ENOMEM);
5838 5802 }
5839 5803 svp = &svd->vpage[seg_page(seg, addr)];
5840 5804 evp = &svd->vpage[seg_page(seg,
5841 5805 addr + len)];
5842 5806
5843 5807 if (svd->pageswap == 0) {
5844 5808 /*
5845 5809 * This is the first time we've
5846 5810 * asked for a part of this
5847 5811 * segment, so we need to
5848 5812 * reserve everything we've
5849 5813 * been asked for.
5850 5814 */
5851 5815 sz = len;
5852 5816 } else {
5853 5817 /*
5854 5818 * We have to count the number
5855 5819 * of pages required.
5856 5820 */
5857 5821 for (cvp = svp; cvp < evp;
5858 5822 cvp++) {
5859 5823 if (!VPP_ISSWAPRES(cvp))
5860 5824 sz++;
5861 5825 }
5862 5826 sz <<= PAGESHIFT;
5863 5827 }
5864 5828 }
5865 5829
5866 5830 /* Try to reserve the necessary swap. */
5867 5831 if (anon_resv_zone(sz,
5868 5832 seg->s_as->a_proc->p_zone) == 0) {
5869 5833 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5870 5834 return (IE_NOMEM);
5871 5835 }
5872 5836
5873 5837 /*
5874 5838 * Make a note of how much swap space
5875 5839 * we've reserved.
5876 5840 */
5877 5841 if (svd->pageswap == 0 && sz == seg->s_size) {
5878 5842 svd->swresv = sz;
5879 5843 } else {
5880 5844 ASSERT(svd->vpage != NULL);
5881 5845 svd->swresv += sz;
5882 5846 svd->pageswap = 1;
5883 5847 for (cvp = svp; cvp < evp; cvp++) {
5884 5848 if (!VPP_ISSWAPRES(cvp))
5885 5849 VPP_SETSWAPRES(cvp);
5886 5850 }
5887 5851 }
5888 5852 }
5889 5853 } else {
5890 5854 /*
5891 5855 * Swap space is released only if this segment
5892 5856 * does not map anonymous memory, since read faults
5893 5857 * on such segments still need an anon slot to read
5894 5858 * in the data.
5895 5859 */
5896 5860 if (svd->swresv != 0 && svd->vp != NULL &&
5897 5861 svd->amp == NULL && addr == seg->s_base &&
5898 5862 len == seg->s_size && svd->pageprot == 0) {
5899 5863 ASSERT(svd->pageswap == 0);
5900 5864 anon_unresv_zone(svd->swresv,
5901 5865 seg->s_as->a_proc->p_zone);
5902 5866 svd->swresv = 0;
5903 5867 TRACE_3(TR_FAC_VM, TR_ANON_PROC,
5904 5868 "anon proc:%p %lu %u", seg, 0, 0);
5905 5869 }
5906 5870 }
5907 5871 }
5908 5872
5909 5873 if (addr == seg->s_base && len == seg->s_size && svd->vpage == NULL) {
5910 5874 if (svd->prot == prot) {
5911 5875 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5912 5876 return (0); /* all done */
5913 5877 }
5914 5878 svd->prot = (uchar_t)prot;
5915 5879 } else if (svd->type == MAP_PRIVATE) {
5916 5880 struct anon *ap = NULL;
5917 5881 page_t *pp;
5918 5882 u_offset_t offset, off;
5919 5883 struct anon_map *amp;
5920 5884 ulong_t anon_idx = 0;
5921 5885
5922 5886 /*
5923 5887 * A vpage structure exists or else the change does not
5924 5888 * involve the entire segment. Establish a vpage structure
5925 5889 * if none is there. Then, for each page in the range,
5926 5890 * adjust its individual permissions. Note that write-
5927 5891 * enabling a MAP_PRIVATE page can affect the claims for
5928 5892 * locked down memory. Overcommitting memory terminates
5929 5893 * the operation.
5930 5894 */
5931 5895 segvn_vpage(seg);
5932 5896 if (svd->vpage == NULL) {
5933 5897 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
5934 5898 return (ENOMEM);
5935 5899 }
5936 5900 svd->pageprot = 1;
5937 5901 if ((amp = svd->amp) != NULL) {
5938 5902 anon_idx = svd->anon_index + seg_page(seg, addr);
5939 5903 ASSERT(seg->s_szc == 0 ||
5940 5904 IS_P2ALIGNED(anon_idx, pgcnt));
5941 5905 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
5942 5906 }
5943 5907
5944 5908 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
5945 5909 evp = &svd->vpage[seg_page(seg, addr + len)];
5946 5910
5947 5911 /*
5948 5912 * See Statement at the beginning of segvn_lockop regarding
5949 5913 * the way cowcnts and lckcnts are handled.
5950 5914 */
5951 5915 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
5952 5916
5953 5917 if (seg->s_szc != 0) {
5954 5918 if (amp != NULL) {
5955 5919 anon_array_enter(amp, anon_idx,
5956 5920 &cookie);
5957 5921 }
5958 5922 if (IS_P2ALIGNED(anon_idx, pgcnt) &&
5959 5923 !segvn_claim_pages(seg, svp, offset,
5960 5924 anon_idx, prot)) {
5961 5925 if (amp != NULL) {
5962 5926 anon_array_exit(&cookie);
5963 5927 }
5964 5928 break;
5965 5929 }
5966 5930 if (amp != NULL) {
5967 5931 anon_array_exit(&cookie);
5968 5932 }
5969 5933 anon_idx++;
5970 5934 } else {
5971 5935 if (amp != NULL) {
5972 5936 anon_array_enter(amp, anon_idx,
5973 5937 &cookie);
5974 5938 ap = anon_get_ptr(amp->ahp, anon_idx++);
5975 5939 }
5976 5940
5977 5941 if (VPP_ISPPLOCK(svp) &&
5978 5942 VPP_PROT(svp) != prot) {
5979 5943
5980 5944 if (amp == NULL || ap == NULL) {
5981 5945 vp = svd->vp;
5982 5946 off = offset;
5983 5947 } else
5984 5948 swap_xlate(ap, &vp, &off);
5985 5949 if (amp != NULL)
5986 5950 anon_array_exit(&cookie);
5987 5951
5988 5952 if ((pp = page_lookup(vp, off,
5989 5953 SE_SHARED)) == NULL) {
5990 5954 panic("segvn_setprot: no page");
5991 5955 /*NOTREACHED*/
5992 5956 }
5993 5957 ASSERT(seg->s_szc == 0);
5994 5958 if ((VPP_PROT(svp) ^ prot) &
5995 5959 PROT_WRITE) {
5996 5960 if (prot & PROT_WRITE) {
5997 5961 if (!page_addclaim(
5998 5962 pp)) {
5999 5963 page_unlock(pp);
6000 5964 break;
6001 5965 }
6002 5966 } else {
6003 5967 if (!page_subclaim(
6004 5968 pp)) {
6005 5969 page_unlock(pp);
6006 5970 break;
6007 5971 }
6008 5972 }
6009 5973 }
6010 5974 page_unlock(pp);
6011 5975 } else if (amp != NULL)
6012 5976 anon_array_exit(&cookie);
6013 5977 }
6014 5978 VPP_SETPROT(svp, prot);
6015 5979 offset += PAGESIZE;
6016 5980 }
6017 5981 if (amp != NULL)
6018 5982 ANON_LOCK_EXIT(&amp->a_rwlock);
6019 5983
6020 5984 /*
6021 5985 * Did we terminate prematurely? If so, simply unload
6022 5986 * the translations to the things we've updated so far.
6023 5987 */
6024 5988 if (svp != evp) {
6025 5989 if (unload_done) {
6026 5990 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6027 5991 return (IE_NOMEM);
6028 5992 }
6029 5993 len = (svp - &svd->vpage[seg_page(seg, addr)]) *
6030 5994 PAGESIZE;
6031 5995 ASSERT(seg->s_szc == 0 || IS_P2ALIGNED(len, pgsz));
6032 5996 if (len != 0)
6033 5997 hat_unload(seg->s_as->a_hat, addr,
6034 5998 len, HAT_UNLOAD);
6035 5999 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6036 6000 return (IE_NOMEM);
6037 6001 }
6038 6002 } else {
6039 6003 segvn_vpage(seg);
6040 6004 if (svd->vpage == NULL) {
6041 6005 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6042 6006 return (ENOMEM);
6043 6007 }
6044 6008 svd->pageprot = 1;
6045 6009 evp = &svd->vpage[seg_page(seg, addr + len)];
6046 6010 for (svp = &svd->vpage[seg_page(seg, addr)]; svp < evp; svp++) {
6047 6011 VPP_SETPROT(svp, prot);
6048 6012 }
6049 6013 }
6050 6014
6051 6015 if (unload_done) {
6052 6016 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6053 6017 return (0);
6054 6018 }
6055 6019
6056 6020 if (((prot & PROT_WRITE) != 0 &&
6057 6021 (svd->vp != NULL || svd->type == MAP_PRIVATE)) ||
6058 6022 (prot & ~PROT_USER) == PROT_NONE) {
6059 6023 /*
6060 6024 * Either private or shared data with write access (in
6061 6025 * which case we need to throw out all former translations
6062 6026 * so that we get the right translations set up on fault
6063 6027 * and we don't allow write access to any copy-on-write pages
6064 6028 * that might be around or to prevent write access to pages
6065 6029 * representing holes in a file), or we don't have permission
6066 6030 * to access the memory at all (in which case we have to
6067 6031 * unload any current translations that might exist).
6068 6032 */
6069 6033 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
6070 6034 } else {
6071 6035 /*
6072 6036 * A shared mapping or a private mapping in which write
6073 6037 * protection is going to be denied - just change all the
6074 6038 * protections over the range of addresses in question.
6075 6039 * segvn does not support any other attributes other
6076 6040 * than prot so we can use hat_chgattr.
6077 6041 */
6078 6042 hat_chgattr(seg->s_as->a_hat, addr, len, prot);
6079 6043 }
6080 6044
6081 6045 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6082 6046
6083 6047 return (0);
6084 6048 }
6085 6049
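For the PROT_WRITE path in segvn_setprot() above, the amount of swap reserved depends on whether the whole segment is affected and whether per-page reservations already exist. The sketch below isolates that sizing; the function, its parameters, and the swapres array (standing in for the per-vpage VPP_ISSWAPRES bit) are illustrative assumptions only.

/*
 * Sketch only: bytes of swap to reserve when a private mapping gains
 * write permission.  npages and pagesize describe the [addr, addr+len)
 * range; swapres[i] is non-zero when page i already has swap reserved.
 */
static unsigned long
swap_to_reserve_sketch(unsigned long seg_size, unsigned long len,
    int whole_segment, int pageswap, const unsigned char *swapres,
    unsigned long npages, unsigned long pagesize)
{
	unsigned long i, n = 0;

	if (whole_segment && !pageswap)
		return (seg_size);	/* reserve for the entire segment */
	if (!pageswap)
		return (len);		/* first partial request: reserve all of it */
	for (i = 0; i < npages; i++) {
		if (!swapres[i])	/* count only pages not yet reserved */
			n++;
	}
	return (n * pagesize);
}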
6086 6050 /*
6087 - * segvn_setpagesize is called via SEGOP_SETPAGESIZE from as_setpagesize,
6051 + * segvn_setpagesize is called via segop_setpagesize from as_setpagesize,
6088 6052 * to determine if the seg is capable of mapping the requested szc.
6089 6053 */
6090 6054 static int
6091 6055 segvn_setpagesize(struct seg *seg, caddr_t addr, size_t len, uint_t szc)
6092 6056 {
6093 6057 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6094 6058 struct segvn_data *nsvd;
6095 6059 struct anon_map *amp = svd->amp;
6096 6060 struct seg *nseg;
6097 6061 caddr_t eaddr = addr + len, a;
6098 6062 size_t pgsz = page_get_pagesize(szc);
6099 6063 pgcnt_t pgcnt = page_get_pagecnt(szc);
6100 6064 int err;
6101 6065 u_offset_t off = svd->offset + (uintptr_t)(addr - seg->s_base);
6102 6066
6103 6067 ASSERT(seg->s_as && AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6104 6068 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6105 6069
6106 6070 if (seg->s_szc == szc || segvn_lpg_disable != 0) {
6107 6071 return (0);
6108 6072 }
6109 6073
6110 6074 /*
6111 6075 * addr should always be pgsz aligned but eaddr may be misaligned if
6112 6076 * it's at the end of the segment.
6113 6077 *
6114 6078 * XXX we should assert this condition since as_setpagesize() logic
6115 6079 * guarantees it.
6116 6080 */
6117 6081 if (!IS_P2ALIGNED(addr, pgsz) ||
6118 6082 (!IS_P2ALIGNED(eaddr, pgsz) &&
6119 6083 eaddr != seg->s_base + seg->s_size)) {
6120 6084
6121 6085 segvn_setpgsz_align_err++;
6122 6086 return (EINVAL);
6123 6087 }
6124 6088
6125 6089 if (amp != NULL && svd->type == MAP_SHARED) {
6126 6090 ulong_t an_idx = svd->anon_index + seg_page(seg, addr);
6127 6091 if (!IS_P2ALIGNED(an_idx, pgcnt)) {
6128 6092
6129 6093 segvn_setpgsz_anon_align_err++;
6130 6094 return (EINVAL);
6131 6095 }
6132 6096 }
6133 6097
6134 6098 if ((svd->flags & MAP_NORESERVE) || seg->s_as == &kas ||
6135 6099 szc > segvn_maxpgszc) {
6136 6100 return (EINVAL);
6137 6101 }
6138 6102
6139 6103 /* paranoid check */
6140 6104 if (svd->vp != NULL &&
6141 6105 (IS_SWAPFSVP(svd->vp) || VN_ISKAS(svd->vp))) {
6142 6106 return (EINVAL);
6143 6107 }
6144 6108
6145 6109 if (seg->s_szc == 0 && svd->vp != NULL &&
6146 6110 map_addr_vacalign_check(addr, off)) {
6147 6111 return (EINVAL);
6148 6112 }
6149 6113
6150 6114 /*
6151 6115 * Check that protections are the same within new page
6152 6116 * size boundaries.
6153 6117 */
6154 6118 if (svd->pageprot) {
6155 6119 for (a = addr; a < eaddr; a += pgsz) {
6156 6120 if ((a + pgsz) > eaddr) {
6157 6121 if (!sameprot(seg, a, eaddr - a)) {
6158 6122 return (EINVAL);
6159 6123 }
6160 6124 } else {
6161 6125 if (!sameprot(seg, a, pgsz)) {
6162 6126 return (EINVAL);
6163 6127 }
6164 6128 }
6165 6129 }
6166 6130 }
6167 6131
6168 6132 /*
6169 6133 * Since we are changing page size we first have to flush
6170 6134 * the cache. This makes sure all the pagelock calls have
6171 6135 * to recheck protections.
6172 6136 */
6173 6137 if (svd->softlockcnt > 0) {
6174 6138 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6175 6139
6176 6140 /*
6177 6141 * If this is shared segment non 0 softlockcnt
6178 6142 * means locked pages are still in use.
6179 6143 */
6180 6144 if (svd->type == MAP_SHARED) {
6181 6145 return (EAGAIN);
6182 6146 }
6183 6147
6184 6148 /*
6185 6149 * Since we do have the segvn writers lock nobody can fill
6186 6150 * the cache with entries belonging to this seg during
6187 6151 * the purge. The flush either succeeds or we still have
6188 6152 * pending I/Os.
6189 6153 */
6190 6154 segvn_purge(seg);
6191 6155 if (svd->softlockcnt > 0) {
6192 6156 return (EAGAIN);
6193 6157 }
6194 6158 }
6195 6159
6196 6160 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6197 6161 ASSERT(svd->amp == NULL);
6198 6162 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6199 6163 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6200 6164 HAT_REGION_TEXT);
6201 6165 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6202 6166 } else if (svd->tr_state == SEGVN_TR_INIT) {
6203 6167 svd->tr_state = SEGVN_TR_OFF;
6204 6168 } else if (svd->tr_state == SEGVN_TR_ON) {
6205 6169 ASSERT(svd->amp != NULL);
6206 6170 segvn_textunrepl(seg, 1);
6207 6171 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6208 6172 amp = NULL;
6209 6173 }
6210 6174
6211 6175 /*
6212 6176 * Operation for sub range of existing segment.
6213 6177 */
6214 6178 if (addr != seg->s_base || eaddr != (seg->s_base + seg->s_size)) {
6215 6179 if (szc < seg->s_szc) {
6216 6180 VM_STAT_ADD(segvnvmstats.demoterange[2]);
6217 6181 err = segvn_demote_range(seg, addr, len, SDR_RANGE, 0);
6218 6182 if (err == 0) {
6219 6183 return (IE_RETRY);
6220 6184 }
6221 6185 if (err == ENOMEM) {
6222 6186 return (IE_NOMEM);
6223 6187 }
6224 6188 return (err);
6225 6189 }
6226 6190 if (addr != seg->s_base) {
6227 6191 nseg = segvn_split_seg(seg, addr);
6228 6192 if (eaddr != (nseg->s_base + nseg->s_size)) {
6229 6193 /* eaddr is szc aligned */
6230 6194 (void) segvn_split_seg(nseg, eaddr);
6231 6195 }
6232 6196 return (IE_RETRY);
6233 6197 }
6234 6198 if (eaddr != (seg->s_base + seg->s_size)) {
6235 6199 /* eaddr is szc aligned */
6236 6200 (void) segvn_split_seg(seg, eaddr);
6237 6201 }
6238 6202 return (IE_RETRY);
6239 6203 }
6240 6204
6241 6205 /*
6242 6206 * Break any low level sharing and reset seg->s_szc to 0.
6243 6207 */
6244 6208 if ((err = segvn_clrszc(seg)) != 0) {
6245 6209 if (err == ENOMEM) {
6246 6210 err = IE_NOMEM;
6247 6211 }
6248 6212 return (err);
6249 6213 }
6250 6214 ASSERT(seg->s_szc == 0);
6251 6215
6252 6216 /*
6253 6217 * If the end of the current segment is not pgsz aligned
6254 6218 * then attempt to concatenate with the next segment.
6255 6219 */
6256 6220 if (!IS_P2ALIGNED(eaddr, pgsz)) {
6257 6221 nseg = AS_SEGNEXT(seg->s_as, seg);
6258 6222 if (nseg == NULL || nseg == seg || eaddr != nseg->s_base) {
6259 6223 return (ENOMEM);
6260 6224 }
6261 6225 if (nseg->s_ops != &segvn_ops) {
6262 6226 return (EINVAL);
6263 6227 }
6264 6228 nsvd = (struct segvn_data *)nseg->s_data;
6265 6229 if (nsvd->softlockcnt > 0) {
6266 6230 /*
6267 6231 * If this is shared segment non 0 softlockcnt
6268 6232 * means locked pages are still in use.
6269 6233 */
6270 6234 if (nsvd->type == MAP_SHARED) {
6271 6235 return (EAGAIN);
6272 6236 }
6273 6237 segvn_purge(nseg);
6274 6238 if (nsvd->softlockcnt > 0) {
6275 6239 return (EAGAIN);
6276 6240 }
6277 6241 }
6278 6242 err = segvn_clrszc(nseg);
6279 6243 if (err == ENOMEM) {
6280 6244 err = IE_NOMEM;
6281 6245 }
6282 6246 if (err != 0) {
6283 6247 return (err);
6284 6248 }
6285 6249 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6286 6250 err = segvn_concat(seg, nseg, 1);
6287 6251 if (err == -1) {
6288 6252 return (EINVAL);
6289 6253 }
6290 6254 if (err == -2) {
6291 6255 return (IE_NOMEM);
6292 6256 }
6293 6257 return (IE_RETRY);
6294 6258 }
6295 6259
6296 6260 /*
6297 6261 * May need to re-align anon array to
6298 6262 * new szc.
6299 6263 */
6300 6264 if (amp != NULL) {
6301 6265 if (!IS_P2ALIGNED(svd->anon_index, pgcnt)) {
6302 6266 struct anon_hdr *nahp;
6303 6267
6304 6268 ASSERT(svd->type == MAP_PRIVATE);
6305 6269
6306 6270 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6307 6271 ASSERT(amp->refcnt == 1);
6308 6272 nahp = anon_create(btop(amp->size), ANON_NOSLEEP);
6309 6273 if (nahp == NULL) {
6310 6274 ANON_LOCK_EXIT(&amp->a_rwlock);
6311 6275 return (IE_NOMEM);
6312 6276 }
6313 6277 if (anon_copy_ptr(amp->ahp, svd->anon_index,
6314 6278 nahp, 0, btop(seg->s_size), ANON_NOSLEEP)) {
6315 6279 anon_release(nahp, btop(amp->size));
6316 6280 ANON_LOCK_EXIT(&amp->a_rwlock);
6317 6281 return (IE_NOMEM);
6318 6282 }
6319 6283 anon_release(amp->ahp, btop(amp->size));
6320 6284 amp->ahp = nahp;
6321 6285 svd->anon_index = 0;
6322 6286 ANON_LOCK_EXIT(&amp->a_rwlock);
6323 6287 }
6324 6288 }
6325 6289 if (svd->vp != NULL && szc != 0) {
6326 6290 struct vattr va;
6327 6291 u_offset_t eoffpage = svd->offset;
6328 6292 va.va_mask = AT_SIZE;
6329 6293 eoffpage += seg->s_size;
6330 6294 eoffpage = btopr(eoffpage);
6331 6295 if (VOP_GETATTR(svd->vp, &va, 0, svd->cred, NULL) != 0) {
6332 6296 segvn_setpgsz_getattr_err++;
6333 6297 return (EINVAL);
6334 6298 }
6335 6299 if (btopr(va.va_size) < eoffpage) {
6336 6300 segvn_setpgsz_eof_err++;
6337 6301 return (EINVAL);
6338 6302 }
6339 6303 if (amp != NULL) {
6340 6304 /*
6341 6305 * anon_fill_cow_holes() may call VOP_GETPAGE().
6342 6306 * don't take anon map lock here to avoid holding it
6343 6307 * across VOP_GETPAGE() calls that may call back into
6344 6308 * segvn for klustering checks. We don't really need
6345 6309 * anon map lock here since it's a private segment and
6346 6310 * we hold as level lock as writers.
6347 6311 */
6348 6312 if ((err = anon_fill_cow_holes(seg, seg->s_base,
6349 6313 amp->ahp, svd->anon_index, svd->vp, svd->offset,
6350 6314 seg->s_size, szc, svd->prot, svd->vpage,
6351 6315 svd->cred)) != 0) {
6352 6316 return (EINVAL);
6353 6317 }
6354 6318 }
6355 6319 segvn_setvnode_mpss(svd->vp);
6356 6320 }
6357 6321
6358 6322 if (amp != NULL) {
6359 6323 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6360 6324 if (svd->type == MAP_PRIVATE) {
6361 6325 amp->a_szc = szc;
6362 6326 } else if (szc > amp->a_szc) {
6363 6327 amp->a_szc = szc;
6364 6328 }
6365 6329 ANON_LOCK_EXIT(&amp->a_rwlock);
6366 6330 }
6367 6331
6368 6332 seg->s_szc = szc;
6369 6333
6370 6334 return (0);
6371 6335 }
6372 6336
6373 6337 static int
6374 6338 segvn_clrszc(struct seg *seg)
6375 6339 {
6376 6340 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6377 6341 struct anon_map *amp = svd->amp;
6378 6342 size_t pgsz;
6379 6343 pgcnt_t pages;
6380 6344 int err = 0;
6381 6345 caddr_t a = seg->s_base;
6382 6346 caddr_t ea = a + seg->s_size;
6383 6347 ulong_t an_idx = svd->anon_index;
6384 6348 vnode_t *vp = svd->vp;
6385 6349 struct vpage *vpage = svd->vpage;
6386 6350 page_t *anon_pl[1 + 1], *pp;
6387 6351 struct anon *ap, *oldap;
6388 6352 uint_t prot = svd->prot, vpprot;
6389 6353 int pageflag = 0;
6390 6354
6391 6355 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6392 6356 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
6393 6357 ASSERT(svd->softlockcnt == 0);
6394 6358
6395 6359 if (vp == NULL && amp == NULL) {
6396 6360 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6397 6361 seg->s_szc = 0;
6398 6362 return (0);
6399 6363 }
6400 6364
6401 6365 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
6402 6366 ASSERT(svd->amp == NULL);
6403 6367 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6404 6368 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
6405 6369 HAT_REGION_TEXT);
6406 6370 svd->rcookie = HAT_INVALID_REGION_COOKIE;
6407 6371 } else if (svd->tr_state == SEGVN_TR_ON) {
6408 6372 ASSERT(svd->amp != NULL);
6409 6373 segvn_textunrepl(seg, 1);
6410 6374 ASSERT(svd->amp == NULL && svd->tr_state == SEGVN_TR_OFF);
6411 6375 amp = NULL;
6412 6376 } else {
6413 6377 if (svd->tr_state != SEGVN_TR_OFF) {
6414 6378 ASSERT(svd->tr_state == SEGVN_TR_INIT);
6415 6379 svd->tr_state = SEGVN_TR_OFF;
6416 6380 }
6417 6381
6418 6382 /*
6419 6383 * do HAT_UNLOAD_UNMAP since we are changing the pagesize.
6420 6384 * unload argument is 0 when we are freeing the segment
6421 6385 * and unload was already done.
6422 6386 */
6423 6387 hat_unload(seg->s_as->a_hat, seg->s_base, seg->s_size,
6424 6388 HAT_UNLOAD_UNMAP);
6425 6389 }
6426 6390
6427 6391 if (amp == NULL || svd->type == MAP_SHARED) {
6428 6392 seg->s_szc = 0;
6429 6393 return (0);
6430 6394 }
6431 6395
6432 6396 pgsz = page_get_pagesize(seg->s_szc);
6433 6397 pages = btop(pgsz);
6434 6398
6435 6399 /*
6436 6400 * XXX anon rwlock is not really needed because this is a
6437 6401 * private segment and we are writers.
6438 6402 */
6439 6403 ANON_LOCK_ENTER(&amp->a_rwlock, RW_WRITER);
6440 6404
6441 6405 for (; a < ea; a += pgsz, an_idx += pages) {
6442 6406 if ((oldap = anon_get_ptr(amp->ahp, an_idx)) != NULL) {
6443 6407 ASSERT(vpage != NULL || svd->pageprot == 0);
6444 6408 if (vpage != NULL) {
6445 6409 ASSERT(sameprot(seg, a, pgsz));
6446 6410 prot = VPP_PROT(vpage);
6447 6411 pageflag = VPP_ISPPLOCK(vpage) ? LOCK_PAGE : 0;
6448 6412 }
6449 6413 if (seg->s_szc != 0) {
6450 6414 ASSERT(vp == NULL || anon_pages(amp->ahp,
6451 6415 an_idx, pages) == pages);
6452 6416 if ((err = anon_map_demotepages(amp, an_idx,
6453 6417 seg, a, prot, vpage, svd->cred)) != 0) {
6454 6418 goto out;
6455 6419 }
6456 6420 } else {
6457 6421 if (oldap->an_refcnt == 1) {
6458 6422 continue;
6459 6423 }
6460 6424 if ((err = anon_getpage(&oldap, &vpprot,
6461 6425 anon_pl, PAGESIZE, seg, a, S_READ,
6462 6426 svd->cred))) {
6463 6427 goto out;
6464 6428 }
6465 6429 if ((pp = anon_private(&ap, seg, a, prot,
6466 6430 anon_pl[0], pageflag, svd->cred)) == NULL) {
6467 6431 err = ENOMEM;
6468 6432 goto out;
6469 6433 }
6470 6434 anon_decref(oldap);
6471 6435 (void) anon_set_ptr(amp->ahp, an_idx, ap,
6472 6436 ANON_SLEEP);
6473 6437 page_unlock(pp);
6474 6438 }
6475 6439 }
6476 6440 vpage = (vpage == NULL) ? NULL : vpage + pages;
6477 6441 }
6478 6442
6479 6443 amp->a_szc = 0;
6480 6444 seg->s_szc = 0;
6481 6445 out:
6482 6446 ANON_LOCK_EXIT(&amp->a_rwlock);
6483 6447 return (err);
6484 6448 }
6485 6449
6486 6450 static int
6487 6451 segvn_claim_pages(
6488 6452 struct seg *seg,
6489 6453 struct vpage *svp,
6490 6454 u_offset_t off,
6491 6455 ulong_t anon_idx,
6492 6456 uint_t prot)
6493 6457 {
6494 6458 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6495 6459 size_t ppasize = (pgcnt + 1) * sizeof (page_t *);
6496 6460 page_t **ppa;
6497 6461 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6498 6462 struct anon_map *amp = svd->amp;
6499 6463 struct vpage *evp = svp + pgcnt;
6500 6464 caddr_t addr = ((uintptr_t)(svp - svd->vpage) << PAGESHIFT)
6501 6465 + seg->s_base;
6502 6466 struct anon *ap;
6503 6467 struct vnode *vp = svd->vp;
6504 6468 page_t *pp;
6505 6469 pgcnt_t pg_idx, i;
6506 6470 int err = 0;
6507 6471 anoff_t aoff;
6508 6472 int anon = (amp != NULL) ? 1 : 0;
6509 6473
6510 6474 ASSERT(svd->type == MAP_PRIVATE);
6511 6475 ASSERT(svd->vpage != NULL);
6512 6476 ASSERT(seg->s_szc != 0);
6513 6477 ASSERT(IS_P2ALIGNED(pgcnt, pgcnt));
6514 6478 ASSERT(amp == NULL || IS_P2ALIGNED(anon_idx, pgcnt));
6515 6479 ASSERT(sameprot(seg, addr, pgcnt << PAGESHIFT));
6516 6480
6517 6481 if (VPP_PROT(svp) == prot)
6518 6482 return (1);
6519 6483 if (!((VPP_PROT(svp) ^ prot) & PROT_WRITE))
6520 6484 return (1);
6521 6485
6522 6486 ppa = kmem_alloc(ppasize, KM_SLEEP);
6523 6487 if (anon && vp != NULL) {
6524 6488 if (anon_get_ptr(amp->ahp, anon_idx) == NULL) {
6525 6489 anon = 0;
6526 6490 ASSERT(!anon_pages(amp->ahp, anon_idx, pgcnt));
6527 6491 }
6528 6492 ASSERT(!anon ||
6529 6493 anon_pages(amp->ahp, anon_idx, pgcnt) == pgcnt);
6530 6494 }
6531 6495
6532 6496 for (*ppa = NULL, pg_idx = 0; svp < evp; svp++, anon_idx++) {
6533 6497 if (!VPP_ISPPLOCK(svp))
6534 6498 continue;
6535 6499 if (anon) {
6536 6500 ap = anon_get_ptr(amp->ahp, anon_idx);
6537 6501 if (ap == NULL) {
6538 6502 panic("segvn_claim_pages: no anon slot");
6539 6503 }
6540 6504 swap_xlate(ap, &vp, &aoff);
6541 6505 off = (u_offset_t)aoff;
6542 6506 }
6543 6507 ASSERT(vp != NULL);
6544 6508 if ((pp = page_lookup(vp,
6545 6509 (u_offset_t)off, SE_SHARED)) == NULL) {
6546 6510 panic("segvn_claim_pages: no page");
6547 6511 }
6548 6512 ppa[pg_idx++] = pp;
6549 6513 off += PAGESIZE;
6550 6514 }
6551 6515
6552 6516 if (ppa[0] == NULL) {
6553 6517 kmem_free(ppa, ppasize);
6554 6518 return (1);
6555 6519 }
6556 6520
6557 6521 ASSERT(pg_idx <= pgcnt);
6558 6522 ppa[pg_idx] = NULL;
6559 6523
6560 6524
6561 6525 /* Find each large page within ppa, and adjust its claim */
6562 6526
6563 6527 /* Does ppa cover a single large page? */
6564 6528 if (ppa[0]->p_szc == seg->s_szc) {
6565 6529 if (prot & PROT_WRITE)
6566 6530 err = page_addclaim_pages(ppa);
6567 6531 else
6568 6532 err = page_subclaim_pages(ppa);
6569 6533 } else {
6570 6534 for (i = 0; ppa[i]; i += pgcnt) {
6571 6535 ASSERT(IS_P2ALIGNED(page_pptonum(ppa[i]), pgcnt));
6572 6536 if (prot & PROT_WRITE)
6573 6537 err = page_addclaim_pages(&ppa[i]);
6574 6538 else
6575 6539 err = page_subclaim_pages(&ppa[i]);
6576 6540 if (err == 0)
6577 6541 break;
6578 6542 }
6579 6543 }
6580 6544
6581 6545 for (i = 0; i < pg_idx; i++) {
6582 6546 ASSERT(ppa[i] != NULL);
6583 6547 page_unlock(ppa[i]);
6584 6548 }
6585 6549
6586 6550 kmem_free(ppa, ppasize);
6587 6551 return (err);
6588 6552 }
6589 6553
6590 6554 /*
6591 6555 * Returns right (upper address) segment if split occurred.
6592 6556 * If the address is equal to the beginning or end of its segment it returns
6593 6557 * the current segment.
6594 6558 */
6595 6559 static struct seg *
6596 6560 segvn_split_seg(struct seg *seg, caddr_t addr)
6597 6561 {
6598 6562 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6599 6563 struct seg *nseg;
6600 6564 size_t nsize;
6601 6565 struct segvn_data *nsvd;
6602 6566
6603 6567 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6604 6568 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6605 6569
6606 6570 ASSERT(addr >= seg->s_base);
6607 6571 ASSERT(addr <= seg->s_base + seg->s_size);
6608 6572 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6609 6573
6610 6574 if (addr == seg->s_base || addr == seg->s_base + seg->s_size)
6611 6575 return (seg);
6612 6576
6613 6577 nsize = seg->s_base + seg->s_size - addr;
6614 6578 seg->s_size = addr - seg->s_base;
6615 6579 nseg = seg_alloc(seg->s_as, addr, nsize);
6616 6580 ASSERT(nseg != NULL);
6617 6581 nseg->s_ops = seg->s_ops;
6618 6582 nsvd = kmem_cache_alloc(segvn_cache, KM_SLEEP);
6619 6583 nseg->s_data = (void *)nsvd;
6620 6584 nseg->s_szc = seg->s_szc;
6621 6585 *nsvd = *svd;
6622 6586 ASSERT(nsvd->rcookie == HAT_INVALID_REGION_COOKIE);
6623 6587 nsvd->seg = nseg;
6624 6588 rw_init(&nsvd->lock, NULL, RW_DEFAULT, NULL);
6625 6589
6626 6590 if (nsvd->vp != NULL) {
6627 6591 VN_HOLD(nsvd->vp);
6628 6592 nsvd->offset = svd->offset +
6629 6593 (uintptr_t)(nseg->s_base - seg->s_base);
6630 6594 if (nsvd->type == MAP_SHARED)
6631 6595 lgrp_shm_policy_init(NULL, nsvd->vp);
6632 6596 } else {
6633 6597 /*
6634 6598 		 * The offset for an anonymous segment has no significance in
6635 6599 * terms of an offset into a file. If we were to use the above
6636 6600 * calculation instead, the structures read out of
6637 6601 * /proc/<pid>/xmap would be more difficult to decipher since
6638 6602 * it would be unclear whether two seemingly contiguous
6639 6603 * prxmap_t structures represented different segments or a
6640 6604 * single segment that had been split up into multiple prxmap_t
6641 6605 * structures (e.g. if some part of the segment had not yet
6642 6606 * been faulted in).
6643 6607 */
6644 6608 nsvd->offset = 0;
6645 6609 }
6646 6610
6647 6611 ASSERT(svd->softlockcnt == 0);
6648 6612 ASSERT(svd->softlockcnt_sbase == 0);
6649 6613 ASSERT(svd->softlockcnt_send == 0);
6650 6614 crhold(svd->cred);
6651 6615
6652 6616 if (svd->vpage != NULL) {
6653 6617 size_t bytes = vpgtob(seg_pages(seg));
6654 6618 size_t nbytes = vpgtob(seg_pages(nseg));
6655 6619 struct vpage *ovpage = svd->vpage;
6656 6620
6657 6621 svd->vpage = kmem_alloc(bytes, KM_SLEEP);
6658 6622 bcopy(ovpage, svd->vpage, bytes);
6659 6623 nsvd->vpage = kmem_alloc(nbytes, KM_SLEEP);
6660 6624 bcopy(ovpage + seg_pages(seg), nsvd->vpage, nbytes);
6661 6625 kmem_free(ovpage, bytes + nbytes);
6662 6626 }
6663 6627 if (svd->amp != NULL && svd->type == MAP_PRIVATE) {
6664 6628 struct anon_map *oamp = svd->amp, *namp;
6665 6629 struct anon_hdr *nahp;
6666 6630
6667 6631 ANON_LOCK_ENTER(&oamp->a_rwlock, RW_WRITER);
6668 6632 ASSERT(oamp->refcnt == 1);
6669 6633 nahp = anon_create(btop(seg->s_size), ANON_SLEEP);
6670 6634 (void) anon_copy_ptr(oamp->ahp, svd->anon_index,
6671 6635 nahp, 0, btop(seg->s_size), ANON_SLEEP);
6672 6636
6673 6637 namp = anonmap_alloc(nseg->s_size, 0, ANON_SLEEP);
6674 6638 namp->a_szc = nseg->s_szc;
6675 6639 (void) anon_copy_ptr(oamp->ahp,
6676 6640 svd->anon_index + btop(seg->s_size),
6677 6641 namp->ahp, 0, btop(nseg->s_size), ANON_SLEEP);
6678 6642 anon_release(oamp->ahp, btop(oamp->size));
6679 6643 oamp->ahp = nahp;
6680 6644 oamp->size = seg->s_size;
6681 6645 svd->anon_index = 0;
6682 6646 nsvd->amp = namp;
6683 6647 nsvd->anon_index = 0;
6684 6648 ANON_LOCK_EXIT(&oamp->a_rwlock);
6685 6649 } else if (svd->amp != NULL) {
6686 6650 pgcnt_t pgcnt = page_get_pagecnt(seg->s_szc);
6687 6651 ASSERT(svd->amp == nsvd->amp);
6688 6652 ASSERT(seg->s_szc <= svd->amp->a_szc);
6689 6653 nsvd->anon_index = svd->anon_index + seg_pages(seg);
6690 6654 ASSERT(IS_P2ALIGNED(nsvd->anon_index, pgcnt));
6691 6655 ANON_LOCK_ENTER(&svd->amp->a_rwlock, RW_WRITER);
6692 6656 svd->amp->refcnt++;
6693 6657 ANON_LOCK_EXIT(&svd->amp->a_rwlock);
6694 6658 }
6695 6659
6696 6660 /*
6697 6661 * Split the amount of swap reserved.
6698 6662 */
6699 6663 if (svd->swresv) {
6700 6664 /*
6701 6665 * For MAP_NORESERVE, only allocate swap reserve for pages
6702 6666 * being used. Other segments get enough to cover whole
6703 6667 * segment.
6704 6668 */
6705 6669 if (svd->flags & MAP_NORESERVE) {
6706 6670 size_t oswresv;
6707 6671
6708 6672 ASSERT(svd->amp);
6709 6673 oswresv = svd->swresv;
6710 6674 svd->swresv = ptob(anon_pages(svd->amp->ahp,
6711 6675 svd->anon_index, btop(seg->s_size)));
6712 6676 nsvd->swresv = ptob(anon_pages(nsvd->amp->ahp,
6713 6677 nsvd->anon_index, btop(nseg->s_size)));
6714 6678 ASSERT(oswresv >= (svd->swresv + nsvd->swresv));
6715 6679 } else {
6716 6680 if (svd->pageswap) {
6717 6681 svd->swresv = segvn_count_swap_by_vpages(seg);
6718 6682 ASSERT(nsvd->swresv >= svd->swresv);
6719 6683 nsvd->swresv -= svd->swresv;
6720 6684 } else {
6721 6685 ASSERT(svd->swresv == seg->s_size +
6722 6686 nseg->s_size);
6723 6687 svd->swresv = seg->s_size;
6724 6688 nsvd->swresv = nseg->s_size;
6725 6689 }
6726 6690 }
6727 6691 }
6728 6692
6729 6693 return (nseg);
6730 6694 }
6731 6695
6732 6696 /*
6733 6697 * called on memory operations (unmap, setprot, setpagesize) for a subset
6734 6698 * of a large page segment to either demote the memory range (SDR_RANGE)
6735 6699 * or the ends (SDR_END) by addr/len.
6736 6700 *
6737 6701 * returns 0 on success. returns errno, including ENOMEM, on failure.
6738 6702 */
6739 6703 static int
6740 6704 segvn_demote_range(
6741 6705 struct seg *seg,
6742 6706 caddr_t addr,
6743 6707 size_t len,
6744 6708 int flag,
6745 6709 uint_t szcvec)
6746 6710 {
6747 6711 caddr_t eaddr = addr + len;
6748 6712 caddr_t lpgaddr, lpgeaddr;
6749 6713 struct seg *nseg;
6750 6714 struct seg *badseg1 = NULL;
6751 6715 struct seg *badseg2 = NULL;
6752 6716 size_t pgsz;
6753 6717 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6754 6718 int err;
6755 6719 uint_t szc = seg->s_szc;
6756 6720 uint_t tszcvec;
6757 6721
6758 6722 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock));
6759 6723 ASSERT(svd->tr_state == SEGVN_TR_OFF);
6760 6724 ASSERT(szc != 0);
6761 6725 pgsz = page_get_pagesize(szc);
6762 6726 ASSERT(seg->s_base != addr || seg->s_size != len);
6763 6727 ASSERT(addr >= seg->s_base && eaddr <= seg->s_base + seg->s_size);
6764 6728 ASSERT(svd->softlockcnt == 0);
6765 6729 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
6766 6730 ASSERT(szcvec == 0 || (flag == SDR_END && svd->type == MAP_SHARED));
6767 6731
6768 6732 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
6769 6733 ASSERT(flag == SDR_RANGE || eaddr < lpgeaddr || addr > lpgaddr);
6770 6734 if (flag == SDR_RANGE) {
6771 6735 /* demote entire range */
6772 6736 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6773 6737 (void) segvn_split_seg(nseg, lpgeaddr);
6774 6738 ASSERT(badseg1->s_base == lpgaddr);
6775 6739 ASSERT(badseg1->s_size == lpgeaddr - lpgaddr);
6776 6740 } else if (addr != lpgaddr) {
6777 6741 ASSERT(flag == SDR_END);
6778 6742 badseg1 = nseg = segvn_split_seg(seg, lpgaddr);
6779 6743 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz &&
6780 6744 eaddr < lpgaddr + 2 * pgsz) {
6781 6745 (void) segvn_split_seg(nseg, lpgeaddr);
6782 6746 ASSERT(badseg1->s_base == lpgaddr);
6783 6747 ASSERT(badseg1->s_size == 2 * pgsz);
6784 6748 } else {
6785 6749 nseg = segvn_split_seg(nseg, lpgaddr + pgsz);
6786 6750 ASSERT(badseg1->s_base == lpgaddr);
6787 6751 ASSERT(badseg1->s_size == pgsz);
6788 6752 if (eaddr != lpgeaddr && eaddr > lpgaddr + pgsz) {
6789 6753 ASSERT(lpgeaddr - lpgaddr > 2 * pgsz);
6790 6754 nseg = segvn_split_seg(nseg, lpgeaddr - pgsz);
6791 6755 badseg2 = nseg;
6792 6756 (void) segvn_split_seg(nseg, lpgeaddr);
6793 6757 ASSERT(badseg2->s_base == lpgeaddr - pgsz);
6794 6758 ASSERT(badseg2->s_size == pgsz);
6795 6759 }
6796 6760 }
6797 6761 } else {
6798 6762 ASSERT(flag == SDR_END);
6799 6763 ASSERT(eaddr < lpgeaddr);
6800 6764 badseg1 = nseg = segvn_split_seg(seg, lpgeaddr - pgsz);
6801 6765 (void) segvn_split_seg(nseg, lpgeaddr);
6802 6766 ASSERT(badseg1->s_base == lpgeaddr - pgsz);
6803 6767 ASSERT(badseg1->s_size == pgsz);
6804 6768 }
6805 6769
6806 6770 ASSERT(badseg1 != NULL);
6807 6771 ASSERT(badseg1->s_szc == szc);
6808 6772 ASSERT(flag == SDR_RANGE || badseg1->s_size == pgsz ||
6809 6773 badseg1->s_size == 2 * pgsz);
6810 6774 ASSERT(sameprot(badseg1, badseg1->s_base, pgsz));
6811 6775 ASSERT(badseg1->s_size == pgsz ||
6812 6776 sameprot(badseg1, badseg1->s_base + pgsz, pgsz));
6813 6777 if (err = segvn_clrszc(badseg1)) {
6814 6778 return (err);
6815 6779 }
6816 6780 ASSERT(badseg1->s_szc == 0);
6817 6781
6818 6782 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6819 6783 uint_t tszc = highbit(tszcvec) - 1;
6820 6784 caddr_t ta = MAX(addr, badseg1->s_base);
6821 6785 caddr_t te;
6822 6786 size_t tpgsz = page_get_pagesize(tszc);
6823 6787
6824 6788 ASSERT(svd->type == MAP_SHARED);
6825 6789 ASSERT(flag == SDR_END);
6826 6790 ASSERT(tszc < szc && tszc > 0);
6827 6791
6828 6792 if (eaddr > badseg1->s_base + badseg1->s_size) {
6829 6793 te = badseg1->s_base + badseg1->s_size;
6830 6794 } else {
6831 6795 te = eaddr;
6832 6796 }
6833 6797
6834 6798 ASSERT(ta <= te);
6835 6799 badseg1->s_szc = tszc;
6836 6800 if (!IS_P2ALIGNED(ta, tpgsz) || !IS_P2ALIGNED(te, tpgsz)) {
6837 6801 if (badseg2 != NULL) {
6838 6802 err = segvn_demote_range(badseg1, ta, te - ta,
6839 6803 SDR_END, tszcvec);
6840 6804 if (err != 0) {
6841 6805 return (err);
6842 6806 }
6843 6807 } else {
6844 6808 return (segvn_demote_range(badseg1, ta,
6845 6809 te - ta, SDR_END, tszcvec));
6846 6810 }
6847 6811 }
6848 6812 }
6849 6813
6850 6814 if (badseg2 == NULL)
6851 6815 return (0);
6852 6816 ASSERT(badseg2->s_szc == szc);
6853 6817 ASSERT(badseg2->s_size == pgsz);
6854 6818 ASSERT(sameprot(badseg2, badseg2->s_base, badseg2->s_size));
6855 6819 if (err = segvn_clrszc(badseg2)) {
6856 6820 return (err);
6857 6821 }
6858 6822 ASSERT(badseg2->s_szc == 0);
6859 6823
6860 6824 if (szc > 1 && (tszcvec = P2PHASE(szcvec, 1 << szc)) > 1) {
6861 6825 uint_t tszc = highbit(tszcvec) - 1;
6862 6826 size_t tpgsz = page_get_pagesize(tszc);
6863 6827
6864 6828 ASSERT(svd->type == MAP_SHARED);
6865 6829 ASSERT(flag == SDR_END);
6866 6830 ASSERT(tszc < szc && tszc > 0);
6867 6831 ASSERT(badseg2->s_base > addr);
6868 6832 ASSERT(eaddr > badseg2->s_base);
6869 6833 ASSERT(eaddr < badseg2->s_base + badseg2->s_size);
6870 6834
6871 6835 badseg2->s_szc = tszc;
6872 6836 if (!IS_P2ALIGNED(eaddr, tpgsz)) {
6873 6837 return (segvn_demote_range(badseg2, badseg2->s_base,
6874 6838 eaddr - badseg2->s_base, SDR_END, tszcvec));
6875 6839 }
6876 6840 }
6877 6841
6878 6842 return (0);
6879 6843 }
6880 6844
6881 6845 static int
6882 6846 segvn_checkprot(struct seg *seg, caddr_t addr, size_t len, uint_t prot)
6883 6847 {
6884 6848 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6885 6849 struct vpage *vp, *evp;
6886 6850
6887 6851 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6888 6852
6889 6853 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6890 6854 /*
6891 6855 * If segment protection can be used, simply check against them.
6892 6856 */
6893 6857 if (svd->pageprot == 0) {
6894 6858 int err;
6895 6859
6896 6860 err = ((svd->prot & prot) != prot) ? EACCES : 0;
6897 6861 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6898 6862 return (err);
6899 6863 }
6900 6864
6901 6865 /*
6902 6866 * Have to check down to the vpage level.
6903 6867 */
6904 6868 evp = &svd->vpage[seg_page(seg, addr + len)];
6905 6869 for (vp = &svd->vpage[seg_page(seg, addr)]; vp < evp; vp++) {
6906 6870 if ((VPP_PROT(vp) & prot) != prot) {
6907 6871 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6908 6872 return (EACCES);
6909 6873 }
6910 6874 }
6911 6875 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6912 6876 return (0);
6913 6877 }
6914 6878
6915 6879 static int
6916 6880 segvn_getprot(struct seg *seg, caddr_t addr, size_t len, uint_t *protv)
6917 6881 {
6918 6882 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6919 6883 size_t pgno = seg_page(seg, addr + len) - seg_page(seg, addr) + 1;
6920 6884
6921 6885 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6922 6886
6923 6887 if (pgno != 0) {
6924 6888 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
6925 6889 if (svd->pageprot == 0) {
6926 6890 do {
6927 6891 protv[--pgno] = svd->prot;
6928 6892 } while (pgno != 0);
6929 6893 } else {
6930 6894 size_t pgoff = seg_page(seg, addr);
6931 6895
6932 6896 do {
6933 6897 pgno--;
6934 6898 protv[pgno] = VPP_PROT(&svd->vpage[pgno+pgoff]);
6935 6899 } while (pgno != 0);
6936 6900 }
6937 6901 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
6938 6902 }
6939 6903 return (0);
6940 6904 }
6941 6905
6942 6906 static u_offset_t
6943 6907 segvn_getoffset(struct seg *seg, caddr_t addr)
6944 6908 {
6945 6909 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6946 6910
6947 6911 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6948 6912
6949 6913 return (svd->offset + (uintptr_t)(addr - seg->s_base));
6950 6914 }
6951 6915
6952 6916 /*ARGSUSED*/
6953 6917 static int
6954 6918 segvn_gettype(struct seg *seg, caddr_t addr)
6955 6919 {
6956 6920 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6957 6921
6958 6922 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6959 6923
6960 6924 return (svd->type | (svd->flags & (MAP_NORESERVE | MAP_TEXT |
6961 6925 MAP_INITDATA)));
6962 6926 }
6963 6927
6964 6928 /*ARGSUSED*/
6965 6929 static int
6966 6930 segvn_getvp(struct seg *seg, caddr_t addr, struct vnode **vpp)
6967 6931 {
6968 6932 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6969 6933
6970 6934 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6971 6935
6972 6936 *vpp = svd->vp;
6973 6937 return (0);
6974 6938 }
6975 6939
6976 6940 /*
6977 6941 * Check to see if it makes sense to do kluster/read ahead to
6978 6942 * addr + delta relative to the mapping at addr. We assume here
6979 6943 * that delta is a signed PAGESIZE'd multiple (which can be negative).
6980 6944 *
6981 6945 * For segvn, we currently "approve" of the action if we are
6982 6946 * still in the segment and it maps from the same vp/off,
6983 6947 * or if the advice stored in segvn_data or vpages allows it.
6984 6948 * Currently, klustering is not allowed only if MADV_RANDOM is set.
6985 6949 */
6986 6950 static int
6987 6951 segvn_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
6988 6952 {
6989 6953 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
6990 6954 struct anon *oap, *ap;
6991 6955 ssize_t pd;
6992 6956 size_t page;
6993 6957 struct vnode *vp1, *vp2;
6994 6958 u_offset_t off1, off2;
6995 6959 struct anon_map *amp;
6996 6960
6997 6961 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
6998 6962 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
6999 6963 SEGVN_LOCK_HELD(seg->s_as, &svd->lock));
7000 6964
7001 6965 if (addr + delta < seg->s_base ||
7002 6966 addr + delta >= (seg->s_base + seg->s_size))
7003 6967 return (-1); /* exceeded segment bounds */
7004 6968
7005 6969 pd = delta / (ssize_t)PAGESIZE; /* divide to preserve sign bit */
7006 6970 page = seg_page(seg, addr);
7007 6971
7008 6972 /*
7009 6973 * Check to see if either of the pages addr or addr + delta
7010 6974 * have advice set that prevents klustering (if MADV_RANDOM advice
7011 6975 * is set for entire segment, or MADV_SEQUENTIAL is set and delta
7012 6976 * is negative).
7013 6977 */
7014 6978 if (svd->advice == MADV_RANDOM ||
7015 6979 svd->advice == MADV_SEQUENTIAL && delta < 0)
7016 6980 return (-1);
7017 6981 else if (svd->pageadvice && svd->vpage) {
7018 6982 struct vpage *bvpp, *evpp;
7019 6983
7020 6984 bvpp = &svd->vpage[page];
7021 6985 evpp = &svd->vpage[page + pd];
7022 6986 if (VPP_ADVICE(bvpp) == MADV_RANDOM ||
7023 6987 VPP_ADVICE(evpp) == MADV_SEQUENTIAL && delta < 0)
7024 6988 return (-1);
7025 6989 if (VPP_ADVICE(bvpp) != VPP_ADVICE(evpp) &&
7026 6990 VPP_ADVICE(evpp) == MADV_RANDOM)
7027 6991 return (-1);
7028 6992 }
7029 6993
7030 6994 if (svd->type == MAP_SHARED)
7031 6995 return (0); /* shared mapping - all ok */
7032 6996
7033 6997 if ((amp = svd->amp) == NULL)
7034 6998 return (0); /* off original vnode */
7035 6999
7036 7000 page += svd->anon_index;
7037 7001
7038 7002 	ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7039 7003
7040 7004 oap = anon_get_ptr(amp->ahp, page);
7041 7005 ap = anon_get_ptr(amp->ahp, page + pd);
7042 7006
7043 7007 	ANON_LOCK_EXIT(&amp->a_rwlock);
7044 7008
7045 7009 if ((oap == NULL && ap != NULL) || (oap != NULL && ap == NULL)) {
7046 7010 return (-1); /* one with and one without an anon */
7047 7011 }
7048 7012
7049 7013 if (oap == NULL) { /* implies that ap == NULL */
7050 7014 return (0); /* off original vnode */
7051 7015 }
7052 7016
7053 7017 /*
7054 7018 * Now we know we have two anon pointers - check to
7055 7019 * see if they happen to be properly allocated.
7056 7020 */
7057 7021
7058 7022 /*
7059 7023 * XXX We cheat here and don't lock the anon slots. We can't because
7060 7024 * we may have been called from the anon layer which might already
7061 7025 * have locked them. We are holding a refcnt on the slots so they
7062 7026 * can't disappear. The worst that will happen is we'll get the wrong
7063 7027 * names (vp, off) for the slots and make a poor klustering decision.
7064 7028 */
7065 7029 swap_xlate(ap, &vp1, &off1);
7066 7030 swap_xlate(oap, &vp2, &off2);
7067 7031
7068 7032
7069 7033 if (!VOP_CMP(vp1, vp2, NULL) || off1 - off2 != delta)
7070 7034 return (-1);
7071 7035 return (0);
7072 7036 }
7073 7037
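For context, the kluster op above is what file systems consult from their
getpage paths when deciding whether read-ahead is worthwhile. A minimal
sketch (illustrative only, not part of this patch; it assumes the
SEGOP_KLUSTER() wrapper from <vm/seg.h> and omits the surrounding paging
logic):

#include <sys/param.h>
#include <vm/seg.h>

/*
 * Illustrative only: ask the segment driver whether klustering to the
 * next page of this mapping makes sense before issuing read-ahead.
 * A non-zero return from the kluster op means the target is outside
 * the segment, backed differently, or forbidden by MADV_RANDOM.
 */
static int
want_readahead(struct seg *seg, caddr_t addr)
{
	return (SEGOP_KLUSTER(seg, addr, PAGESIZE) == 0);
}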
7074 7038 /*
7075 - * Swap the pages of seg out to secondary storage, returning the
7076 - * number of bytes of storage freed.
7077 - *
7078 - * The basic idea is first to unload all translations and then to call
7079 - * VOP_PUTPAGE() for all newly-unmapped pages, to push them out to the
7080 - * swap device. Pages to which other segments have mappings will remain
7081 - * mapped and won't be swapped. Our caller (as_swapout) has already
7082 - * performed the unloading step.
7083 - *
7084 - * The value returned is intended to correlate well with the process's
7085 - * memory requirements. However, there are some caveats:
7086 - * 1) When given a shared segment as argument, this routine will
7087 - * only succeed in swapping out pages for the last sharer of the
7088 - * segment. (Previous callers will only have decremented mapping
7089 - * reference counts.)
7090 - * 2) We assume that the hat layer maintains a large enough translation
7091 - * cache to capture process reference patterns.
7092 - */
7093 -static size_t
7094 -segvn_swapout(struct seg *seg)
7095 -{
7096 - struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7097 - struct anon_map *amp;
7098 - pgcnt_t pgcnt = 0;
7099 - pgcnt_t npages;
7100 - pgcnt_t page;
7101 - ulong_t anon_index;
7102 -
7103 - ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7104 -
7105 - SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7106 - /*
7107 - * Find pages unmapped by our caller and force them
7108 - * out to the virtual swap device.
7109 - */
7110 - if ((amp = svd->amp) != NULL)
7111 - anon_index = svd->anon_index;
7112 - npages = seg->s_size >> PAGESHIFT;
7113 - for (page = 0; page < npages; page++) {
7114 - page_t *pp;
7115 - struct anon *ap;
7116 - struct vnode *vp;
7117 - u_offset_t off;
7118 - anon_sync_obj_t cookie;
7119 -
7120 - /*
7121 - * Obtain <vp, off> pair for the page, then look it up.
7122 - *
7123 - * Note that this code is willing to consider regular
7124 - * pages as well as anon pages. Is this appropriate here?
7125 - */
7126 - ap = NULL;
7127 - if (amp != NULL) {
7128 -			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7129 - if (anon_array_try_enter(amp, anon_index + page,
7130 - &cookie)) {
7131 -				ANON_LOCK_EXIT(&amp->a_rwlock);
7132 - continue;
7133 - }
7134 - ap = anon_get_ptr(amp->ahp, anon_index + page);
7135 - if (ap != NULL) {
7136 - swap_xlate(ap, &vp, &off);
7137 - } else {
7138 - vp = svd->vp;
7139 - off = svd->offset + ptob(page);
7140 - }
7141 - anon_array_exit(&cookie);
7142 -			ANON_LOCK_EXIT(&amp->a_rwlock);
7143 - } else {
7144 - vp = svd->vp;
7145 - off = svd->offset + ptob(page);
7146 - }
7147 - if (vp == NULL) { /* untouched zfod page */
7148 - ASSERT(ap == NULL);
7149 - continue;
7150 - }
7151 -
7152 - pp = page_lookup_nowait(vp, off, SE_SHARED);
7153 - if (pp == NULL)
7154 - continue;
7155 -
7156 -
7157 - /*
7158 - * Examine the page to see whether it can be tossed out,
7159 - * keeping track of how many we've found.
7160 - */
7161 - if (!page_tryupgrade(pp)) {
7162 - /*
7163 - * If the page has an i/o lock and no mappings,
7164 - * it's very likely that the page is being
7165 - * written out as a result of klustering.
7166 - * Assume this is so and take credit for it here.
7167 - */
7168 - if (!page_io_trylock(pp)) {
7169 - if (!hat_page_is_mapped(pp))
7170 - pgcnt++;
7171 - } else {
7172 - page_io_unlock(pp);
7173 - }
7174 - page_unlock(pp);
7175 - continue;
7176 - }
7177 - ASSERT(!page_iolock_assert(pp));
7178 -
7179 -
7180 - /*
7181 - * Skip if page is locked or has mappings.
7182 - * We don't need the page_struct_lock to look at lckcnt
7183 - * and cowcnt because the page is exclusive locked.
7184 - */
7185 - if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0 ||
7186 - hat_page_is_mapped(pp)) {
7187 - page_unlock(pp);
7188 - continue;
7189 - }
7190 -
7191 - /*
7192 - * dispose skips large pages so try to demote first.
7193 - */
7194 - if (pp->p_szc != 0 && !page_try_demote_pages(pp)) {
7195 - page_unlock(pp);
7196 - /*
7197 - * XXX should skip the remaining page_t's of this
7198 - * large page.
7199 - */
7200 - continue;
7201 - }
7202 -
7203 - ASSERT(pp->p_szc == 0);
7204 -
7205 - /*
7206 - * No longer mapped -- we can toss it out. How
7207 - * we do so depends on whether or not it's dirty.
7208 - */
7209 - if (hat_ismod(pp) && pp->p_vnode) {
7210 - /*
7211 - * We must clean the page before it can be
7212 - * freed. Setting B_FREE will cause pvn_done
7213 - * to free the page when the i/o completes.
7214 - * XXX: This also causes it to be accounted
7215 - * as a pageout instead of a swap: need
7216 - * B_SWAPOUT bit to use instead of B_FREE.
7217 - *
7218 - * Hold the vnode before releasing the page lock
7219 - * to prevent it from being freed and re-used by
7220 - * some other thread.
7221 - */
7222 - VN_HOLD(vp);
7223 - page_unlock(pp);
7224 -
7225 - /*
7226 - * Queue all i/o requests for the pageout thread
7227 - * to avoid saturating the pageout devices.
7228 - */
7229 - if (!queue_io_request(vp, off))
7230 - VN_RELE(vp);
7231 - } else {
7232 - /*
7233 - * The page was clean, free it.
7234 - *
7235 - * XXX: Can we ever encounter modified pages
7236 - * with no associated vnode here?
7237 - */
7238 - ASSERT(pp->p_vnode != NULL);
7239 - /*LINTED: constant in conditional context*/
7240 - VN_DISPOSE(pp, B_FREE, 0, kcred);
7241 - }
7242 -
7243 - /*
7244 - * Credit now even if i/o is in progress.
7245 - */
7246 - pgcnt++;
7247 - }
7248 - SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7249 -
7250 - /*
7251 - * Wakeup pageout to initiate i/o on all queued requests.
7252 - */
7253 - cv_signal_pageout();
7254 - return (ptob(pgcnt));
7255 -}
7256 -
7257 -/*
7258 7039 * Synchronize primary storage cache with real object in virtual memory.
7259 7040 *
7260 7041 * XXX - Anonymous pages should not be sync'ed out at all.
7261 7042 */
7262 7043 static int
7263 7044 segvn_sync(struct seg *seg, caddr_t addr, size_t len, int attr, uint_t flags)
7264 7045 {
7265 7046 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7266 7047 struct vpage *vpp;
7267 7048 page_t *pp;
7268 7049 u_offset_t offset;
7269 7050 struct vnode *vp;
7270 7051 u_offset_t off;
7271 7052 caddr_t eaddr;
7272 7053 int bflags;
7273 7054 int err = 0;
7274 7055 int segtype;
7275 7056 int pageprot;
7276 7057 int prot;
7277 7058 ulong_t anon_index;
7278 7059 struct anon_map *amp;
7279 7060 struct anon *ap;
7280 7061 anon_sync_obj_t cookie;
7281 7062
7282 7063 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7283 7064
7284 7065 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7285 7066
7286 7067 if (svd->softlockcnt > 0) {
7287 7068 /*
7288 7069 * If this is shared segment non 0 softlockcnt
7289 7070 * means locked pages are still in use.
7290 7071 */
7291 7072 if (svd->type == MAP_SHARED) {
7292 7073 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7293 7074 return (EAGAIN);
7294 7075 }
7295 7076
7296 7077 /*
7297 7078 * flush all pages from seg cache
7298 7079 * otherwise we may deadlock in swap_putpage
7299 7080 * for B_INVAL page (4175402).
7300 7081 *
7301 7082 * Even if we grab segvn WRITER's lock
7302 7083 * here, there might be another thread which could've
7303 7084 * successfully performed lookup/insert just before
7304 7085 * we acquired the lock here. So, grabbing either
7305 7086 * lock here is of not much use. Until we devise
7306 7087 * a strategy at upper layers to solve the
7307 7088 * synchronization issues completely, we expect
7308 7089 * applications to handle this appropriately.
7309 7090 */
7310 7091 segvn_purge(seg);
7311 7092 if (svd->softlockcnt > 0) {
7312 7093 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7313 7094 return (EAGAIN);
7314 7095 }
7315 7096 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
7316 7097 svd->amp->a_softlockcnt > 0) {
7317 7098 /*
7318 7099 * Try to purge this amp's entries from pcache. It will
7319 7100 * succeed only if other segments that share the amp have no
7320 7101 * outstanding softlock's.
7321 7102 */
7322 7103 segvn_purge(seg);
7323 7104 if (svd->amp->a_softlockcnt > 0 || svd->softlockcnt > 0) {
7324 7105 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7325 7106 return (EAGAIN);
7326 7107 }
7327 7108 }
7328 7109
7329 7110 vpp = svd->vpage;
7330 7111 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7331 7112 bflags = ((flags & MS_ASYNC) ? B_ASYNC : 0) |
7332 7113 ((flags & MS_INVALIDATE) ? B_INVAL : 0);
7333 7114
7334 7115 if (attr) {
7335 7116 pageprot = attr & ~(SHARED|PRIVATE);
7336 7117 segtype = (attr & SHARED) ? MAP_SHARED : MAP_PRIVATE;
7337 7118
7338 7119 /*
7339 7120 * We are done if the segment types don't match
7340 7121 * or if we have segment level protections and
7341 7122 * they don't match.
7342 7123 */
7343 7124 if (svd->type != segtype) {
7344 7125 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7345 7126 return (0);
7346 7127 }
7347 7128 if (vpp == NULL) {
7348 7129 if (svd->prot != pageprot) {
7349 7130 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7350 7131 return (0);
7351 7132 }
7352 7133 prot = svd->prot;
7353 7134 } else
7354 7135 vpp = &svd->vpage[seg_page(seg, addr)];
7355 7136
7356 7137 } else if (svd->vp && svd->amp == NULL &&
7357 7138 (flags & MS_INVALIDATE) == 0) {
7358 7139
7359 7140 /*
7360 7141 * No attributes, no anonymous pages and MS_INVALIDATE flag
7361 7142 * is not on, just use one big request.
7362 7143 */
7363 7144 err = VOP_PUTPAGE(svd->vp, (offset_t)offset, len,
7364 7145 bflags, svd->cred, NULL);
7365 7146 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7366 7147 return (err);
7367 7148 }
7368 7149
7369 7150 if ((amp = svd->amp) != NULL)
7370 7151 anon_index = svd->anon_index + seg_page(seg, addr);
7371 7152
7372 7153 for (eaddr = addr + len; addr < eaddr; addr += PAGESIZE) {
7373 7154 ap = NULL;
7374 7155 if (amp != NULL) {
7375 7156 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7376 7157 anon_array_enter(amp, anon_index, &cookie);
7377 7158 ap = anon_get_ptr(amp->ahp, anon_index++);
7378 7159 if (ap != NULL) {
7379 7160 swap_xlate(ap, &vp, &off);
7380 7161 } else {
7381 7162 vp = svd->vp;
7382 7163 off = offset;
7383 7164 }
7384 7165 anon_array_exit(&cookie);
7385 7166 			ANON_LOCK_EXIT(&amp->a_rwlock);
7386 7167 } else {
7387 7168 vp = svd->vp;
7388 7169 off = offset;
7389 7170 }
7390 7171 offset += PAGESIZE;
7391 7172
7392 7173 if (vp == NULL) /* untouched zfod page */
7393 7174 continue;
7394 7175
7395 7176 if (attr) {
7396 7177 if (vpp) {
7397 7178 prot = VPP_PROT(vpp);
7398 7179 vpp++;
7399 7180 }
7400 7181 if (prot != pageprot) {
7401 7182 continue;
7402 7183 }
7403 7184 }
7404 7185
7405 7186 /*
7406 7187 * See if any of these pages are locked -- if so, then we
7407 7188 * will have to truncate an invalidate request at the first
7408 7189 * locked one. We don't need the page_struct_lock to test
7409 7190 * as this is only advisory; even if we acquire it someone
7410 7191 * might race in and lock the page after we unlock and before
7411 7192 * we do the PUTPAGE, then PUTPAGE simply does nothing.
7412 7193 */
7413 7194 if (flags & MS_INVALIDATE) {
7414 7195 if ((pp = page_lookup(vp, off, SE_SHARED)) != NULL) {
7415 7196 if (pp->p_lckcnt != 0 || pp->p_cowcnt != 0) {
7416 7197 page_unlock(pp);
7417 7198 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7418 7199 return (EBUSY);
7419 7200 }
7420 7201 if (ap != NULL && pp->p_szc != 0 &&
7421 7202 page_tryupgrade(pp)) {
7422 7203 if (pp->p_lckcnt == 0 &&
7423 7204 pp->p_cowcnt == 0) {
7424 7205 /*
7425 7206 * swapfs VN_DISPOSE() won't
7426 7207 * invalidate large pages.
7427 7208 * Attempt to demote.
7428 7209 * XXX can't help it if it
7429 7210 * fails. But for swapfs
7430 7211 * pages it is no big deal.
7431 7212 */
7432 7213 (void) page_try_demote_pages(
7433 7214 pp);
7434 7215 }
7435 7216 }
7436 7217 page_unlock(pp);
7437 7218 }
7438 7219 } else if (svd->type == MAP_SHARED && amp != NULL) {
7439 7220 /*
7440 7221 * Avoid writing out to disk ISM's large pages
7441 7222 * because segspt_free_pages() relies on NULL an_pvp
7442 7223 * of anon slots of such pages.
7443 7224 */
7444 7225
7445 7226 ASSERT(svd->vp == NULL);
7446 7227 /*
7447 7228 * swapfs uses page_lookup_nowait if not freeing or
7448 7229 * invalidating and skips a page if
7449 7230 * page_lookup_nowait returns NULL.
7450 7231 */
7451 7232 pp = page_lookup_nowait(vp, off, SE_SHARED);
7452 7233 if (pp == NULL) {
7453 7234 continue;
7454 7235 }
7455 7236 if (pp->p_szc != 0) {
7456 7237 page_unlock(pp);
7457 7238 continue;
7458 7239 }
7459 7240
7460 7241 /*
7461 7242 * Note ISM pages are created large so (vp, off)'s
7462 7243 * page cannot suddenly become large after we unlock
7463 7244 * pp.
7464 7245 */
7465 7246 page_unlock(pp);
7466 7247 }
7467 7248 /*
7468 7249 * XXX - Should ultimately try to kluster
7469 7250 * calls to VOP_PUTPAGE() for performance.
7470 7251 */
7471 7252 VN_HOLD(vp);
7472 7253 err = VOP_PUTPAGE(vp, (offset_t)off, PAGESIZE,
7473 7254 (bflags | (IS_SWAPFSVP(vp) ? B_PAGE_NOWAIT : 0)),
7474 7255 svd->cred, NULL);
7475 7256
7476 7257 VN_RELE(vp);
7477 7258 if (err)
7478 7259 break;
7479 7260 }
7480 7261 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7481 7262 return (err);
7482 7263 }
7483 7264
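segvn_sync() above is the segment half of msync(3C) for vnode/anon
mappings. A minimal userland sketch of the call it services (illustrative
only; only the return value is checked):

#include <sys/types.h>
#include <sys/mman.h>

/*
 * Illustrative only: write back and invalidate a mapped range.  With
 * MS_INVALIDATE, segvn_sync() returns EBUSY if any page in the range
 * is locked (the pp->p_lckcnt/p_cowcnt check above).
 */
int
flush_mapping(caddr_t addr, size_t len)
{
	return (msync(addr, len, MS_SYNC | MS_INVALIDATE));
}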
7484 7265 /*
7485 7266 * Determine if we have data corresponding to pages in the
7486 7267 * primary storage virtual memory cache (i.e., "in core").
7487 7268 */
7488 7269 static size_t
7489 7270 segvn_incore(struct seg *seg, caddr_t addr, size_t len, char *vec)
7490 7271 {
7491 7272 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7492 7273 struct vnode *vp, *avp;
7493 7274 u_offset_t offset, aoffset;
7494 7275 size_t p, ep;
7495 7276 int ret;
7496 7277 struct vpage *vpp;
7497 7278 page_t *pp;
7498 7279 uint_t start;
7499 7280 struct anon_map *amp; /* XXX - for locknest */
7500 7281 struct anon *ap;
7501 7282 uint_t attr;
7502 7283 anon_sync_obj_t cookie;
7503 7284
7504 7285 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7505 7286
7506 7287 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
7507 7288 if (svd->amp == NULL && svd->vp == NULL) {
7508 7289 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7509 7290 bzero(vec, btopr(len));
7510 7291 return (len); /* no anonymous pages created yet */
7511 7292 }
7512 7293
7513 7294 p = seg_page(seg, addr);
7514 7295 ep = seg_page(seg, addr + len);
7515 7296 start = svd->vp ? SEG_PAGE_VNODEBACKED : 0;
7516 7297
7517 7298 amp = svd->amp;
7518 7299 for (; p < ep; p++, addr += PAGESIZE) {
7519 7300 vpp = (svd->vpage) ? &svd->vpage[p]: NULL;
7520 7301 ret = start;
7521 7302 ap = NULL;
7522 7303 avp = NULL;
7523 7304 /* Grab the vnode/offset for the anon slot */
7524 7305 if (amp != NULL) {
7525 7306 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7526 7307 anon_array_enter(amp, svd->anon_index + p, &cookie);
7527 7308 ap = anon_get_ptr(amp->ahp, svd->anon_index + p);
7528 7309 if (ap != NULL) {
7529 7310 swap_xlate(ap, &avp, &aoffset);
7530 7311 }
7531 7312 anon_array_exit(&cookie);
7532 7313 			ANON_LOCK_EXIT(&amp->a_rwlock);
7533 7314 }
7534 7315 if ((avp != NULL) && page_exists(avp, aoffset)) {
7535 7316 /* A page exists for the anon slot */
7536 7317 ret |= SEG_PAGE_INCORE;
7537 7318
7538 7319 /*
7539 7320 * If page is mapped and writable
7540 7321 */
7541 7322 attr = (uint_t)0;
7542 7323 if ((hat_getattr(seg->s_as->a_hat, addr,
7543 7324 &attr) != -1) && (attr & PROT_WRITE)) {
7544 7325 ret |= SEG_PAGE_ANON;
7545 7326 }
7546 7327 /*
7547 7328 * Don't get page_struct lock for lckcnt and cowcnt,
7548 7329 * since this is purely advisory.
7549 7330 */
7550 7331 if ((pp = page_lookup_nowait(avp, aoffset,
7551 7332 SE_SHARED)) != NULL) {
7552 7333 if (pp->p_lckcnt)
7553 7334 ret |= SEG_PAGE_SOFTLOCK;
7554 7335 if (pp->p_cowcnt)
7555 7336 ret |= SEG_PAGE_HASCOW;
7556 7337 page_unlock(pp);
7557 7338 }
7558 7339 }
7559 7340
7560 7341 /* Gather vnode statistics */
7561 7342 vp = svd->vp;
7562 7343 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7563 7344
7564 7345 if (vp != NULL) {
7565 7346 /*
7566 7347 * Try to obtain a "shared" lock on the page
7567 7348 * without blocking. If this fails, determine
7568 7349 * if the page is in memory.
7569 7350 */
7570 7351 pp = page_lookup_nowait(vp, offset, SE_SHARED);
7571 7352 if ((pp == NULL) && (page_exists(vp, offset))) {
7572 7353 /* Page is incore, and is named */
7573 7354 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7574 7355 }
7575 7356 /*
7576 7357 * Don't get page_struct lock for lckcnt and cowcnt,
7577 7358 * since this is purely advisory.
7578 7359 */
7579 7360 if (pp != NULL) {
7580 7361 ret |= (SEG_PAGE_INCORE | SEG_PAGE_VNODE);
7581 7362 if (pp->p_lckcnt)
7582 7363 ret |= SEG_PAGE_SOFTLOCK;
7583 7364 if (pp->p_cowcnt)
7584 7365 ret |= SEG_PAGE_HASCOW;
7585 7366 page_unlock(pp);
7586 7367 }
7587 7368 }
7588 7369
7589 7370 /* Gather virtual page information */
7590 7371 if (vpp) {
7591 7372 if (VPP_ISPPLOCK(vpp))
7592 7373 ret |= SEG_PAGE_LOCKED;
7593 7374 vpp++;
7594 7375 }
7595 7376
7596 7377 *vec++ = (char)ret;
7597 7378 }
7598 7379 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7599 7380 return (len);
7600 7381 }
7601 7382
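segvn_incore() above fills the per-page vector that mincore(3C) returns.
A hedged userland sketch of how that vector is typically consumed (the
helper name is hypothetical; only the low "in core" bit is examined):

#include <sys/types.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <unistd.h>

/* Illustrative only: count resident pages in an existing mapping. */
long
resident_pages(caddr_t addr, size_t len)
{
	long pagesize = sysconf(_SC_PAGESIZE);
	size_t npages = (len + pagesize - 1) / pagesize;
	char *vec = malloc(npages);
	long n = 0;

	if (vec == NULL || mincore(addr, len, vec) != 0) {
		free(vec);
		return (-1);
	}
	for (size_t i = 0; i < npages; i++) {
		if (vec[i] & 0x1)	/* low bit: page is in core */
			n++;
	}
	free(vec);
	return (n);
}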
7602 7383 /*
7603 7384 * Statement for p_cowcnts/p_lckcnts.
7604 7385 *
7605 7386 * p_cowcnt is updated while mlock/munlocking MAP_PRIVATE and PROT_WRITE region
7606 7387 * irrespective of the following factors or anything else:
7607 7388 *
7608 7389 * (1) anon slots are populated or not
7609 7390 * (2) cow is broken or not
7610 7391 * (3) refcnt on ap is 1 or greater than 1
7611 7392 *
7612 7393 * If it's not MAP_PRIVATE and PROT_WRITE, p_lckcnt is updated during mlock
7613 7394 * and munlock.
7614 7395 *
7615 7396 *
7616 7397 * Handling p_cowcnts/p_lckcnts during copy-on-write fault:
7617 7398 *
7618 7399 * if vpage has PROT_WRITE
7619 7400 * transfer cowcnt on the oldpage -> cowcnt on the newpage
7620 7401 * else
7621 7402 * transfer lckcnt on the oldpage -> lckcnt on the newpage
7622 7403 *
7623 7404 * During copy-on-write, decrement p_cowcnt on the oldpage and increment
7624 7405 * p_cowcnt on the newpage *if* the corresponding vpage has PROT_WRITE.
7625 7406 *
7626 7407 * We may also break COW if softlocking on read access in the physio case.
7627 7408 * In this case, vpage may not have PROT_WRITE. So, we need to decrement
7628 7409 * p_lckcnt on the oldpage and increment p_lckcnt on the newpage *if* the
7629 7410 * vpage doesn't have PROT_WRITE.
7630 7411 *
7631 7412 *
7632 7413 * Handling p_cowcnts/p_lckcnts during mprotect on mlocked region:
7633 7414 *
7634 7415 * If a MAP_PRIVATE region loses PROT_WRITE, we decrement p_cowcnt and
7635 7416 * increment p_lckcnt by calling page_subclaim() which takes care of
7636 7417 * availrmem accounting and p_lckcnt overflow.
7637 7418 *
7638 7419 * If a MAP_PRIVATE region gains PROT_WRITE, we decrement p_lckcnt and
7639 7420 * increment p_cowcnt by calling page_addclaim() which takes care of
7640 7421 * availrmem availability and p_cowcnt overflow.
7641 7422 */
7642 7423
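A compressed sketch of the mprotect-on-mlocked transition described above
(hypothetical helper, illustrative only; in the real code these calls are
made from segvn_setprot()/segvn_claim_pages()):

#include <sys/types.h>
#include <sys/mman.h>
#include <vm/page.h>

/*
 * Hypothetical helper: move one locked page's claim between p_lckcnt
 * and p_cowcnt when a MAP_PRIVATE mapping's PROT_WRITE bit changes.
 * Returns non-zero on success, as page_addclaim()/page_subclaim() do.
 */
static int
transfer_claim(page_t *pp, uint_t oprot, uint_t nprot)
{
	if ((oprot & PROT_WRITE) && !(nprot & PROT_WRITE)) {
		/* losing PROT_WRITE: p_cowcnt-- / p_lckcnt++ */
		return (page_subclaim(pp));
	}
	if (!(oprot & PROT_WRITE) && (nprot & PROT_WRITE)) {
		/* gaining PROT_WRITE: p_lckcnt-- / p_cowcnt++ */
		return (page_addclaim(pp));
	}
	return (1);	/* nothing to transfer */
}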
7643 7424 /*
7644 7425 * Lock down (or unlock) pages mapped by this segment.
7645 7426 *
7646 7427 * XXX only creates PAGESIZE pages if anon slots are not initialized.
7647 7428 * At fault time they will be relocated into larger pages.
7648 7429 */
7649 7430 static int
7650 7431 segvn_lockop(struct seg *seg, caddr_t addr, size_t len,
7651 7432 int attr, int op, ulong_t *lockmap, size_t pos)
7652 7433 {
7653 7434 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
7654 7435 struct vpage *vpp;
7655 7436 struct vpage *evp;
7656 7437 page_t *pp;
7657 7438 u_offset_t offset;
7658 7439 u_offset_t off;
7659 7440 int segtype;
7660 7441 int pageprot;
7661 7442 int claim;
7662 7443 struct vnode *vp;
7663 7444 ulong_t anon_index;
7664 7445 struct anon_map *amp;
7665 7446 struct anon *ap;
7666 7447 struct vattr va;
7667 7448 anon_sync_obj_t cookie;
7668 7449 struct kshmid *sp = NULL;
7669 7450 struct proc *p = curproc;
7670 7451 kproject_t *proj = NULL;
7671 7452 int chargeproc = 1;
7672 7453 size_t locked_bytes = 0;
7673 7454 size_t unlocked_bytes = 0;
7674 7455 int err = 0;
7675 7456
7676 7457 /*
7677 7458 * Hold write lock on address space because may split or concatenate
7678 7459 * segments
7679 7460 */
7680 7461 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
7681 7462
7682 7463 /*
7683 7464 * If this is a shm, use shm's project and zone, else use
7684 7465 * project and zone of calling process
7685 7466 */
7686 7467
7687 7468 /* Determine if this segment backs a sysV shm */
7688 7469 if (svd->amp != NULL && svd->amp->a_sp != NULL) {
7689 7470 ASSERT(svd->type == MAP_SHARED);
7690 7471 ASSERT(svd->tr_state == SEGVN_TR_OFF);
7691 7472 sp = svd->amp->a_sp;
7692 7473 proj = sp->shm_perm.ipc_proj;
7693 7474 chargeproc = 0;
7694 7475 }
7695 7476
7696 7477 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
7697 7478 if (attr) {
7698 7479 pageprot = attr & ~(SHARED|PRIVATE);
7699 7480 segtype = attr & SHARED ? MAP_SHARED : MAP_PRIVATE;
7700 7481
7701 7482 /*
7702 7483 * We are done if the segment types don't match
7703 7484 * or if we have segment level protections and
7704 7485 * they don't match.
7705 7486 */
7706 7487 if (svd->type != segtype) {
7707 7488 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7708 7489 return (0);
7709 7490 }
7710 7491 if (svd->pageprot == 0 && svd->prot != pageprot) {
7711 7492 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7712 7493 return (0);
7713 7494 }
7714 7495 }
7715 7496
7716 7497 if (op == MC_LOCK) {
7717 7498 if (svd->tr_state == SEGVN_TR_INIT) {
7718 7499 svd->tr_state = SEGVN_TR_OFF;
7719 7500 } else if (svd->tr_state == SEGVN_TR_ON) {
7720 7501 ASSERT(svd->amp != NULL);
7721 7502 segvn_textunrepl(seg, 0);
7722 7503 ASSERT(svd->amp == NULL &&
7723 7504 svd->tr_state == SEGVN_TR_OFF);
7724 7505 }
7725 7506 }
7726 7507
7727 7508 /*
7728 7509 * If we're locking, then we must create a vpage structure if
7729 7510 * none exists. If we're unlocking, then check to see if there
7730 7511 * is a vpage -- if not, then we could not have locked anything.
7731 7512 */
7732 7513
7733 7514 if ((vpp = svd->vpage) == NULL) {
7734 7515 if (op == MC_LOCK) {
7735 7516 segvn_vpage(seg);
7736 7517 if (svd->vpage == NULL) {
7737 7518 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7738 7519 return (ENOMEM);
7739 7520 }
7740 7521 } else {
7741 7522 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7742 7523 return (0);
7743 7524 }
7744 7525 }
7745 7526
7746 7527 /*
7747 7528 * The anonymous data vector (i.e., previously
7748 7529 * unreferenced mapping to swap space) can be allocated
7749 7530 * by lazily testing for its existence.
7750 7531 */
7751 7532 if (op == MC_LOCK && svd->amp == NULL && svd->vp == NULL) {
7752 7533 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
7753 7534 svd->amp = anonmap_alloc(seg->s_size, 0, ANON_SLEEP);
7754 7535 svd->amp->a_szc = seg->s_szc;
7755 7536 }
7756 7537
7757 7538 if ((amp = svd->amp) != NULL) {
7758 7539 anon_index = svd->anon_index + seg_page(seg, addr);
7759 7540 }
7760 7541
7761 7542 offset = svd->offset + (uintptr_t)(addr - seg->s_base);
7762 7543 evp = &svd->vpage[seg_page(seg, addr + len)];
7763 7544
7764 7545 if (sp != NULL)
7765 7546 mutex_enter(&sp->shm_mlock);
7766 7547
7767 7548 /* determine number of unlocked bytes in range for lock operation */
7768 7549 if (op == MC_LOCK) {
7769 7550
7770 7551 if (sp == NULL) {
7771 7552 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7772 7553 vpp++) {
7773 7554 if (!VPP_ISPPLOCK(vpp))
7774 7555 unlocked_bytes += PAGESIZE;
7775 7556 }
7776 7557 } else {
7777 7558 ulong_t i_idx, i_edx;
7778 7559 anon_sync_obj_t i_cookie;
7779 7560 struct anon *i_ap;
7780 7561 struct vnode *i_vp;
7781 7562 u_offset_t i_off;
7782 7563
7783 7564 /* Only count sysV pages once for locked memory */
7784 7565 i_edx = svd->anon_index + seg_page(seg, addr + len);
7785 7566 			ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7786 7567 for (i_idx = anon_index; i_idx < i_edx; i_idx++) {
7787 7568 anon_array_enter(amp, i_idx, &i_cookie);
7788 7569 i_ap = anon_get_ptr(amp->ahp, i_idx);
7789 7570 if (i_ap == NULL) {
7790 7571 unlocked_bytes += PAGESIZE;
7791 7572 anon_array_exit(&i_cookie);
7792 7573 continue;
7793 7574 }
7794 7575 swap_xlate(i_ap, &i_vp, &i_off);
7795 7576 anon_array_exit(&i_cookie);
7796 7577 pp = page_lookup(i_vp, i_off, SE_SHARED);
7797 7578 if (pp == NULL) {
7798 7579 unlocked_bytes += PAGESIZE;
7799 7580 continue;
7800 7581 } else if (pp->p_lckcnt == 0)
7801 7582 unlocked_bytes += PAGESIZE;
7802 7583 page_unlock(pp);
7803 7584 }
7804 7585 			ANON_LOCK_EXIT(&amp->a_rwlock);
7805 7586 }
7806 7587
7807 7588 mutex_enter(&p->p_lock);
7808 7589 err = rctl_incr_locked_mem(p, proj, unlocked_bytes,
7809 7590 chargeproc);
7810 7591 mutex_exit(&p->p_lock);
7811 7592
7812 7593 if (err) {
7813 7594 if (sp != NULL)
7814 7595 mutex_exit(&sp->shm_mlock);
7815 7596 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
7816 7597 return (err);
7817 7598 }
7818 7599 }
7819 7600 /*
7820 7601 * Loop over all pages in the range. Process if we're locking and
7821 7602 * page has not already been locked in this mapping; or if we're
7822 7603 * unlocking and the page has been locked.
7823 7604 */
7824 7605 for (vpp = &svd->vpage[seg_page(seg, addr)]; vpp < evp;
7825 7606 vpp++, pos++, addr += PAGESIZE, offset += PAGESIZE, anon_index++) {
7826 7607 if ((attr == 0 || VPP_PROT(vpp) == pageprot) &&
7827 7608 ((op == MC_LOCK && !VPP_ISPPLOCK(vpp)) ||
7828 7609 (op == MC_UNLOCK && VPP_ISPPLOCK(vpp)))) {
7829 7610
7830 7611 if (amp != NULL)
7831 7612 				ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
7832 7613 /*
7833 7614 * If this isn't a MAP_NORESERVE segment and
7834 7615 * we're locking, allocate anon slots if they
7835 7616 * don't exist. The page is brought in later on.
7836 7617 */
7837 7618 if (op == MC_LOCK && svd->vp == NULL &&
7838 7619 ((svd->flags & MAP_NORESERVE) == 0) &&
7839 7620 amp != NULL &&
7840 7621 ((ap = anon_get_ptr(amp->ahp, anon_index))
7841 7622 == NULL)) {
7842 7623 anon_array_enter(amp, anon_index, &cookie);
7843 7624
7844 7625 if ((ap = anon_get_ptr(amp->ahp,
7845 7626 anon_index)) == NULL) {
7846 7627 pp = anon_zero(seg, addr, &ap,
7847 7628 svd->cred);
7848 7629 if (pp == NULL) {
7849 7630 anon_array_exit(&cookie);
7850 7631 						ANON_LOCK_EXIT(&amp->a_rwlock);
7851 7632 err = ENOMEM;
7852 7633 goto out;
7853 7634 }
7854 7635 ASSERT(anon_get_ptr(amp->ahp,
7855 7636 anon_index) == NULL);
7856 7637 (void) anon_set_ptr(amp->ahp,
7857 7638 anon_index, ap, ANON_SLEEP);
7858 7639 page_unlock(pp);
7859 7640 }
7860 7641 anon_array_exit(&cookie);
7861 7642 }
7862 7643
7863 7644 /*
7864 7645 * Get name for page, accounting for
7865 7646 * existence of private copy.
7866 7647 */
7867 7648 ap = NULL;
7868 7649 if (amp != NULL) {
7869 7650 anon_array_enter(amp, anon_index, &cookie);
7870 7651 ap = anon_get_ptr(amp->ahp, anon_index);
7871 7652 if (ap != NULL) {
7872 7653 swap_xlate(ap, &vp, &off);
7873 7654 } else {
7874 7655 if (svd->vp == NULL &&
7875 7656 (svd->flags & MAP_NORESERVE)) {
7876 7657 anon_array_exit(&cookie);
7877 7658 						ANON_LOCK_EXIT(&amp->a_rwlock);
7878 7659 continue;
7879 7660 }
7880 7661 vp = svd->vp;
7881 7662 off = offset;
7882 7663 }
7883 7664 if (op != MC_LOCK || ap == NULL) {
7884 7665 anon_array_exit(&cookie);
7885 7666 					ANON_LOCK_EXIT(&amp->a_rwlock);
7886 7667 }
7887 7668 } else {
7888 7669 vp = svd->vp;
7889 7670 off = offset;
7890 7671 }
7891 7672
7892 7673 /*
7893 7674 * Get page frame. It's ok if the page is
7894 7675 * not available when we're unlocking, as this
7895 7676 * may simply mean that a page we locked got
7896 7677 * truncated out of existence after we locked it.
7897 7678 *
7898 7679 * Invoke VOP_GETPAGE() to obtain the page struct
7899 7680 * since we may need to read it from disk if its
7900 7681 * been paged out.
7901 7682 */
7902 7683 if (op != MC_LOCK)
7903 7684 pp = page_lookup(vp, off, SE_SHARED);
7904 7685 else {
7905 7686 page_t *pl[1 + 1];
7906 7687 int error;
7907 7688
7908 7689 ASSERT(vp != NULL);
7909 7690
7910 7691 error = VOP_GETPAGE(vp, (offset_t)off, PAGESIZE,
7911 7692 (uint_t *)NULL, pl, PAGESIZE, seg, addr,
7912 7693 S_OTHER, svd->cred, NULL);
7913 7694
7914 7695 if (error && ap != NULL) {
7915 7696 anon_array_exit(&cookie);
7916 7697 					ANON_LOCK_EXIT(&amp->a_rwlock);
7917 7698 }
7918 7699
7919 7700 /*
7920 7701 * If the error is EDEADLK then we must bounce
7921 7702 * up and drop all vm subsystem locks and then
7922 7703 * retry the operation later
7923 7704 * This behavior is a temporary measure because
7924 7705 * ufs/sds logging is badly designed and will
7925 7706 * deadlock if we don't allow this bounce to
7926 7707 * happen. The real solution is to re-design
7927 7708 * the logging code to work properly. See bug
7928 7709 * 4125102 for details of the problem.
7929 7710 */
7930 7711 if (error == EDEADLK) {
7931 7712 err = error;
7932 7713 goto out;
7933 7714 }
7934 7715 /*
7935 7716 * Quit if we fail to fault in the page. Treat
7936 7717 * the failure as an error, unless the addr
7937 7718 * is mapped beyond the end of a file.
7938 7719 */
7939 7720 if (error && svd->vp) {
7940 7721 va.va_mask = AT_SIZE;
7941 7722 if (VOP_GETATTR(svd->vp, &va, 0,
7942 7723 svd->cred, NULL) != 0) {
7943 7724 err = EIO;
7944 7725 goto out;
7945 7726 }
7946 7727 if (btopr(va.va_size) >=
7947 7728 btopr(off + 1)) {
7948 7729 err = EIO;
7949 7730 goto out;
7950 7731 }
7951 7732 goto out;
7952 7733
7953 7734 } else if (error) {
7954 7735 err = EIO;
7955 7736 goto out;
7956 7737 }
7957 7738 pp = pl[0];
7958 7739 ASSERT(pp != NULL);
7959 7740 }
7960 7741
7961 7742 /*
7962 7743 * See Statement at the beginning of this routine.
7963 7744 *
7964 7745 * claim is always set if MAP_PRIVATE and PROT_WRITE
7965 7746 * irrespective of following factors:
7966 7747 *
7967 7748 * (1) anon slots are populated or not
7968 7749 * (2) cow is broken or not
7969 7750 * (3) refcnt on ap is 1 or greater than 1
7970 7751 *
7971 7752 * See 4140683 for details
7972 7753 */
7973 7754 claim = ((VPP_PROT(vpp) & PROT_WRITE) &&
7974 7755 (svd->type == MAP_PRIVATE));
7975 7756
7976 7757 /*
7977 7758 * Perform page-level operation appropriate to
7978 7759 * operation. If locking, undo the SOFTLOCK
7979 7760 * performed to bring the page into memory
7980 7761 * after setting the lock. If unlocking,
7981 7762 * and no page was found, account for the claim
7982 7763 * separately.
7983 7764 */
7984 7765 if (op == MC_LOCK) {
7985 7766 int ret = 1; /* Assume success */
7986 7767
7987 7768 ASSERT(!VPP_ISPPLOCK(vpp));
7988 7769
7989 7770 ret = page_pp_lock(pp, claim, 0);
7990 7771 if (ap != NULL) {
7991 7772 if (ap->an_pvp != NULL) {
7992 7773 anon_swap_free(ap, pp);
7993 7774 }
7994 7775 anon_array_exit(&cookie);
7995 7776 					ANON_LOCK_EXIT(&amp->a_rwlock);
7996 7777 }
7997 7778 if (ret == 0) {
7998 7779 /* locking page failed */
7999 7780 page_unlock(pp);
8000 7781 err = EAGAIN;
8001 7782 goto out;
8002 7783 }
8003 7784 VPP_SETPPLOCK(vpp);
8004 7785 if (sp != NULL) {
8005 7786 if (pp->p_lckcnt == 1)
8006 7787 locked_bytes += PAGESIZE;
8007 7788 } else
8008 7789 locked_bytes += PAGESIZE;
8009 7790
8010 7791 if (lockmap != (ulong_t *)NULL)
8011 7792 BT_SET(lockmap, pos);
8012 7793
8013 7794 page_unlock(pp);
8014 7795 } else {
8015 7796 ASSERT(VPP_ISPPLOCK(vpp));
8016 7797 if (pp != NULL) {
8017 7798 /* sysV pages should be locked */
8018 7799 ASSERT(sp == NULL || pp->p_lckcnt > 0);
8019 7800 page_pp_unlock(pp, claim, 0);
8020 7801 if (sp != NULL) {
8021 7802 if (pp->p_lckcnt == 0)
8022 7803 unlocked_bytes
8023 7804 += PAGESIZE;
8024 7805 } else
8025 7806 unlocked_bytes += PAGESIZE;
8026 7807 page_unlock(pp);
8027 7808 } else {
8028 7809 ASSERT(sp == NULL);
8029 7810 unlocked_bytes += PAGESIZE;
8030 7811 }
8031 7812 VPP_CLRPPLOCK(vpp);
8032 7813 }
8033 7814 }
8034 7815 }
8035 7816 out:
8036 7817 if (op == MC_LOCK) {
8037 7818 /* Credit back bytes that did not get locked */
8038 7819 if ((unlocked_bytes - locked_bytes) > 0) {
8039 7820 if (proj == NULL)
8040 7821 mutex_enter(&p->p_lock);
8041 7822 rctl_decr_locked_mem(p, proj,
8042 7823 (unlocked_bytes - locked_bytes), chargeproc);
8043 7824 if (proj == NULL)
8044 7825 mutex_exit(&p->p_lock);
8045 7826 }
8046 7827
8047 7828 } else {
8048 7829 /* Account bytes that were unlocked */
8049 7830 if (unlocked_bytes > 0) {
8050 7831 if (proj == NULL)
8051 7832 mutex_enter(&p->p_lock);
8052 7833 rctl_decr_locked_mem(p, proj, unlocked_bytes,
8053 7834 chargeproc);
8054 7835 if (proj == NULL)
8055 7836 mutex_exit(&p->p_lock);
8056 7837 }
8057 7838 }
8058 7839 if (sp != NULL)
8059 7840 mutex_exit(&sp->shm_mlock);
8060 7841 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8061 7842
8062 7843 return (err);
8063 7844 }
8064 7845
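segvn_lockop() above is what memcntl(2) MC_LOCK/MC_UNLOCK, and therefore
mlock(3C)/munlock(3C), ultimately lands on for these segments. A minimal
userland counterpart (illustrative only):

#include <sys/types.h>
#include <sys/mman.h>

/*
 * Illustrative only: pin a mapped range, use it, then release it.
 * Failures surface as EAGAIN/ENOMEM from the per-page work done in
 * segvn_lockop().
 */
int
pin_range(caddr_t addr, size_t len)
{
	if (mlock(addr, len) != 0)
		return (-1);
	/* ... use the pinned memory ... */
	return (munlock(addr, len));
}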
8065 7846 /*
8066 7847 * Set advice from user for specified pages
8067 7848 * There are 9 types of advice:
8068 7849 * MADV_NORMAL - Normal (default) behavior (whatever that is)
8069 7850 * MADV_RANDOM - Random page references
8070 7851 * do not allow readahead or 'klustering'
8071 7852 * MADV_SEQUENTIAL - Sequential page references
8072 7853 * Pages previous to the one currently being
8073 7854 * accessed (determined by fault) are 'not needed'
8074 7855 * and are freed immediately
8075 7856 * MADV_WILLNEED - Pages are likely to be used (fault ahead in mctl)
8076 7857 * MADV_DONTNEED - Pages are not needed (synced out in mctl)
8077 7858 * MADV_FREE - Contents can be discarded
8078 7859 * MADV_ACCESS_DEFAULT- Default access
8079 7860 * MADV_ACCESS_LWP - Next LWP will access heavily
8080 7861 * MADV_ACCESS_MANY- Many LWPs or processes will access heavily
8081 7862 */
8082 7863 static int
8083 7864 segvn_advise(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8084 7865 {
8085 7866 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8086 7867 size_t page;
8087 7868 int err = 0;
8088 7869 int already_set;
8089 7870 struct anon_map *amp;
8090 7871 ulong_t anon_index;
8091 7872 struct seg *next;
8092 7873 lgrp_mem_policy_t policy;
8093 7874 struct seg *prev;
8094 7875 struct vnode *vp;
8095 7876
8096 7877 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8097 7878
8098 7879 /*
8099 7880 * In case of MADV_FREE, we won't be modifying any segment private
8100 7881 * data structures; so, we only need to grab READER's lock
8101 7882 */
8102 7883 if (behav != MADV_FREE) {
8103 7884 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8104 7885 if (svd->tr_state != SEGVN_TR_OFF) {
8105 7886 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8106 7887 return (0);
8107 7888 }
8108 7889 } else {
8109 7890 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8110 7891 }
8111 7892
8112 7893 /*
8113 7894 * Large pages are assumed to be only turned on when accesses to the
8114 7895 * segment's address range have spatial and temporal locality. That
8115 7896 * justifies ignoring MADV_SEQUENTIAL for large page segments.
8116 7897 * Also, ignore advice affecting lgroup memory allocation
8117 7898 * if don't need to do lgroup optimizations on this system
8118 7899 */
8119 7900
8120 7901 if ((behav == MADV_SEQUENTIAL &&
8121 7902 (seg->s_szc != 0 || HAT_IS_REGION_COOKIE_VALID(svd->rcookie))) ||
8122 7903 (!lgrp_optimizations() && (behav == MADV_ACCESS_DEFAULT ||
8123 7904 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY))) {
8124 7905 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8125 7906 return (0);
8126 7907 }
8127 7908
8128 7909 if (behav == MADV_SEQUENTIAL || behav == MADV_ACCESS_DEFAULT ||
8129 7910 behav == MADV_ACCESS_LWP || behav == MADV_ACCESS_MANY) {
8130 7911 /*
8131 7912 * Since we are going to unload hat mappings
8132 7913 * we first have to flush the cache. Otherwise
8133 7914 * this might lead to system panic if another
8134 7915 * thread is doing physio on the range whose
8135 7916 * mappings are unloaded by madvise(3C).
8136 7917 */
8137 7918 if (svd->softlockcnt > 0) {
8138 7919 /*
8139 7920 * If this is shared segment non 0 softlockcnt
8140 7921 * means locked pages are still in use.
8141 7922 */
8142 7923 if (svd->type == MAP_SHARED) {
8143 7924 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8144 7925 return (EAGAIN);
8145 7926 }
8146 7927 /*
8147 7928 * Since we do have the segvn writers lock
8148 7929 * nobody can fill the cache with entries
8149 7930 * belonging to this seg during the purge.
8150 7931 * The flush either succeeds or we still
8151 7932 * have pending I/Os. In the latter case,
8152 7933 * madvise(3C) fails.
8153 7934 */
8154 7935 segvn_purge(seg);
8155 7936 if (svd->softlockcnt > 0) {
8156 7937 /*
8157 7938 * Since madvise(3C) is advisory and
8158 7939 * it's not part of UNIX98, madvise(3C)
8159 7940 * failure here doesn't cause any hardship.
8160 7941 * Note that we don't block in "as" layer.
8161 7942 */
8162 7943 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8163 7944 return (EAGAIN);
8164 7945 }
8165 7946 } else if (svd->type == MAP_SHARED && svd->amp != NULL &&
8166 7947 svd->amp->a_softlockcnt > 0) {
8167 7948 /*
8168 7949 * Try to purge this amp's entries from pcache. It
8169 7950 * will succeed only if other segments that share the
8170 7951 * amp have no outstanding softlock's.
8171 7952 */
8172 7953 segvn_purge(seg);
8173 7954 }
8174 7955 }
8175 7956
8176 7957 amp = svd->amp;
8177 7958 vp = svd->vp;
8178 7959 if (behav == MADV_FREE) {
8179 7960 /*
8180 7961 * MADV_FREE is not supported for segments with
8181 7962 * underlying object; if anonmap is NULL, anon slots
8182 7963 * are not yet populated and there is nothing for
8183 7964 * us to do. As MADV_FREE is advisory, we don't
8184 7965 * return error in either case.
8185 7966 */
8186 7967 if (vp != NULL || amp == NULL) {
8187 7968 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8188 7969 return (0);
8189 7970 }
8190 7971
8191 7972 segvn_purge(seg);
8192 7973
8193 7974 page = seg_page(seg, addr);
8194 7975 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8195 7976 anon_disclaim(amp, svd->anon_index + page, len);
8196 7977 ANON_LOCK_EXIT(&amp->a_rwlock);
8197 7978 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8198 7979 return (0);
8199 7980 }
8200 7981
8201 7982 /*
8202 7983 * If advice is to be applied to entire segment,
8203 7984 * use advice field in seg_data structure
8204 7985 * otherwise use appropriate vpage entry.
8205 7986 */
8206 7987 if ((addr == seg->s_base) && (len == seg->s_size)) {
8207 7988 switch (behav) {
8208 7989 case MADV_ACCESS_LWP:
8209 7990 case MADV_ACCESS_MANY:
8210 7991 case MADV_ACCESS_DEFAULT:
8211 7992 /*
8212 7993 * Set memory allocation policy for this segment
8213 7994 */
8214 7995 policy = lgrp_madv_to_policy(behav, len, svd->type);
8215 7996 if (svd->type == MAP_SHARED)
8216 7997 already_set = lgrp_shm_policy_set(policy, amp,
8217 7998 svd->anon_index, vp, svd->offset, len);
8218 7999 else {
8219 8000 /*
8220 8001 * For private memory, need writers lock on
8221 8002 * address space because the segment may be
8222 8003 * split or concatenated when changing policy
8223 8004 */
8224 8005 if (AS_READ_HELD(seg->s_as,
8225 8006 &seg->s_as->a_lock)) {
8226 8007 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8227 8008 return (IE_RETRY);
8228 8009 }
8229 8010
8230 8011 already_set = lgrp_privm_policy_set(policy,
8231 8012 &svd->policy_info, len);
8232 8013 }
8233 8014
8234 8015 /*
8235 8016 * If policy set already and it shouldn't be reapplied,
8236 8017 * don't do anything.
8237 8018 */
8238 8019 if (already_set &&
8239 8020 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8240 8021 break;
8241 8022
8242 8023 /*
8243 8024 * Mark any existing pages in given range for
8244 8025 * migration
8245 8026 */
8246 8027 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8247 8028 vp, svd->offset, 1);
8248 8029
8249 8030 /*
8250 8031 * If same policy set already or this is a shared
8251 8032 * memory segment, don't need to try to concatenate
8252 8033 * segment with adjacent ones.
8253 8034 */
8254 8035 if (already_set || svd->type == MAP_SHARED)
8255 8036 break;
8256 8037
8257 8038 /*
8258 8039 * Try to concatenate this segment with previous
8259 8040 * one and next one, since we changed policy for
8260 8041 * this one and it may be compatible with adjacent
8261 8042 * ones now.
8262 8043 */
8263 8044 prev = AS_SEGPREV(seg->s_as, seg);
8264 8045 next = AS_SEGNEXT(seg->s_as, seg);
8265 8046
8266 8047 if (next && next->s_ops == &segvn_ops &&
8267 8048 addr + len == next->s_base)
8268 8049 (void) segvn_concat(seg, next, 1);
8269 8050
8270 8051 if (prev && prev->s_ops == &segvn_ops &&
8271 8052 addr == prev->s_base + prev->s_size) {
8272 8053 /*
8273 8054 * Drop lock for private data of current
8274 8055 * segment before concatenating (deleting) it
8275 8056 * and return IE_REATTACH to tell as_ctl() that
8276 8057 * current segment has changed
8277 8058 */
8278 8059 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8279 8060 if (!segvn_concat(prev, seg, 1))
8280 8061 err = IE_REATTACH;
8281 8062
8282 8063 return (err);
8283 8064 }
8284 8065 break;
8285 8066
8286 8067 case MADV_SEQUENTIAL:
8287 8068 /*
8288 8069 * unloading mapping guarantees
8289 8070 * detection in segvn_fault
8290 8071 */
8291 8072 ASSERT(seg->s_szc == 0);
8292 8073 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8293 8074 hat_unload(seg->s_as->a_hat, addr, len,
8294 8075 HAT_UNLOAD);
8295 8076 /* FALLTHROUGH */
8296 8077 case MADV_NORMAL:
8297 8078 case MADV_RANDOM:
8298 8079 svd->advice = (uchar_t)behav;
8299 8080 svd->pageadvice = 0;
8300 8081 break;
8301 8082 case MADV_WILLNEED: /* handled in memcntl */
8302 8083 case MADV_DONTNEED: /* handled in memcntl */
8303 8084 case MADV_FREE: /* handled above */
8304 8085 break;
8305 8086 default:
8306 8087 err = EINVAL;
8307 8088 }
8308 8089 } else {
8309 8090 caddr_t eaddr;
8310 8091 struct seg *new_seg;
8311 8092 struct segvn_data *new_svd;
8312 8093 u_offset_t off;
8313 8094 caddr_t oldeaddr;
8314 8095
8315 8096 page = seg_page(seg, addr);
8316 8097
8317 8098 segvn_vpage(seg);
8318 8099 if (svd->vpage == NULL) {
8319 8100 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8320 8101 return (ENOMEM);
8321 8102 }
8322 8103
8323 8104 switch (behav) {
8324 8105 struct vpage *bvpp, *evpp;
8325 8106
8326 8107 case MADV_ACCESS_LWP:
8327 8108 case MADV_ACCESS_MANY:
8328 8109 case MADV_ACCESS_DEFAULT:
8329 8110 /*
8330 8111 * Set memory allocation policy for portion of this
8331 8112 * segment
8332 8113 */
8333 8114
8334 8115 /*
8335 8116 * Align address and length of advice to page
8336 8117 * boundaries for large pages
8337 8118 */
8338 8119 if (seg->s_szc != 0) {
8339 8120 size_t pgsz;
8340 8121
8341 8122 pgsz = page_get_pagesize(seg->s_szc);
8342 8123 addr = (caddr_t)P2ALIGN((uintptr_t)addr, pgsz);
8343 8124 len = P2ROUNDUP(len, pgsz);
8344 8125 }
8345 8126
8346 8127 /*
8347 8128 * Check to see whether policy is set already
8348 8129 */
8349 8130 policy = lgrp_madv_to_policy(behav, len, svd->type);
8350 8131
8351 8132 anon_index = svd->anon_index + page;
8352 8133 off = svd->offset + (uintptr_t)(addr - seg->s_base);
8353 8134
8354 8135 if (svd->type == MAP_SHARED)
8355 8136 already_set = lgrp_shm_policy_set(policy, amp,
8356 8137 anon_index, vp, off, len);
8357 8138 else
8358 8139 already_set =
8359 8140 (policy == svd->policy_info.mem_policy);
8360 8141
8361 8142 /*
8362 8143 * If policy set already and it shouldn't be reapplied,
8363 8144 * don't do anything.
8364 8145 */
8365 8146 if (already_set &&
8366 8147 !LGRP_MEM_POLICY_REAPPLICABLE(policy))
8367 8148 break;
8368 8149
8369 8150 /*
8370 8151 * For private memory, need writers lock on
8371 8152 * address space because the segment may be
8372 8153 * split or concatenated when changing policy
8373 8154 */
8374 8155 if (svd->type == MAP_PRIVATE &&
8375 8156 AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)) {
8376 8157 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8377 8158 return (IE_RETRY);
8378 8159 }
8379 8160
8380 8161 /*
8381 8162 * Mark any existing pages in given range for
8382 8163 * migration
8383 8164 */
8384 8165 page_mark_migrate(seg, addr, len, amp, svd->anon_index,
8385 8166 vp, svd->offset, 1);
8386 8167
8387 8168 /*
8388 8169 * Don't need to try to split or concatenate
8389 8170 * segments, since policy is same or this is a shared
8390 8171 * memory segment
8391 8172 */
8392 8173 if (already_set || svd->type == MAP_SHARED)
8393 8174 break;
8394 8175
8395 8176 if (HAT_IS_REGION_COOKIE_VALID(svd->rcookie)) {
8396 8177 ASSERT(svd->amp == NULL);
8397 8178 ASSERT(svd->tr_state == SEGVN_TR_OFF);
8398 8179 ASSERT(svd->softlockcnt == 0);
8399 8180 hat_leave_region(seg->s_as->a_hat, svd->rcookie,
8400 8181 HAT_REGION_TEXT);
8401 8182 svd->rcookie = HAT_INVALID_REGION_COOKIE;
8402 8183 }
8403 8184
8404 8185 /*
8405 8186 * Split off new segment if advice only applies to a
8406 8187 * portion of existing segment starting in middle
8407 8188 */
8408 8189 new_seg = NULL;
8409 8190 eaddr = addr + len;
8410 8191 oldeaddr = seg->s_base + seg->s_size;
8411 8192 if (addr > seg->s_base) {
8412 8193 /*
8413 8194 * Must flush I/O page cache
8414 8195 * before splitting segment
8415 8196 */
8416 8197 if (svd->softlockcnt > 0)
8417 8198 segvn_purge(seg);
8418 8199
8419 8200 /*
8420 8201 * Split segment and return IE_REATTACH to tell
8421 8202 * as_ctl() that current segment changed
8422 8203 */
8423 8204 new_seg = segvn_split_seg(seg, addr);
8424 8205 new_svd = (struct segvn_data *)new_seg->s_data;
8425 8206 err = IE_REATTACH;
8426 8207
8427 8208 /*
8428 8209 * If new segment ends where old one
8429 8210 * did, try to concatenate the new
8430 8211 * segment with next one.
8431 8212 */
8432 8213 if (eaddr == oldeaddr) {
8433 8214 /*
8434 8215 * Set policy for new segment
8435 8216 */
8436 8217 (void) lgrp_privm_policy_set(policy,
8437 8218 &new_svd->policy_info,
8438 8219 new_seg->s_size);
8439 8220
8440 8221 next = AS_SEGNEXT(new_seg->s_as,
8441 8222 new_seg);
8442 8223
8443 8224 if (next &&
8444 8225 next->s_ops == &segvn_ops &&
8445 8226 eaddr == next->s_base)
8446 8227 (void) segvn_concat(new_seg,
8447 8228 next, 1);
8448 8229 }
8449 8230 }
8450 8231
8451 8232 /*
8452 8233 * Split off end of existing segment if advice only
8453 8234 * applies to a portion of segment ending before
8454 8235 * end of the existing segment
8455 8236 */
8456 8237 if (eaddr < oldeaddr) {
8457 8238 /*
8458 8239 * Must flush I/O page cache
8459 8240 * before splitting segment
8460 8241 */
8461 8242 if (svd->softlockcnt > 0)
8462 8243 segvn_purge(seg);
8463 8244
8464 8245 /*
8465 8246 * If beginning of old segment was already
8466 8247 * split off, use new segment to split end off
8467 8248 * from.
8468 8249 */
8469 8250 if (new_seg != NULL && new_seg != seg) {
8470 8251 /*
8471 8252 * Split segment
8472 8253 */
8473 8254 (void) segvn_split_seg(new_seg, eaddr);
8474 8255
8475 8256 /*
8476 8257 * Set policy for new segment
8477 8258 */
8478 8259 (void) lgrp_privm_policy_set(policy,
8479 8260 &new_svd->policy_info,
8480 8261 new_seg->s_size);
8481 8262 } else {
8482 8263 /*
8483 8264 * Split segment and return IE_REATTACH
8484 8265 * to tell as_ctl() that current
8485 8266 * segment changed
8486 8267 */
8487 8268 (void) segvn_split_seg(seg, eaddr);
8488 8269 err = IE_REATTACH;
8489 8270
8490 8271 (void) lgrp_privm_policy_set(policy,
8491 8272 &svd->policy_info, seg->s_size);
8492 8273
8493 8274 /*
8494 8275 * If new segment starts where old one
8495 8276 * did, try to concatenate it with
8496 8277 * previous segment.
8497 8278 */
8498 8279 if (addr == seg->s_base) {
8499 8280 prev = AS_SEGPREV(seg->s_as,
8500 8281 seg);
8501 8282
8502 8283 /*
8503 8284 * Drop lock for private data
8504 8285 * of current segment before
8505 8286 * concatenating (deleting) it
8506 8287 */
8507 8288 if (prev &&
8508 8289 prev->s_ops ==
8509 8290 &segvn_ops &&
8510 8291 addr == prev->s_base +
8511 8292 prev->s_size) {
8512 8293 SEGVN_LOCK_EXIT(
8513 8294 seg->s_as,
8514 8295 &svd->lock);
8515 8296 (void) segvn_concat(
8516 8297 prev, seg, 1);
8517 8298 return (err);
8518 8299 }
8519 8300 }
8520 8301 }
8521 8302 }
8522 8303 break;
8523 8304 case MADV_SEQUENTIAL:
8524 8305 ASSERT(seg->s_szc == 0);
8525 8306 ASSERT(svd->rcookie == HAT_INVALID_REGION_COOKIE);
8526 8307 hat_unload(seg->s_as->a_hat, addr, len, HAT_UNLOAD);
8527 8308 /* FALLTHROUGH */
8528 8309 case MADV_NORMAL:
8529 8310 case MADV_RANDOM:
8530 8311 bvpp = &svd->vpage[page];
8531 8312 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8532 8313 for (; bvpp < evpp; bvpp++)
8533 8314 VPP_SETADVICE(bvpp, behav);
8534 8315 svd->advice = MADV_NORMAL;
8535 8316 break;
8536 8317 case MADV_WILLNEED: /* handled in memcntl */
8537 8318 case MADV_DONTNEED: /* handled in memcntl */
8538 8319 case MADV_FREE: /* handled above */
8539 8320 break;
8540 8321 default:
8541 8322 err = EINVAL;
8542 8323 }
8543 8324 }
8544 8325 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8545 8326 return (err);
8546 8327 }
8547 8328
8548 8329 /*
8549 8330 * There is one kind of inheritance that can be specified for pages:
8550 8331 *
8551 8332 * SEGP_INH_ZERO - Pages should be zeroed in the child
8552 8333 */
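SEGP_INH_ZERO is the segment-level counterpart of the userland request; a minimal sketch, assuming it is reached through memcntl(2) with the MC_INHERIT_ZERO command (sizes illustrative):

#include <sys/types.h>
#include <sys/mman.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t len = 1024 * 1024;
	caddr_t base = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (base == MAP_FAILED) {
		perror("mmap");
		return (1);
	}
	memset(base, 0xff, len);	/* dirty the parent's copy */

	/* Ask that children see zero-filled pages for this range. */
	if (memcntl(base, len, MC_INHERIT_ZERO, 0, 0, 0) != 0)
		perror("memcntl(MC_INHERIT_ZERO)");

	/* After a subsequent fork(2) the child reads zeroes from [base, base + len). */
	return (0);
}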
8553 8334 static int
8554 8335 segvn_inherit(struct seg *seg, caddr_t addr, size_t len, uint_t behav)
8555 8336 {
8556 8337 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8557 8338 struct vpage *bvpp, *evpp;
8558 8339 size_t page;
8559 8340 int ret = 0;
8560 8341
8561 8342 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8562 8343
8563 8344 /* Can't support something we don't know about */
8564 8345 if (behav != SEGP_INH_ZERO)
8565 8346 return (ENOTSUP);
8566 8347
8567 8348 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_WRITER);
8568 8349
8569 8350 /*
8570 8351 * This must be a straightforward anonymous segment that is mapped
8571 8352 * privately and is not backed by a vnode.
8572 8353 */
8573 8354 if (svd->tr_state != SEGVN_TR_OFF ||
8574 8355 svd->type != MAP_PRIVATE ||
8575 8356 svd->vp != NULL) {
8576 8357 ret = EINVAL;
8577 8358 goto out;
8578 8359 }
8579 8360
8580 8361 /*
8581 8362 * If the entire segment has been marked as inherit zero, then no reason
8582 8363 * to do anything else.
8583 8364 */
8584 8365 if (svd->svn_inz == SEGVN_INZ_ALL) {
8585 8366 ret = 0;
8586 8367 goto out;
8587 8368 }
8588 8369
8589 8370 /*
8590 8371 * If this applies to the entire segment, simply mark it and we're done.
8591 8372 */
8592 8373 if ((addr == seg->s_base) && (len == seg->s_size)) {
8593 8374 svd->svn_inz = SEGVN_INZ_ALL;
8594 8375 ret = 0;
8595 8376 goto out;
8596 8377 }
8597 8378
8598 8379 /*
8599 8380 * We've been asked to mark a subset of this segment as inherit zero,
8600 8381 * therefore we need to manipulate its vpages.
8601 8382 */
8602 8383 if (svd->vpage == NULL) {
8603 8384 segvn_vpage(seg);
8604 8385 if (svd->vpage == NULL) {
8605 8386 ret = ENOMEM;
8606 8387 goto out;
8607 8388 }
8608 8389 }
8609 8390
8610 8391 svd->svn_inz = SEGVN_INZ_VPP;
8611 8392 page = seg_page(seg, addr);
8612 8393 bvpp = &svd->vpage[page];
8613 8394 evpp = &svd->vpage[page + (len >> PAGESHIFT)];
8614 8395 for (; bvpp < evpp; bvpp++)
8615 8396 VPP_SETINHZERO(bvpp);
8616 8397 ret = 0;
8617 8398
8618 8399 out:
8619 8400 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
8620 8401 return (ret);
8621 8402 }
8622 8403
8623 8404 /*
8624 8405 * Create a vpage structure for this seg.
8625 8406 */
8626 8407 static void
8627 8408 segvn_vpage(struct seg *seg)
8628 8409 {
8629 8410 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8630 8411 struct vpage *vp, *evp;
8631 8412 static pgcnt_t page_limit = 0;
8632 8413
8633 8414 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
8634 8415
8635 8416 /*
8636 8417 * If no vpage structure exists, allocate one. Copy the protections
8637 8418 * and the advice from the segment itself to the individual pages.
8638 8419 */
8639 8420 if (svd->vpage == NULL) {
8640 8421 /*
8641 8422 * Start by calculating the number of pages we must allocate to
8642 8423 * track the per-page vpage structs needed for this entire
8643 8424 * segment. If we know now that it will require more than our
8644 8425 * heuristic for the maximum amount of kmem we can consume then
8645 8426 * fail. We do this here, instead of trying to detect this deep
8646 8427 * in page_resv and propagating the error up, since the entire
8647 8428 * memory allocation stack is not amenable to passing this
8648 8429 * back. Instead, it wants to keep trying.
8649 8430 *
8650 8431 * As a heuristic we set a page limit of 5/8s of total_pages
8651 8432 * for this allocation. We use shifts so that no floating
8652 8433 * point conversion takes place and only need to do the
8653 8434 * calculation once.
8654 8435 */
8655 8436 ulong_t mem_needed = seg_pages(seg) * sizeof (struct vpage);
8656 8437 pgcnt_t npages = mem_needed >> PAGESHIFT;
8657 8438
8658 8439 if (page_limit == 0)
8659 8440 page_limit = (total_pages >> 1) + (total_pages >> 3);
8660 8441
8661 8442 if (npages > page_limit)
8662 8443 return;
8663 8444
8664 8445 svd->pageadvice = 1;
8665 8446 svd->vpage = kmem_zalloc(mem_needed, KM_SLEEP);
8666 8447 evp = &svd->vpage[seg_page(seg, seg->s_base + seg->s_size)];
8667 8448 for (vp = svd->vpage; vp < evp; vp++) {
8668 8449 VPP_SETPROT(vp, svd->prot);
8669 8450 VPP_SETADVICE(vp, svd->advice);
8670 8451 }
8671 8452 }
8672 8453 }
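The page_limit expression above is simply 5/8 built from shifts (1/2 + 1/8). A small standalone sketch with a hypothetical total_pages, just to make the arithmetic concrete:

#include <stdio.h>

int
main(void)
{
	/* Hypothetical machine: 16 GB of physical memory, 4 KB pages. */
	unsigned long long total_pages = 16ULL * 1024 * 1024 * 1024 / 4096;
	unsigned long long page_limit = (total_pages >> 1) + (total_pages >> 3);

	/* Prints 4194304 and 2621440: exactly 5/8 (62.5%) with no floating point. */
	printf("total_pages=%llu page_limit=%llu\n", total_pages, page_limit);
	return (0);
}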
8673 8454
8674 8455 /*
8675 8456 * Dump the pages belonging to this segvn segment.
8676 8457 */
8677 8458 static void
8678 8459 segvn_dump(struct seg *seg)
8679 8460 {
8680 8461 struct segvn_data *svd;
8681 8462 page_t *pp;
8682 8463 struct anon_map *amp;
8683 8464 ulong_t anon_index;
8684 8465 struct vnode *vp;
8685 8466 u_offset_t off, offset;
8686 8467 pfn_t pfn;
8687 8468 pgcnt_t page, npages;
8688 8469 caddr_t addr;
8689 8470
8690 8471 npages = seg_pages(seg);
8691 8472 svd = (struct segvn_data *)seg->s_data;
8692 8473 vp = svd->vp;
8693 8474 off = offset = svd->offset;
8694 8475 addr = seg->s_base;
8695 8476
8696 8477 if ((amp = svd->amp) != NULL) {
8697 8478 anon_index = svd->anon_index;
8698 8479 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
8699 8480 }
8700 8481
8701 8482 for (page = 0; page < npages; page++, offset += PAGESIZE) {
8702 8483 struct anon *ap;
8703 8484 int we_own_it = 0;
8704 8485
8705 8486 if (amp && (ap = anon_get_ptr(svd->amp->ahp, anon_index++))) {
8706 8487 swap_xlate_nopanic(ap, &vp, &off);
8707 8488 } else {
8708 8489 vp = svd->vp;
8709 8490 off = offset;
8710 8491 }
8711 8492
8712 8493 /*
8713 8494 * If pp == NULL, the page either does not exist
8714 8495 * or is exclusively locked. So determine if it
8715 8496 * exists before searching for it.
8716 8497 */
8717 8498
8718 8499 if ((pp = page_lookup_nowait(vp, off, SE_SHARED)))
8719 8500 we_own_it = 1;
8720 8501 else
8721 8502 pp = page_exists(vp, off);
8722 8503
8723 8504 if (pp) {
8724 8505 pfn = page_pptonum(pp);
8725 8506 dump_addpage(seg->s_as, addr, pfn);
8726 8507 if (we_own_it)
8727 8508 page_unlock(pp);
8728 8509 }
8729 8510 addr += PAGESIZE;
8730 8511 dump_timeleft = dump_timeout;
8731 8512 }
8732 8513
8733 8514 if (amp != NULL)
8734 8515 ANON_LOCK_EXIT(&amp->a_rwlock);
8735 8516 }
8736 8517
8737 8518 #ifdef DEBUG
8738 8519 static uint32_t segvn_pglock_mtbf = 0;
8739 8520 #endif
8740 8521
8741 8522 #define PCACHE_SHWLIST ((page_t *)-2)
8742 8523 #define NOPCACHE_SHWLIST ((page_t *)-1)
8743 8524
8744 8525 /*
8745 8526 * Lock/Unlock anon pages over a given range. Return shadow list. This routine
8746 8527 * uses global segment pcache to cache shadow lists (i.e. pp arrays) of pages
8747 8528 * to avoid the overhead of per page locking, unlocking for subsequent IOs to
8748 8529 * the same parts of the segment. Currently shadow list creation is only
8749 8530 * supported for pure anon segments. MAP_PRIVATE segment pcache entries are
8750 8531 * tagged with segment pointer, starting virtual address and length. This
8751 8532 * approach for MAP_SHARED segments may add many pcache entries for the same
8752 8533 * set of pages and lead to long hash chains that decrease pcache lookup
8753 8534 * performance. To avoid this issue for shared segments shared anon map and
8754 8535 * starting anon index are used for pcache entry tagging. This allows all
8755 8536 * segments to share pcache entries for the same anon range and reduces pcache
8756 8537 * chain's length as well as memory overhead from duplicate shadow lists and
8757 8538 * pcache entries.
8758 8539 *
8759 8540 * softlockcnt field in segvn_data structure counts the number of F_SOFTLOCK'd
8760 8541 * pages via segvn_fault() and pagelock'd pages via this routine. But pagelock
8761 8542 * part of softlockcnt accounting is done differently for private and shared
8762 8543 * segments. In private segment case softlock is only incremented when a new
8763 8544 * shadow list is created but not when an existing one is found via
8764 8545 * seg_plookup(). pcache entries have reference count incremented/decremented
8765 8546 * by each seg_plookup()/seg_pinactive() operation. Only entries that have 0
8766 8547 * reference count can be purged (and purging is needed before segment can be
8767 8548 * freed). When a private segment pcache entry is purged segvn_reclaim() will
8768 8549 * decrement softlockcnt. Since in private segment case each of its pcache
8769 8550 * entries only belongs to this segment we can expect that when
8770 8551 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8771 8552 * segment purge will succeed and softlockcnt will drop to 0. In shared
8772 8553 * segment case reference count in pcache entry counts active locks from many
8773 8554 * different segments so we can't expect segment purging to succeed even when
8774 8555 * segvn_pagelock(L_PAGEUNLOCK) was called for all outstanding IOs in this
8775 8556 * segment. To be able to determine when there're no pending pagelocks in
8776 8557 * shared segment case we don't rely on purging to make softlockcnt drop to 0
8777 8558 * but instead softlockcnt is incremented and decremented for every
8778 8559 * segvn_pagelock(L_PAGELOCK/L_PAGEUNLOCK) call regardless if a new shadow
8779 8560 * list was created or an existing one was found. When softlockcnt drops to 0
8780 8561 * this segment no longer has any claims for pcached shadow lists and the
8781 8562 * segment can be freed even if there're still active pcache entries
8782 8563 * shared by this segment anon map. Shared segment pcache entries belong to
8783 8564 * anon map and are typically removed when anon map is freed after all
8784 8565 * processes destroy the segments that use this anon map.
8785 8566 */
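This entry point is normally reached from the as layer when a driver locks user pages for I/O (physio-style). A schematic kernel-side sketch of that caller pattern, assuming the usual as_pagelock()/as_pageunlock() interfaces; the buffer locals (uaddr, ulen) are illustrative and this is not a complete driver:

struct page **pplist;

/* L_PAGELOCK: builds or looks up a cached shadow list for the buffer. */
if (as_pagelock(curproc->p_as, &pplist, uaddr, ulen, S_WRITE) != 0)
	return (EFAULT);

/* ... perform the I/O against the locked pages ... */

/* L_PAGEUNLOCK: drops this claim; the shadow list may stay cached in pcache. */
as_pageunlock(curproc->p_as, pplist, uaddr, ulen, S_WRITE);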
8786 8567 static int
8787 8568 segvn_pagelock(struct seg *seg, caddr_t addr, size_t len, struct page ***ppp,
8788 8569 enum lock_type type, enum seg_rw rw)
8789 8570 {
8790 8571 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
8791 8572 size_t np;
8792 8573 pgcnt_t adjustpages;
8793 8574 pgcnt_t npages;
8794 8575 ulong_t anon_index;
8795 8576 uint_t protchk = (rw == S_READ) ? PROT_READ : PROT_WRITE;
8796 8577 uint_t error;
8797 8578 struct anon_map *amp;
8798 8579 pgcnt_t anpgcnt;
8799 8580 struct page **pplist, **pl, *pp;
8800 8581 caddr_t a;
8801 8582 size_t page;
8802 8583 caddr_t lpgaddr, lpgeaddr;
8803 8584 anon_sync_obj_t cookie;
8804 8585 int anlock;
8805 8586 struct anon_map *pamp;
8806 8587 caddr_t paddr;
8807 8588 seg_preclaim_cbfunc_t preclaim_callback;
8808 8589 size_t pgsz;
8809 8590 int use_pcache;
8810 8591 size_t wlen;
8811 8592 uint_t pflags = 0;
8812 8593 int sftlck_sbase = 0;
8813 8594 int sftlck_send = 0;
8814 8595
8815 8596 #ifdef DEBUG
8816 8597 if (type == L_PAGELOCK && segvn_pglock_mtbf) {
8817 8598 hrtime_t ts = gethrtime();
8818 8599 if ((ts % segvn_pglock_mtbf) == 0) {
8819 8600 return (ENOTSUP);
8820 8601 }
8821 8602 if ((ts % segvn_pglock_mtbf) == 1) {
8822 8603 return (EFAULT);
8823 8604 }
8824 8605 }
8825 8606 #endif
8826 8607
8827 8608 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_START,
8828 8609 "segvn_pagelock: start seg %p addr %p", seg, addr);
8829 8610
8830 8611 ASSERT(seg->s_as && AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
8831 8612 ASSERT(type == L_PAGELOCK || type == L_PAGEUNLOCK);
8832 8613
8833 8614 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
8834 8615
8835 8616 /*
8836 8617 * for now we only support pagelock to anon memory. We would have to
8837 8618 * check protections for vnode objects and call into the vnode driver.
8838 8619 * That's too much for a fast path. Let the fault entry point handle
8839 8620 * it.
8840 8621 */
8841 8622 if (svd->vp != NULL) {
8842 8623 if (type == L_PAGELOCK) {
8843 8624 error = ENOTSUP;
8844 8625 goto out;
8845 8626 }
8846 8627 panic("segvn_pagelock(L_PAGEUNLOCK): vp != NULL");
8847 8628 }
8848 8629 if ((amp = svd->amp) == NULL) {
8849 8630 if (type == L_PAGELOCK) {
8850 8631 error = EFAULT;
8851 8632 goto out;
8852 8633 }
8853 8634 panic("segvn_pagelock(L_PAGEUNLOCK): amp == NULL");
8854 8635 }
8855 8636 if (rw != S_READ && rw != S_WRITE) {
8856 8637 if (type == L_PAGELOCK) {
8857 8638 error = ENOTSUP;
8858 8639 goto out;
8859 8640 }
8860 8641 panic("segvn_pagelock(L_PAGEUNLOCK): bad rw");
8861 8642 }
8862 8643
8863 8644 if (seg->s_szc != 0) {
8864 8645 /*
8865 8646 * We are adjusting the pagelock region to the large page size
8866 8647 * boundary because the unlocked part of a large page cannot
8867 8648 * be freed anyway unless all constituent pages of a large
8868 8649 * page are locked. Bigger regions reduce pcache chain length
8869 8650 * and improve lookup performance. The tradeoff is that the
8870 8651 * very first segvn_pagelock() call for a given page is more
8871 8652 * expensive if only 1 page_t is needed for IO. This is only
8872 8653 * an issue if pcache entry doesn't get reused by several
8873 8654 * subsequent calls. We optimize here for the case when pcache
8874 8655 * is heavily used by repeated IOs to the same address range.
8875 8656 *
8876 8657 * Note segment's page size cannot change while we are holding
8877 8658 * as lock. And then it cannot change while softlockcnt is
8878 8659 * not 0. This will allow us to correctly recalculate large
8879 8660 * page size region for the matching pageunlock/reclaim call
8880 8661 * since as_pageunlock() caller must always match
8881 8662 * as_pagelock() call's addr and len.
8882 8663 *
8883 8664 * For pageunlock *ppp points to the pointer of page_t that
8884 8665 * corresponds to the real unadjusted start address. Similar
8885 8666 * for pagelock *ppp must point to the pointer of page_t that
8886 8667 * corresponds to the real unadjusted start address.
8887 8668 */
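/*
 * Worked example (numbers purely illustrative, 4 KB base pages): for a
 * 4 MB large page, an 8 KB request starting 0x3000 bytes into that page
 * is widened so lpgaddr/lpgeaddr span the whole 4 MB, and
 * adjustpages = btop(0x3000) = 3; a shadow list built over the widened
 * region is then handed back at *ppp = pplist + 3.
 */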
8888 8669 pgsz = page_get_pagesize(seg->s_szc);
8889 8670 CALC_LPG_REGION(pgsz, seg, addr, len, lpgaddr, lpgeaddr);
8890 8671 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8891 8672 } else if (len < segvn_pglock_comb_thrshld) {
8892 8673 lpgaddr = addr;
8893 8674 lpgeaddr = addr + len;
8894 8675 adjustpages = 0;
8895 8676 pgsz = PAGESIZE;
8896 8677 } else {
8897 8678 /*
8898 8679 * Align the address range of large enough requests to allow
8899 8680 * combining of different shadow lists into 1 to reduce memory
8900 8681 * overhead from potentially overlapping large shadow lists
8901 8682 * (worst case is we have a 1MB IO into buffers with start
8902 8683 * addresses separated by 4K). Alignment is only possible if
8903 8684 * padded chunks have sufficient access permissions. Note
8904 8685 * permissions won't change between L_PAGELOCK and
8905 8686 * L_PAGEUNLOCK calls since non 0 softlockcnt will force
8906 8687 * segvn_setprot() to wait until softlockcnt drops to 0. This
8907 8688 * allows us to determine in L_PAGEUNLOCK the same range we
8908 8689 * computed in L_PAGELOCK.
8909 8690 *
8910 8691 * If alignment is limited by segment ends set
8911 8692 * sftlck_sbase/sftlck_send flags. In L_PAGELOCK case when
8912 8693 * these flags are set bump softlockcnt_sbase/softlockcnt_send
8913 8694 * per segment counters. In L_PAGEUNLOCK case decrease
8914 8695 * softlockcnt_sbase/softlockcnt_send counters if
8915 8696 * sftlck_sbase/sftlck_send flags are set. When
8916 8697 * softlockcnt_sbase/softlockcnt_send are non 0
8917 8698 * segvn_concat()/segvn_extend_prev()/segvn_extend_next()
8918 8699 * won't merge the segments. This restriction combined with
8919 8700 * restriction on segment unmapping and splitting for segments
8920 8701 * that have non 0 softlockcnt allows L_PAGEUNLOCK to
8921 8702 * correctly determine the same range that was previously
8922 8703 * locked by matching L_PAGELOCK.
8923 8704 */
8924 8705 pflags = SEGP_PSHIFT | (segvn_pglock_comb_bshift << 16);
8925 8706 pgsz = PAGESIZE;
8926 8707 if (svd->type == MAP_PRIVATE) {
8927 8708 lpgaddr = (caddr_t)P2ALIGN((uintptr_t)addr,
8928 8709 segvn_pglock_comb_balign);
8929 8710 if (lpgaddr < seg->s_base) {
8930 8711 lpgaddr = seg->s_base;
8931 8712 sftlck_sbase = 1;
8932 8713 }
8933 8714 } else {
8934 8715 ulong_t aix = svd->anon_index + seg_page(seg, addr);
8935 8716 ulong_t aaix = P2ALIGN(aix, segvn_pglock_comb_palign);
8936 8717 if (aaix < svd->anon_index) {
8937 8718 lpgaddr = seg->s_base;
8938 8719 sftlck_sbase = 1;
8939 8720 } else {
8940 8721 lpgaddr = addr - ptob(aix - aaix);
8941 8722 ASSERT(lpgaddr >= seg->s_base);
8942 8723 }
8943 8724 }
8944 8725 if (svd->pageprot && lpgaddr != addr) {
8945 8726 struct vpage *vp = &svd->vpage[seg_page(seg, lpgaddr)];
8946 8727 struct vpage *evp = &svd->vpage[seg_page(seg, addr)];
8947 8728 while (vp < evp) {
8948 8729 if ((VPP_PROT(vp) & protchk) == 0) {
8949 8730 break;
8950 8731 }
8951 8732 vp++;
8952 8733 }
8953 8734 if (vp < evp) {
8954 8735 lpgaddr = addr;
8955 8736 pflags = 0;
8956 8737 }
8957 8738 }
8958 8739 lpgeaddr = addr + len;
8959 8740 if (pflags) {
8960 8741 if (svd->type == MAP_PRIVATE) {
8961 8742 lpgeaddr = (caddr_t)P2ROUNDUP(
8962 8743 (uintptr_t)lpgeaddr,
8963 8744 segvn_pglock_comb_balign);
8964 8745 } else {
8965 8746 ulong_t aix = svd->anon_index +
8966 8747 seg_page(seg, lpgeaddr);
8967 8748 ulong_t aaix = P2ROUNDUP(aix,
8968 8749 segvn_pglock_comb_palign);
8969 8750 if (aaix < aix) {
8970 8751 lpgeaddr = 0;
8971 8752 } else {
8972 8753 lpgeaddr += ptob(aaix - aix);
8973 8754 }
8974 8755 }
8975 8756 if (lpgeaddr == 0 ||
8976 8757 lpgeaddr > seg->s_base + seg->s_size) {
8977 8758 lpgeaddr = seg->s_base + seg->s_size;
8978 8759 sftlck_send = 1;
8979 8760 }
8980 8761 }
8981 8762 if (svd->pageprot && lpgeaddr != addr + len) {
8982 8763 struct vpage *vp;
8983 8764 struct vpage *evp;
8984 8765
8985 8766 vp = &svd->vpage[seg_page(seg, addr + len)];
8986 8767 evp = &svd->vpage[seg_page(seg, lpgeaddr)];
8987 8768
8988 8769 while (vp < evp) {
8989 8770 if ((VPP_PROT(vp) & protchk) == 0) {
8990 8771 break;
8991 8772 }
8992 8773 vp++;
8993 8774 }
8994 8775 if (vp < evp) {
8995 8776 lpgeaddr = addr + len;
8996 8777 }
8997 8778 }
8998 8779 adjustpages = btop((uintptr_t)(addr - lpgaddr));
8999 8780 }
9000 8781
9001 8782 /*
9002 8783 * For MAP_SHARED segments we create pcache entries tagged by amp and
9003 8784 * anon index so that we can share pcache entries with other segments
9004 8785 * that map this amp. For private segments pcache entries are tagged
9005 8786 * with segment and virtual address.
9006 8787 */
9007 8788 if (svd->type == MAP_SHARED) {
9008 8789 pamp = amp;
9009 8790 paddr = (caddr_t)((lpgaddr - seg->s_base) +
9010 8791 ptob(svd->anon_index));
9011 8792 preclaim_callback = shamp_reclaim;
9012 8793 } else {
9013 8794 pamp = NULL;
9014 8795 paddr = lpgaddr;
9015 8796 preclaim_callback = segvn_reclaim;
9016 8797 }
9017 8798
9018 8799 if (type == L_PAGEUNLOCK) {
9019 8800 VM_STAT_ADD(segvnvmstats.pagelock[0]);
9020 8801
9021 8802 /*
9022 8803 * update hat ref bits for /proc. We need to make sure
9023 8804 * that threads tracing the ref and mod bits of the
9024 8805 * address space get the right data.
9025 8806 * Note: page ref and mod bits are updated at reclaim time
9026 8807 */
9027 8808 if (seg->s_as->a_vbits) {
9028 8809 for (a = addr; a < addr + len; a += PAGESIZE) {
9029 8810 if (rw == S_WRITE) {
9030 8811 hat_setstat(seg->s_as, a,
9031 8812 PAGESIZE, P_REF | P_MOD);
9032 8813 } else {
9033 8814 hat_setstat(seg->s_as, a,
9034 8815 PAGESIZE, P_REF);
9035 8816 }
9036 8817 }
9037 8818 }
9038 8819
9039 8820 /*
9040 8821 * Check the shadow list entry after the last page used in
9041 8822 * this IO request. If it's NOPCACHE_SHWLIST the shadow list
9042 8823 * was not inserted into pcache and is not large page
9043 8824 * adjusted. In this case call reclaim callback directly and
9044 8825 * don't adjust the shadow list start and size for large
9045 8826 * pages.
9046 8827 */
9047 8828 npages = btop(len);
9048 8829 if ((*ppp)[npages] == NOPCACHE_SHWLIST) {
9049 8830 void *ptag;
9050 8831 if (pamp != NULL) {
9051 8832 ASSERT(svd->type == MAP_SHARED);
9052 8833 ptag = (void *)pamp;
9053 8834 paddr = (caddr_t)((addr - seg->s_base) +
9054 8835 ptob(svd->anon_index));
9055 8836 } else {
9056 8837 ptag = (void *)seg;
9057 8838 paddr = addr;
9058 8839 }
9059 8840 (*preclaim_callback)(ptag, paddr, len, *ppp, rw, 0);
9060 8841 } else {
9061 8842 ASSERT((*ppp)[npages] == PCACHE_SHWLIST ||
9062 8843 IS_SWAPFSVP((*ppp)[npages]->p_vnode));
9063 8844 len = lpgeaddr - lpgaddr;
9064 8845 npages = btop(len);
9065 8846 seg_pinactive(seg, pamp, paddr, len,
9066 8847 *ppp - adjustpages, rw, pflags, preclaim_callback);
9067 8848 }
9068 8849
9069 8850 if (pamp != NULL) {
9070 8851 ASSERT(svd->type == MAP_SHARED);
9071 8852 ASSERT(svd->softlockcnt >= npages);
9072 8853 atomic_add_long((ulong_t *)&svd->softlockcnt, -npages);
9073 8854 }
9074 8855
9075 8856 if (sftlck_sbase) {
9076 8857 ASSERT(svd->softlockcnt_sbase > 0);
9077 8858 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_sbase);
9078 8859 }
9079 8860 if (sftlck_send) {
9080 8861 ASSERT(svd->softlockcnt_send > 0);
9081 8862 atomic_dec_ulong((ulong_t *)&svd->softlockcnt_send);
9082 8863 }
9083 8864
9084 8865 /*
9085 8866 * If someone is blocked while unmapping, we purge
9086 8867 * segment page cache and thus reclaim pplist synchronously
9087 8868 * without waiting for seg_pasync_thread. This speeds up
9088 8869 * unmapping in cases where munmap(2) is called, while
9089 8870 * raw async i/o is still in progress or where a thread
9090 8871 * exits on data fault in a multithreaded application.
9091 8872 */
9092 8873 if (AS_ISUNMAPWAIT(seg->s_as)) {
9093 8874 if (svd->softlockcnt == 0) {
9094 8875 mutex_enter(&seg->s_as->a_contents);
9095 8876 if (AS_ISUNMAPWAIT(seg->s_as)) {
9096 8877 AS_CLRUNMAPWAIT(seg->s_as);
9097 8878 cv_broadcast(&seg->s_as->a_cv);
9098 8879 }
9099 8880 mutex_exit(&seg->s_as->a_contents);
9100 8881 } else if (pamp == NULL) {
9101 8882 /*
9102 8883 * softlockcnt is not 0 and this is a
9103 8884 * MAP_PRIVATE segment. Try to purge its
9104 8885 * pcache entries to reduce softlockcnt.
9105 8886 * If it drops to 0 segvn_reclaim()
9106 8887 * will wake up a thread waiting on
9107 8888 * unmapwait flag.
9108 8889 *
9109 8890 * We don't purge MAP_SHARED segments with non
9110 8891 * 0 softlockcnt since IO is still in progress
9111 8892 * for such segments.
9112 8893 */
9113 8894 ASSERT(svd->type == MAP_PRIVATE);
9114 8895 segvn_purge(seg);
9115 8896 }
9116 8897 }
9117 8898 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9118 8899 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_UNLOCK_END,
9119 8900 "segvn_pagelock: unlock seg %p addr %p", seg, addr);
9120 8901 return (0);
9121 8902 }
9122 8903
9123 8904 /* The L_PAGELOCK case ... */
9124 8905
9125 8906 VM_STAT_ADD(segvnvmstats.pagelock[1]);
9126 8907
9127 8908 /*
9128 8909 * For MAP_SHARED segments we have to check protections before
9129 8910 * seg_plookup() since pcache entries may be shared by many segments
9130 8911 * with potentially different page protections.
9131 8912 */
9132 8913 if (pamp != NULL) {
9133 8914 ASSERT(svd->type == MAP_SHARED);
9134 8915 if (svd->pageprot == 0) {
9135 8916 if ((svd->prot & protchk) == 0) {
9136 8917 error = EACCES;
9137 8918 goto out;
9138 8919 }
9139 8920 } else {
9140 8921 /*
9141 8922 * check page protections
9142 8923 */
9143 8924 caddr_t ea;
9144 8925
9145 8926 if (seg->s_szc) {
9146 8927 a = lpgaddr;
9147 8928 ea = lpgeaddr;
9148 8929 } else {
9149 8930 a = addr;
9150 8931 ea = addr + len;
9151 8932 }
9152 8933 for (; a < ea; a += pgsz) {
9153 8934 struct vpage *vp;
9154 8935
9155 8936 ASSERT(seg->s_szc == 0 ||
9156 8937 sameprot(seg, a, pgsz));
9157 8938 vp = &svd->vpage[seg_page(seg, a)];
9158 8939 if ((VPP_PROT(vp) & protchk) == 0) {
9159 8940 error = EACCES;
9160 8941 goto out;
9161 8942 }
9162 8943 }
9163 8944 }
9164 8945 }
9165 8946
9166 8947 /*
9167 8948 * try to find pages in segment page cache
9168 8949 */
9169 8950 pplist = seg_plookup(seg, pamp, paddr, lpgeaddr - lpgaddr, rw, pflags);
9170 8951 if (pplist != NULL) {
9171 8952 if (pamp != NULL) {
9172 8953 npages = btop((uintptr_t)(lpgeaddr - lpgaddr));
9173 8954 ASSERT(svd->type == MAP_SHARED);
9174 8955 atomic_add_long((ulong_t *)&svd->softlockcnt,
9175 8956 npages);
9176 8957 }
9177 8958 if (sftlck_sbase) {
9178 8959 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9179 8960 }
9180 8961 if (sftlck_send) {
9181 8962 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9182 8963 }
9183 8964 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9184 8965 *ppp = pplist + adjustpages;
9185 8966 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_HIT_END,
9186 8967 "segvn_pagelock: cache hit seg %p addr %p", seg, addr);
9187 8968 return (0);
9188 8969 }
9189 8970
9190 8971 /*
9191 8972 * For MAP_SHARED segments we already verified above that segment
9192 8973 * protections allow this pagelock operation.
9193 8974 */
9194 8975 if (pamp == NULL) {
9195 8976 ASSERT(svd->type == MAP_PRIVATE);
9196 8977 if (svd->pageprot == 0) {
9197 8978 if ((svd->prot & protchk) == 0) {
9198 8979 error = EACCES;
9199 8980 goto out;
9200 8981 }
9201 8982 if (svd->prot & PROT_WRITE) {
9202 8983 wlen = lpgeaddr - lpgaddr;
9203 8984 } else {
9204 8985 wlen = 0;
9205 8986 ASSERT(rw == S_READ);
9206 8987 }
9207 8988 } else {
9208 8989 int wcont = 1;
9209 8990 /*
9210 8991 * check page protections
9211 8992 */
9212 8993 for (a = lpgaddr, wlen = 0; a < lpgeaddr; a += pgsz) {
9213 8994 struct vpage *vp;
9214 8995
9215 8996 ASSERT(seg->s_szc == 0 ||
9216 8997 sameprot(seg, a, pgsz));
9217 8998 vp = &svd->vpage[seg_page(seg, a)];
9218 8999 if ((VPP_PROT(vp) & protchk) == 0) {
9219 9000 error = EACCES;
9220 9001 goto out;
9221 9002 }
9222 9003 if (wcont && (VPP_PROT(vp) & PROT_WRITE)) {
9223 9004 wlen += pgsz;
9224 9005 } else {
9225 9006 wcont = 0;
9226 9007 ASSERT(rw == S_READ);
9227 9008 }
9228 9009 }
9229 9010 }
9230 9011 ASSERT(rw == S_READ || wlen == lpgeaddr - lpgaddr);
9231 9012 ASSERT(rw == S_WRITE || wlen <= lpgeaddr - lpgaddr);
9232 9013 }
9233 9014
9234 9015 /*
9235 9016 * Only build large page adjusted shadow list if we expect to insert
9236 9017 * it into pcache. For large enough pages it's a big overhead to
9237 9018 * create a shadow list of the entire large page. But this overhead
9238 9019 * should be amortized over repeated pcache hits on subsequent reuse
9239 9020 * of this shadow list (IO into any range within this shadow list will
9240 9021 * find it in pcache since we large page align the request for pcache
9241 9022 * lookups). pcache performance is improved with bigger shadow lists
9242 9023 * as it reduces the time to pcache the entire big segment and reduces
9243 9024 * pcache chain length.
9244 9025 */
9245 9026 if (seg_pinsert_check(seg, pamp, paddr,
9246 9027 lpgeaddr - lpgaddr, pflags) == SEGP_SUCCESS) {
9247 9028 addr = lpgaddr;
9248 9029 len = lpgeaddr - lpgaddr;
9249 9030 use_pcache = 1;
9250 9031 } else {
9251 9032 use_pcache = 0;
9252 9033 /*
9253 9034 * Since this entry will not be inserted into the pcache, we
9254 9035 * will not do any adjustments to the starting address or
9255 9036 * size of the memory to be locked.
9256 9037 */
9257 9038 adjustpages = 0;
9258 9039 }
9259 9040 npages = btop(len);
9260 9041
9261 9042 pplist = kmem_alloc(sizeof (page_t *) * (npages + 1), KM_SLEEP);
9262 9043 pl = pplist;
9263 9044 *ppp = pplist + adjustpages;
9264 9045 /*
9265 9046 * If use_pcache is 0 this shadow list is not large page adjusted.
9266 9047 * Record this info in the last entry of shadow array so that
9267 9048 * L_PAGEUNLOCK can determine if it should large page adjust the
9268 9049 * address range to find the real range that was locked.
9269 9050 */
9270 9051 pl[npages] = use_pcache ? PCACHE_SHWLIST : NOPCACHE_SHWLIST;
9271 9052
9272 9053 page = seg_page(seg, addr);
9273 9054 anon_index = svd->anon_index + page;
9274 9055
9275 9056 anlock = 0;
9276 9057 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9277 9058 ASSERT(amp->a_szc >= seg->s_szc);
9278 9059 anpgcnt = page_get_pagecnt(amp->a_szc);
9279 9060 for (a = addr; a < addr + len; a += PAGESIZE, anon_index++) {
9280 9061 struct anon *ap;
9281 9062 struct vnode *vp;
9282 9063 u_offset_t off;
9283 9064
9284 9065 /*
9285 9066 * Lock and unlock anon array only once per large page.
9286 9067 * anon_array_enter() locks the root anon slot according to
9287 9068 * a_szc which can't change while anon map is locked. We lock
9288 9069 * anon the first time through this loop and each time we
9289 9070 * reach anon index that corresponds to a root of a large
9290 9071 * page.
9291 9072 */
9292 9073 if (a == addr || P2PHASE(anon_index, anpgcnt) == 0) {
9293 9074 ASSERT(anlock == 0);
9294 9075 anon_array_enter(amp, anon_index, &cookie);
9295 9076 anlock = 1;
9296 9077 }
9297 9078 ap = anon_get_ptr(amp->ahp, anon_index);
9298 9079
9299 9080 /*
9300 9081 * We must never use seg_pcache for COW pages
9301 9082 * because we might end up with original page still
9302 9083 * lying in seg_pcache even after private page is
9303 9084 * created. This leads to data corruption as
9304 9085 * aio_write refers to the page still in cache
9305 9086 * while all other accesses refer to the private
9306 9087 * page.
9307 9088 */
9308 9089 if (ap == NULL || ap->an_refcnt != 1) {
9309 9090 struct vpage *vpage;
9310 9091
9311 9092 if (seg->s_szc) {
9312 9093 error = EFAULT;
9313 9094 break;
9314 9095 }
9315 9096 if (svd->vpage != NULL) {
9316 9097 vpage = &svd->vpage[seg_page(seg, a)];
9317 9098 } else {
9318 9099 vpage = NULL;
9319 9100 }
9320 9101 ASSERT(anlock);
9321 9102 anon_array_exit(&cookie);
9322 9103 anlock = 0;
9323 9104 pp = NULL;
9324 9105 error = segvn_faultpage(seg->s_as->a_hat, seg, a, 0,
9325 9106 vpage, &pp, 0, F_INVAL, rw, 1);
9326 9107 if (error) {
9327 9108 error = fc_decode(error);
9328 9109 break;
9329 9110 }
9330 9111 anon_array_enter(amp, anon_index, &cookie);
9331 9112 anlock = 1;
9332 9113 ap = anon_get_ptr(amp->ahp, anon_index);
9333 9114 if (ap == NULL || ap->an_refcnt != 1) {
9334 9115 error = EFAULT;
9335 9116 break;
9336 9117 }
9337 9118 }
9338 9119 swap_xlate(ap, &vp, &off);
9339 9120 pp = page_lookup_nowait(vp, off, SE_SHARED);
9340 9121 if (pp == NULL) {
9341 9122 error = EFAULT;
9342 9123 break;
9343 9124 }
9344 9125 if (ap->an_pvp != NULL) {
9345 9126 anon_swap_free(ap, pp);
9346 9127 }
9347 9128 /*
9348 9129 * Unlock anon if this is the last slot in a large page.
9349 9130 */
9350 9131 if (P2PHASE(anon_index, anpgcnt) == anpgcnt - 1) {
9351 9132 ASSERT(anlock);
9352 9133 anon_array_exit(&cookie);
9353 9134 anlock = 0;
9354 9135 }
9355 9136 *pplist++ = pp;
9356 9137 }
9357 9138 if (anlock) { /* Ensure the lock is dropped */
9358 9139 anon_array_exit(&cookie);
9359 9140 }
9360 9141 ANON_LOCK_EXIT(&amp->a_rwlock);
9361 9142
9362 9143 if (a >= addr + len) {
9363 9144 atomic_add_long((ulong_t *)&svd->softlockcnt, npages);
9364 9145 if (pamp != NULL) {
9365 9146 ASSERT(svd->type == MAP_SHARED);
9366 9147 atomic_add_long((ulong_t *)&pamp->a_softlockcnt,
9367 9148 npages);
9368 9149 wlen = len;
9369 9150 }
9370 9151 if (sftlck_sbase) {
9371 9152 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_sbase);
9372 9153 }
9373 9154 if (sftlck_send) {
9374 9155 atomic_inc_ulong((ulong_t *)&svd->softlockcnt_send);
9375 9156 }
9376 9157 if (use_pcache) {
9377 9158 (void) seg_pinsert(seg, pamp, paddr, len, wlen, pl,
9378 9159 rw, pflags, preclaim_callback);
9379 9160 }
9380 9161 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9381 9162 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_FILL_END,
9382 9163 "segvn_pagelock: cache fill seg %p addr %p", seg, addr);
9383 9164 return (0);
9384 9165 }
9385 9166
9386 9167 pplist = pl;
9387 9168 np = ((uintptr_t)(a - addr)) >> PAGESHIFT;
9388 9169 while (np > (uint_t)0) {
9389 9170 ASSERT(PAGE_LOCKED(*pplist));
9390 9171 page_unlock(*pplist);
9391 9172 np--;
9392 9173 pplist++;
9393 9174 }
9394 9175 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9395 9176 out:
9396 9177 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9397 9178 *ppp = NULL;
9398 9179 TRACE_2(TR_FAC_PHYSIO, TR_PHYSIO_SEGVN_MISS_END,
9399 9180 "segvn_pagelock: cache miss seg %p addr %p", seg, addr);
9400 9181 return (error);
9401 9182 }
9402 9183
9403 9184 /*
9404 9185 * purge any cached pages in the I/O page cache
9405 9186 */
9406 9187 static void
9407 9188 segvn_purge(struct seg *seg)
9408 9189 {
9409 9190 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9410 9191
9411 9192 /*
9412 9193 * pcache is only used by pure anon segments.
9413 9194 */
9414 9195 if (svd->amp == NULL || svd->vp != NULL) {
9415 9196 return;
9416 9197 }
9417 9198
9418 9199 /*
9419 9200 * For MAP_SHARED segments non 0 segment's softlockcnt means
9420 9201 * active IO is still in progress via this segment. So we only
9421 9202 * purge MAP_SHARED segments when their softlockcnt is 0.
9422 9203 */
9423 9204 if (svd->type == MAP_PRIVATE) {
9424 9205 if (svd->softlockcnt) {
9425 9206 seg_ppurge(seg, NULL, 0);
9426 9207 }
9427 9208 } else if (svd->softlockcnt == 0 && svd->amp->a_softlockcnt != 0) {
9428 9209 seg_ppurge(seg, svd->amp, 0);
9429 9210 }
9430 9211 }
9431 9212
9432 9213 /*
9433 9214 * If async argument is not 0 we are called from pcache async thread and don't
9434 9215 * hold AS lock.
9435 9216 */
9436 9217
9437 9218 /*ARGSUSED*/
9438 9219 static int
9439 9220 segvn_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9440 9221 enum seg_rw rw, int async)
9441 9222 {
9442 9223 struct seg *seg = (struct seg *)ptag;
9443 9224 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9444 9225 pgcnt_t np, npages;
9445 9226 struct page **pl;
9446 9227
9447 9228 npages = np = btop(len);
9448 9229 ASSERT(npages);
9449 9230
9450 9231 ASSERT(svd->vp == NULL && svd->amp != NULL);
9451 9232 ASSERT(svd->softlockcnt >= npages);
9452 9233 ASSERT(async || AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9453 9234
9454 9235 pl = pplist;
9455 9236
9456 9237 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9457 9238 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9458 9239
9459 9240 while (np > (uint_t)0) {
9460 9241 if (rw == S_WRITE) {
9461 9242 hat_setrefmod(*pplist);
9462 9243 } else {
9463 9244 hat_setref(*pplist);
9464 9245 }
9465 9246 page_unlock(*pplist);
9466 9247 np--;
9467 9248 pplist++;
9468 9249 }
9469 9250
9470 9251 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9471 9252
9472 9253 /*
9473 9254 * If we are pcache async thread we don't hold AS lock. This means if
9474 9255 * softlockcnt drops to 0 after the decrement below address space may
9475 9256 * get freed. We can't allow it since after softlock decrement to 0 we
9476 9257 * still need to access as structure for possible wakeup of unmap
9477 9258 * waiters. To prevent the disappearance of as we take this segment
9478 9259 * segfree_syncmtx. segvn_free() also takes this mutex as a barrier to
9479 9260 * make sure this routine completes before segment is freed.
9480 9261 *
9481 9262 * The second complication we have to deal with in async case is a
9482 9263 * possibility of missed wake up of unmap wait thread. When we don't
9483 9264 * hold as lock here we may take a_contents lock before unmap wait
9484 9265 * thread that was first to see softlockcnt was still not 0. As a
9485 9266 * result we'll fail to wake up an unmap wait thread. To avoid this
9486 9267 * race we set nounmapwait flag in as structure if we drop softlockcnt
9487 9268 * to 0 when we were called by pcache async thread. unmapwait thread
9488 9269 * will not block if this flag is set.
9489 9270 */
9490 9271 if (async) {
9491 9272 mutex_enter(&svd->segfree_syncmtx);
9492 9273 }
9493 9274
9494 9275 if (!atomic_add_long_nv((ulong_t *)&svd->softlockcnt, -npages)) {
9495 9276 if (async || AS_ISUNMAPWAIT(seg->s_as)) {
9496 9277 mutex_enter(&seg->s_as->a_contents);
9497 9278 if (async) {
9498 9279 AS_SETNOUNMAPWAIT(seg->s_as);
9499 9280 }
9500 9281 if (AS_ISUNMAPWAIT(seg->s_as)) {
9501 9282 AS_CLRUNMAPWAIT(seg->s_as);
9502 9283 cv_broadcast(&seg->s_as->a_cv);
9503 9284 }
9504 9285 mutex_exit(&seg->s_as->a_contents);
9505 9286 }
9506 9287 }
9507 9288
9508 9289 if (async) {
9509 9290 mutex_exit(&svd->segfree_syncmtx);
9510 9291 }
9511 9292 return (0);
9512 9293 }
9513 9294
9514 9295 /*ARGSUSED*/
9515 9296 static int
9516 9297 shamp_reclaim(void *ptag, caddr_t addr, size_t len, struct page **pplist,
9517 9298 enum seg_rw rw, int async)
9518 9299 {
9519 9300 amp_t *amp = (amp_t *)ptag;
9520 9301 pgcnt_t np, npages;
9521 9302 struct page **pl;
9522 9303
9523 9304 npages = np = btop(len);
9524 9305 ASSERT(npages);
9525 9306 ASSERT(amp->a_softlockcnt >= npages);
9526 9307
9527 9308 pl = pplist;
9528 9309
9529 9310 ASSERT(pl[np] == NOPCACHE_SHWLIST || pl[np] == PCACHE_SHWLIST);
9530 9311 ASSERT(!async || pl[np] == PCACHE_SHWLIST);
9531 9312
9532 9313 while (np > (uint_t)0) {
9533 9314 if (rw == S_WRITE) {
9534 9315 hat_setrefmod(*pplist);
9535 9316 } else {
9536 9317 hat_setref(*pplist);
9537 9318 }
9538 9319 page_unlock(*pplist);
9539 9320 np--;
9540 9321 pplist++;
9541 9322 }
9542 9323
9543 9324 kmem_free(pl, sizeof (page_t *) * (npages + 1));
9544 9325
9545 9326 /*
9546 9327 * If somebody sleeps in anonmap_purge() wake them up if a_softlockcnt
9547 9328 * drops to 0. anon map can't be freed until a_softlockcnt drops to 0
9548 9329 * and anonmap_purge() acquires a_purgemtx.
9549 9330 */
9550 9331 mutex_enter(&amp->a_purgemtx);
9551 9332 if (!atomic_add_long_nv((ulong_t *)&amp->a_softlockcnt, -npages) &&
9552 9333 amp->a_purgewait) {
9553 9334 amp->a_purgewait = 0;
9554 9335 cv_broadcast(&amp->a_purgecv);
9555 9336 }
9556 9337 mutex_exit(&amp->a_purgemtx);
9557 9338 return (0);
9558 9339 }
9559 9340
9560 9341 /*
9561 9342 * get a memory ID for an addr in a given segment
9562 9343 *
9563 9344 * XXX only creates PAGESIZE pages if anon slots are not initialized.
9564 9345 * At fault time they will be relocated into larger pages.
9565 9346 */
9566 9347 static int
9567 9348 segvn_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
9568 9349 {
9569 9350 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9570 9351 struct anon *ap = NULL;
9571 9352 ulong_t anon_index;
9572 9353 struct anon_map *amp;
9573 9354 anon_sync_obj_t cookie;
9574 9355
9575 9356 if (svd->type == MAP_PRIVATE) {
9576 9357 memidp->val[0] = (uintptr_t)seg->s_as;
9577 9358 memidp->val[1] = (uintptr_t)addr;
9578 9359 return (0);
9579 9360 }
9580 9361
9581 9362 if (svd->type == MAP_SHARED) {
9582 9363 if (svd->vp) {
9583 9364 memidp->val[0] = (uintptr_t)svd->vp;
9584 9365 memidp->val[1] = (u_longlong_t)svd->offset +
9585 9366 (uintptr_t)(addr - seg->s_base);
9586 9367 return (0);
9587 9368 } else {
9588 9369
9589 9370 SEGVN_LOCK_ENTER(seg->s_as, &svd->lock, RW_READER);
9590 9371 if ((amp = svd->amp) != NULL) {
9591 9372 anon_index = svd->anon_index +
9592 9373 seg_page(seg, addr);
9593 9374 }
9594 9375 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
9595 9376
9596 9377 ASSERT(amp != NULL);
9597 9378
9598 9379 ANON_LOCK_ENTER(&amp->a_rwlock, RW_READER);
9599 9380 anon_array_enter(amp, anon_index, &cookie);
9600 9381 ap = anon_get_ptr(amp->ahp, anon_index);
9601 9382 if (ap == NULL) {
9602 9383 page_t *pp;
9603 9384
9604 9385 pp = anon_zero(seg, addr, &ap, svd->cred);
9605 9386 if (pp == NULL) {
9606 9387 anon_array_exit(&cookie);
9607 9388 ANON_LOCK_EXIT(&amp->a_rwlock);
9608 9389 return (ENOMEM);
9609 9390 }
9610 9391 ASSERT(anon_get_ptr(amp->ahp, anon_index)
9611 9392 == NULL);
9612 9393 (void) anon_set_ptr(amp->ahp, anon_index,
9613 9394 ap, ANON_SLEEP);
9614 9395 page_unlock(pp);
9615 9396 }
9616 9397
9617 9398 anon_array_exit(&cookie);
9618 9399 ANON_LOCK_EXIT(&amp->a_rwlock);
9619 9400
9620 9401 memidp->val[0] = (uintptr_t)ap;
9621 9402 memidp->val[1] = (uintptr_t)addr & PAGEOFFSET;
9622 9403 return (0);
9623 9404 }
9624 9405 }
9625 9406 return (EINVAL);
9626 9407 }
9627 9408
9628 9409 static int
9629 9410 sameprot(struct seg *seg, caddr_t a, size_t len)
9630 9411 {
9631 9412 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9632 9413 struct vpage *vpage;
9633 9414 spgcnt_t pages = btop(len);
9634 9415 uint_t prot;
9635 9416
9636 9417 if (svd->pageprot == 0)
9637 9418 return (1);
9638 9419
9639 9420 ASSERT(svd->vpage != NULL);
9640 9421
9641 9422 vpage = &svd->vpage[seg_page(seg, a)];
9642 9423 prot = VPP_PROT(vpage);
9643 9424 vpage++;
9644 9425 pages--;
9645 9426 while (pages-- > 0) {
9646 9427 if (prot != VPP_PROT(vpage))
9647 9428 return (0);
9648 9429 vpage++;
9649 9430 }
9650 9431 return (1);
9651 9432 }
9652 9433
9653 9434 /*
9654 9435 * Get memory allocation policy info for specified address in given segment
9655 9436 */
9656 9437 static lgrp_mem_policy_info_t *
9657 9438 segvn_getpolicy(struct seg *seg, caddr_t addr)
9658 9439 {
9659 9440 struct anon_map *amp;
9660 9441 ulong_t anon_index;
9661 9442 lgrp_mem_policy_info_t *policy_info;
9662 9443 struct segvn_data *svn_data;
9663 9444 u_offset_t vn_off;
9664 9445 vnode_t *vp;
9665 9446
9666 9447 ASSERT(seg != NULL);
9667 9448
9668 9449 svn_data = (struct segvn_data *)seg->s_data;
9669 9450 if (svn_data == NULL)
9670 9451 return (NULL);
9671 9452
9672 9453 /*
9673 9454 * Get policy info for private or shared memory
9674 9455 */
9675 9456 if (svn_data->type != MAP_SHARED) {
9676 9457 if (svn_data->tr_state != SEGVN_TR_ON) {
9677 9458 policy_info = &svn_data->policy_info;
9678 9459 } else {
9679 9460 policy_info = &svn_data->tr_policy_info;
9680 9461 ASSERT(policy_info->mem_policy ==
9681 9462 LGRP_MEM_POLICY_NEXT_SEG);
9682 9463 }
9683 9464 } else {
9684 9465 amp = svn_data->amp;
9685 9466 anon_index = svn_data->anon_index + seg_page(seg, addr);
9686 9467 vp = svn_data->vp;
9687 9468 vn_off = svn_data->offset + (uintptr_t)(addr - seg->s_base);
9688 9469 policy_info = lgrp_shm_policy_get(amp, anon_index, vp, vn_off);
9689 9470 }
9690 9471
9691 9472 return (policy_info);
9692 -}
9693 -
9694 -/*ARGSUSED*/
9695 -static int
9696 -segvn_capable(struct seg *seg, segcapability_t capability)
9697 -{
9698 - return (0);
9699 9473 }
9700 9474
9701 9475 /*
9702 9476 * Bind text vnode segment to an amp. If we bind successfully mappings will be
9703 9477 * established to per vnode mapping per lgroup amp pages instead of to vnode
9704 9478 * pages. There's one amp per vnode text mapping per lgroup. Many processes
9705 9479 * may share the same text replication amp. If a suitable amp doesn't already
9706 9480 * exist in svntr hash table create a new one. We may fail to bind to amp if
9707 9481 * segment is not eligible for text replication. Code below first checks for
9708 9482 * these conditions. If binding is successful segment tr_state is set to on
9709 9483 * and svd->amp points to the amp to use. Otherwise tr_state is set to off and
9710 9484 * svd->amp remains as NULL.
9711 9485 */
9712 9486 static void
9713 9487 segvn_textrepl(struct seg *seg)
9714 9488 {
9715 9489 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
9716 9490 vnode_t *vp = svd->vp;
9717 9491 u_offset_t off = svd->offset;
9718 9492 size_t size = seg->s_size;
9719 9493 u_offset_t eoff = off + size;
9720 9494 uint_t szc = seg->s_szc;
9721 9495 ulong_t hash = SVNTR_HASH_FUNC(vp);
9722 9496 svntr_t *svntrp;
9723 9497 struct vattr va;
9724 9498 proc_t *p = seg->s_as->a_proc;
9725 9499 lgrp_id_t lgrp_id;
9726 9500 lgrp_id_t olid;
9727 9501 int first;
9728 9502 struct anon_map *amp;
9729 9503
9730 9504 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
9731 9505 ASSERT(SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
9732 9506 ASSERT(p != NULL);
9733 9507 ASSERT(svd->tr_state == SEGVN_TR_INIT);
9734 9508 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
9735 9509 ASSERT(svd->flags & MAP_TEXT);
9736 9510 ASSERT(svd->type == MAP_PRIVATE);
9737 9511 ASSERT(vp != NULL && svd->amp == NULL);
9738 9512 ASSERT(!svd->pageprot && !(svd->prot & PROT_WRITE));
9739 9513 ASSERT(!(svd->flags & MAP_NORESERVE) && svd->swresv == 0);
9740 9514 ASSERT(seg->s_as != &kas);
9741 9515 ASSERT(off < eoff);
9742 9516 ASSERT(svntr_hashtab != NULL);
9743 9517
9744 9518 /*
9745 9519 * If numa optimizations are no longer desired bail out.
9746 9520 */
9747 9521 if (!lgrp_optimizations()) {
9748 9522 svd->tr_state = SEGVN_TR_OFF;
9749 9523 return;
9750 9524 }
9751 9525
9752 9526 /*
9753 9527 * Avoid creating anon maps with size bigger than the file size.
9754 9528 * If VOP_GETATTR() call fails bail out.
9755 9529 */
9756 9530 va.va_mask = AT_SIZE | AT_MTIME | AT_CTIME;
9757 9531 if (VOP_GETATTR(vp, &va, 0, svd->cred, NULL) != 0) {
9758 9532 svd->tr_state = SEGVN_TR_OFF;
9759 9533 SEGVN_TR_ADDSTAT(gaerr);
9760 9534 return;
9761 9535 }
9762 9536 if (btopr(va.va_size) < btopr(eoff)) {
9763 9537 svd->tr_state = SEGVN_TR_OFF;
9764 9538 SEGVN_TR_ADDSTAT(overmap);
9765 9539 return;
9766 9540 }
9767 9541
9768 9542 /*
9769 9543 * VVMEXEC may not be set yet if exec() prefaults text segment. Set
9770 9544 * this flag now before vn_is_mapped(V_WRITE) so that MAP_SHARED
9771 9545 * mapping that checks if trcache for this vnode needs to be
9772 9546 * invalidated can't miss us.
9773 9547 */
9774 9548 if (!(vp->v_flag & VVMEXEC)) {
9775 9549 mutex_enter(&vp->v_lock);
9776 9550 vp->v_flag |= VVMEXEC;
9777 9551 mutex_exit(&vp->v_lock);
9778 9552 }
9779 9553 mutex_enter(&svntr_hashtab[hash].tr_lock);
9780 9554 /*
9781 9555 * Bail out if potentially MAP_SHARED writable mappings exist to this
9782 9556 * vnode. We don't want to use old file contents from existing
9783 9557 * replicas if this mapping was established after the original file
9784 9558 * was changed.
9785 9559 */
9786 9560 if (vn_is_mapped(vp, V_WRITE)) {
9787 9561 mutex_exit(&svntr_hashtab[hash].tr_lock);
9788 9562 svd->tr_state = SEGVN_TR_OFF;
9789 9563 SEGVN_TR_ADDSTAT(wrcnt);
9790 9564 return;
9791 9565 }
9792 9566 svntrp = svntr_hashtab[hash].tr_head;
9793 9567 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
9794 9568 ASSERT(svntrp->tr_refcnt != 0);
9795 9569 if (svntrp->tr_vp != vp) {
9796 9570 continue;
9797 9571 }
9798 9572
9799 9573 /*
9800 9574 * Bail out if the file or its attributes were changed after
9801 9575 * this replication entry was created since we need to use the
9802 9576 * latest file contents. Note that mtime test alone is not
9803 9577 * sufficient because a user can explicitly change mtime via
9804 9578 		 * utimes(2) interfaces back to the old value after modifying
9805 9579 * the file contents. To detect this case we also have to test
9806 9580 * ctime which among other things records the time of the last
9807 9581 * mtime change by utimes(2). ctime is not changed when the file
9808 9582 * is only read or executed so we expect that typically existing
9809 9583 * replication amp's can be used most of the time.
9810 9584 */
9811 9585 if (!svntrp->tr_valid ||
9812 9586 svntrp->tr_mtime.tv_sec != va.va_mtime.tv_sec ||
9813 9587 svntrp->tr_mtime.tv_nsec != va.va_mtime.tv_nsec ||
9814 9588 svntrp->tr_ctime.tv_sec != va.va_ctime.tv_sec ||
9815 9589 svntrp->tr_ctime.tv_nsec != va.va_ctime.tv_nsec) {
9816 9590 mutex_exit(&svntr_hashtab[hash].tr_lock);
9817 9591 svd->tr_state = SEGVN_TR_OFF;
9818 9592 SEGVN_TR_ADDSTAT(stale);
9819 9593 return;
9820 9594 }
9821 9595 /*
9822 9596 * if off, eoff and szc match current segment we found the
9823 9597 * existing entry we can use.
9824 9598 */
9825 9599 if (svntrp->tr_off == off && svntrp->tr_eoff == eoff &&
9826 9600 svntrp->tr_szc == szc) {
9827 9601 break;
9828 9602 }
9829 9603 /*
9830 9604 * Don't create different but overlapping in file offsets
9831 9605 * entries to avoid replication of the same file pages more
9832 9606 * than once per lgroup.
9833 9607 */
9834 9608 if ((off >= svntrp->tr_off && off < svntrp->tr_eoff) ||
9835 9609 (eoff > svntrp->tr_off && eoff <= svntrp->tr_eoff)) {
9836 9610 mutex_exit(&svntr_hashtab[hash].tr_lock);
9837 9611 svd->tr_state = SEGVN_TR_OFF;
9838 9612 SEGVN_TR_ADDSTAT(overlap);
9839 9613 return;
9840 9614 }
9841 9615 }
9842 9616 /*
9843 9617 * If we didn't find existing entry create a new one.
9844 9618 */
9845 9619 if (svntrp == NULL) {
9846 9620 svntrp = kmem_cache_alloc(svntr_cache, KM_NOSLEEP);
9847 9621 if (svntrp == NULL) {
9848 9622 mutex_exit(&svntr_hashtab[hash].tr_lock);
9849 9623 svd->tr_state = SEGVN_TR_OFF;
9850 9624 SEGVN_TR_ADDSTAT(nokmem);
9851 9625 return;
9852 9626 }
9853 9627 #ifdef DEBUG
9854 9628 {
9855 9629 lgrp_id_t i;
9856 9630 for (i = 0; i < NLGRPS_MAX; i++) {
9857 9631 ASSERT(svntrp->tr_amp[i] == NULL);
9858 9632 }
9859 9633 }
9860 9634 #endif /* DEBUG */
9861 9635 svntrp->tr_vp = vp;
9862 9636 svntrp->tr_off = off;
9863 9637 svntrp->tr_eoff = eoff;
9864 9638 svntrp->tr_szc = szc;
9865 9639 svntrp->tr_valid = 1;
9866 9640 svntrp->tr_mtime = va.va_mtime;
9867 9641 svntrp->tr_ctime = va.va_ctime;
9868 9642 svntrp->tr_refcnt = 0;
9869 9643 svntrp->tr_next = svntr_hashtab[hash].tr_head;
9870 9644 svntr_hashtab[hash].tr_head = svntrp;
9871 9645 }
9872 9646 first = 1;
9873 9647 again:
9874 9648 /*
9875 9649 * We want to pick a replica with pages on main thread's (t_tid = 1,
9876 9650 * aka T1) lgrp. Currently text replication is only optimized for
9877 9651 * workloads that either have all threads of a process on the same
9878 9652 * lgrp or execute their large text primarily on main thread.
9879 9653 */
9880 9654 lgrp_id = p->p_t1_lgrpid;
9881 9655 if (lgrp_id == LGRP_NONE) {
9882 9656 /*
9883 9657 * In case exec() prefaults text on non main thread use
9884 9658 * current thread lgrpid. It will become main thread anyway
9885 9659 * soon.
9886 9660 */
9887 9661 lgrp_id = lgrp_home_id(curthread);
9888 9662 }
9889 9663 /*
9890 9664 * Set p_tr_lgrpid to lgrpid if it hasn't been set yet. Otherwise
9891 9665 * just set it to NLGRPS_MAX if it's different from current process T1
9892 9666 * home lgrp. p_tr_lgrpid is used to detect if process uses text
9893 9667 * replication and T1 new home is different from lgrp used for text
9894 9668 	 * replication. When this happens asynchronous segvn thread rechecks if
9895 9669 * segments should change lgrps used for text replication. If we fail
9896 9670 * to set p_tr_lgrpid with atomic_cas_32 then set it to NLGRPS_MAX
9897 9671 * without cas if it's not already NLGRPS_MAX and not equal lgrp_id
9898 9672 * we want to use. We don't need to use cas in this case because
9899 9673 * another thread that races in between our non atomic check and set
9900 9674 * may only change p_tr_lgrpid to NLGRPS_MAX at this point.
9901 9675 */
9902 9676 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
9903 9677 olid = p->p_tr_lgrpid;
9904 9678 if (lgrp_id != olid && olid != NLGRPS_MAX) {
9905 9679 lgrp_id_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
9906 9680 if (atomic_cas_32((uint32_t *)&p->p_tr_lgrpid, olid, nlid) !=
9907 9681 olid) {
9908 9682 olid = p->p_tr_lgrpid;
9909 9683 ASSERT(olid != LGRP_NONE);
9910 9684 if (olid != lgrp_id && olid != NLGRPS_MAX) {
9911 9685 p->p_tr_lgrpid = NLGRPS_MAX;
9912 9686 }
9913 9687 }
9914 9688 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
9915 9689 membar_producer();
9916 9690 /*
9917 9691 * lgrp_move_thread() won't schedule async recheck after
9918 9692 * p->p_t1_lgrpid update unless p->p_tr_lgrpid is not
9919 9693 * LGRP_NONE. Recheck p_t1_lgrpid once now that p->p_tr_lgrpid
9920 9694 * is not LGRP_NONE.
9921 9695 */
9922 9696 if (first && p->p_t1_lgrpid != LGRP_NONE &&
9923 9697 p->p_t1_lgrpid != lgrp_id) {
9924 9698 first = 0;
9925 9699 goto again;
9926 9700 }
9927 9701 }
9928 9702 /*
9929 9703 * If no amp was created yet for lgrp_id create a new one as long as
9930 9704 * we have enough memory to afford it.
9931 9705 */
9932 9706 if ((amp = svntrp->tr_amp[lgrp_id]) == NULL) {
9933 9707 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
9934 9708 if (trmem > segvn_textrepl_max_bytes) {
9935 9709 SEGVN_TR_ADDSTAT(normem);
9936 9710 goto fail;
9937 9711 }
9938 9712 if (anon_try_resv_zone(size, NULL) == 0) {
9939 9713 SEGVN_TR_ADDSTAT(noanon);
9940 9714 goto fail;
9941 9715 }
9942 9716 amp = anonmap_alloc(size, size, ANON_NOSLEEP);
9943 9717 if (amp == NULL) {
9944 9718 anon_unresv_zone(size, NULL);
9945 9719 SEGVN_TR_ADDSTAT(nokmem);
9946 9720 goto fail;
9947 9721 }
9948 9722 ASSERT(amp->refcnt == 1);
9949 9723 amp->a_szc = szc;
9950 9724 svntrp->tr_amp[lgrp_id] = amp;
9951 9725 SEGVN_TR_ADDSTAT(newamp);
9952 9726 }
9953 9727 svntrp->tr_refcnt++;
9954 9728 ASSERT(svd->svn_trnext == NULL);
9955 9729 ASSERT(svd->svn_trprev == NULL);
9956 9730 svd->svn_trnext = svntrp->tr_svnhead;
9957 9731 svd->svn_trprev = NULL;
9958 9732 if (svntrp->tr_svnhead != NULL) {
9959 9733 svntrp->tr_svnhead->svn_trprev = svd;
9960 9734 }
9961 9735 svntrp->tr_svnhead = svd;
9962 9736 ASSERT(amp->a_szc == szc && amp->size == size && amp->swresv == size);
9963 9737 ASSERT(amp->refcnt >= 1);
9964 9738 svd->amp = amp;
9965 9739 svd->anon_index = 0;
9966 9740 svd->tr_policy_info.mem_policy = LGRP_MEM_POLICY_NEXT_SEG;
9967 9741 svd->tr_policy_info.mem_lgrpid = lgrp_id;
9968 9742 svd->tr_state = SEGVN_TR_ON;
9969 9743 mutex_exit(&svntr_hashtab[hash].tr_lock);
9970 9744 SEGVN_TR_ADDSTAT(repl);
9971 9745 return;
9972 9746 fail:
9973 9747 ASSERT(segvn_textrepl_bytes >= size);
9974 9748 atomic_add_long(&segvn_textrepl_bytes, -size);
9975 9749 ASSERT(svntrp != NULL);
9976 9750 ASSERT(svntrp->tr_amp[lgrp_id] == NULL);
9977 9751 if (svntrp->tr_refcnt == 0) {
9978 9752 ASSERT(svntrp == svntr_hashtab[hash].tr_head);
9979 9753 svntr_hashtab[hash].tr_head = svntrp->tr_next;
9980 9754 mutex_exit(&svntr_hashtab[hash].tr_lock);
9981 9755 kmem_cache_free(svntr_cache, svntrp);
9982 9756 } else {
9983 9757 mutex_exit(&svntr_hashtab[hash].tr_lock);
9984 9758 }
9985 9759 svd->tr_state = SEGVN_TR_OFF;
9986 9760 }
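
The long comment ahead of the atomic_cas_32 call above explains why a single CAS plus a plain fallback store is enough: once p_tr_lgrpid leaves LGRP_NONE, a racing writer can only push it toward NLGRPS_MAX, never back. Below is a minimal userland sketch of that one-way update using C11 atomics; tr_lgrpid, note_text_repl_lgrp() and the constant values are invented for illustration and are not kernel interfaces.

#include <stdatomic.h>
#include <stdint.h>

#define LGRP_NONE	(-1)
#define NLGRPS_MAX	64	/* sentinel: "replicas span more than one lgroup" */

/* Hypothetical stand-in for the per-process p_tr_lgrpid field. */
static _Atomic int32_t tr_lgrpid = LGRP_NONE;

/*
 * Record that text replication now uses lgrp_id.  Install it with a CAS;
 * if the CAS loses a race, fall back to a plain store of NLGRPS_MAX.
 * The plain store is safe because any concurrent writer can only move
 * the value to NLGRPS_MAX as well, never back to LGRP_NONE.
 */
static void
note_text_repl_lgrp(int32_t lgrp_id)
{
	int32_t olid = atomic_load(&tr_lgrpid);

	if (lgrp_id == olid || olid == NLGRPS_MAX)
		return;

	int32_t nlid = (olid == LGRP_NONE) ? lgrp_id : NLGRPS_MAX;
	if (!atomic_compare_exchange_strong(&tr_lgrpid, &olid, nlid)) {
		/* olid was refreshed to the current value by the failed CAS. */
		if (olid != lgrp_id && olid != NLGRPS_MAX)
			atomic_store(&tr_lgrpid, NLGRPS_MAX);
	}
}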
9987 9761
9988 9762 /*
9989 9763 * Convert seg back to regular vnode mapping seg by unbinding it from its text
9990 9764 * replication amp. This routine is most typically called when segment is
9991 9765 * unmapped but can also be called when segment no longer qualifies for text
9992 9766 * replication (e.g. due to protection changes). If unload_unmap is set use
9993 9767 * HAT_UNLOAD_UNMAP flag in hat_unload_callback(). If we are the last user of
9994 9768 * svntr free all its anon maps and remove it from the hash table.
9995 9769 */
9996 9770 static void
9997 9771 segvn_textunrepl(struct seg *seg, int unload_unmap)
9998 9772 {
9999 9773 struct segvn_data *svd = (struct segvn_data *)seg->s_data;
10000 9774 vnode_t *vp = svd->vp;
10001 9775 u_offset_t off = svd->offset;
10002 9776 size_t size = seg->s_size;
10003 9777 u_offset_t eoff = off + size;
10004 9778 uint_t szc = seg->s_szc;
10005 9779 ulong_t hash = SVNTR_HASH_FUNC(vp);
10006 9780 svntr_t *svntrp;
10007 9781 svntr_t **prv_svntrp;
10008 9782 lgrp_id_t lgrp_id = svd->tr_policy_info.mem_lgrpid;
10009 9783 lgrp_id_t i;
10010 9784
10011 9785 ASSERT(AS_LOCK_HELD(seg->s_as, &seg->s_as->a_lock));
10012 9786 ASSERT(AS_WRITE_HELD(seg->s_as, &seg->s_as->a_lock) ||
10013 9787 SEGVN_WRITE_HELD(seg->s_as, &svd->lock));
10014 9788 ASSERT(svd->tr_state == SEGVN_TR_ON);
10015 9789 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10016 9790 ASSERT(svd->amp != NULL);
10017 9791 ASSERT(svd->amp->refcnt >= 1);
10018 9792 ASSERT(svd->anon_index == 0);
10019 9793 ASSERT(lgrp_id != LGRP_NONE && lgrp_id < NLGRPS_MAX);
10020 9794 ASSERT(svntr_hashtab != NULL);
10021 9795
10022 9796 mutex_enter(&svntr_hashtab[hash].tr_lock);
10023 9797 prv_svntrp = &svntr_hashtab[hash].tr_head;
10024 9798 for (; (svntrp = *prv_svntrp) != NULL; prv_svntrp = &svntrp->tr_next) {
10025 9799 ASSERT(svntrp->tr_refcnt != 0);
10026 9800 if (svntrp->tr_vp == vp && svntrp->tr_off == off &&
10027 9801 svntrp->tr_eoff == eoff && svntrp->tr_szc == szc) {
10028 9802 break;
10029 9803 }
10030 9804 }
10031 9805 if (svntrp == NULL) {
10032 9806 panic("segvn_textunrepl: svntr record not found");
10033 9807 }
10034 9808 if (svntrp->tr_amp[lgrp_id] != svd->amp) {
10035 9809 panic("segvn_textunrepl: amp mismatch");
10036 9810 }
10037 9811 svd->tr_state = SEGVN_TR_OFF;
10038 9812 svd->amp = NULL;
10039 9813 if (svd->svn_trprev == NULL) {
10040 9814 ASSERT(svntrp->tr_svnhead == svd);
10041 9815 svntrp->tr_svnhead = svd->svn_trnext;
10042 9816 if (svntrp->tr_svnhead != NULL) {
10043 9817 svntrp->tr_svnhead->svn_trprev = NULL;
10044 9818 }
10045 9819 svd->svn_trnext = NULL;
10046 9820 } else {
10047 9821 svd->svn_trprev->svn_trnext = svd->svn_trnext;
10048 9822 if (svd->svn_trnext != NULL) {
10049 9823 svd->svn_trnext->svn_trprev = svd->svn_trprev;
10050 9824 svd->svn_trnext = NULL;
10051 9825 }
10052 9826 svd->svn_trprev = NULL;
10053 9827 }
10054 9828 if (--svntrp->tr_refcnt) {
10055 9829 mutex_exit(&svntr_hashtab[hash].tr_lock);
10056 9830 goto done;
10057 9831 }
10058 9832 *prv_svntrp = svntrp->tr_next;
10059 9833 mutex_exit(&svntr_hashtab[hash].tr_lock);
10060 9834 for (i = 0; i < NLGRPS_MAX; i++) {
10061 9835 struct anon_map *amp = svntrp->tr_amp[i];
10062 9836 if (amp == NULL) {
10063 9837 continue;
10064 9838 }
10065 9839 ASSERT(amp->refcnt == 1);
10066 9840 ASSERT(amp->swresv == size);
10067 9841 ASSERT(amp->size == size);
10068 9842 ASSERT(amp->a_szc == szc);
10069 9843 if (amp->a_szc != 0) {
10070 9844 anon_free_pages(amp->ahp, 0, size, szc);
10071 9845 } else {
10072 9846 anon_free(amp->ahp, 0, size);
10073 9847 }
10074 9848 svntrp->tr_amp[i] = NULL;
10075 9849 ASSERT(segvn_textrepl_bytes >= size);
10076 9850 atomic_add_long(&segvn_textrepl_bytes, -size);
10077 9851 anon_unresv_zone(amp->swresv, NULL);
10078 9852 amp->refcnt = 0;
10079 9853 anonmap_free(amp);
10080 9854 }
10081 9855 kmem_cache_free(svntr_cache, svntrp);
10082 9856 done:
10083 9857 hat_unload_callback(seg->s_as->a_hat, seg->s_base, size,
10084 9858 unload_unmap ? HAT_UNLOAD_UNMAP : 0, NULL);
10085 9859 }
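
segvn_textunrepl() walks the hash chain through a pointer-to-pointer (prv_svntrp) so the matching entry can be unlinked without special-casing the list head. A small self-contained sketch of the same idiom follows; struct tr_ent and tr_unlink() are made-up names, not kernel interfaces.

#include <stddef.h>

/* Simplified, hypothetical stand-in for svntr_t. */
struct tr_ent {
	struct tr_ent	*tr_next;
	int		tr_key;
};

/*
 * Unlink and return the first entry whose key matches, walking the chain
 * through a pointer-to-pointer the way segvn_textunrepl() walks
 * prv_svntrp, so removing the head needs no special case.
 */
static struct tr_ent *
tr_unlink(struct tr_ent **headp, int key)
{
	struct tr_ent **prvp;
	struct tr_ent *ent;

	for (prvp = headp; (ent = *prvp) != NULL; prvp = &ent->tr_next) {
		if (ent->tr_key == key) {
			*prvp = ent->tr_next;
			ent->tr_next = NULL;
			return (ent);
		}
	}
	return (NULL);
}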
10086 9860
10087 9861 /*
10088 9862 * This is called when a MAP_SHARED writable mapping is created to a vnode
10089 9863 * that is currently used for execution (VVMEXEC flag is set). In this case we
10090 9864 * need to prevent further use of existing replicas.
10091 9865 */
10092 9866 static void
10093 9867 segvn_inval_trcache(vnode_t *vp)
10094 9868 {
10095 9869 ulong_t hash = SVNTR_HASH_FUNC(vp);
10096 9870 svntr_t *svntrp;
10097 9871
10098 9872 ASSERT(vp->v_flag & VVMEXEC);
10099 9873
10100 9874 if (svntr_hashtab == NULL) {
10101 9875 return;
10102 9876 }
10103 9877
10104 9878 mutex_enter(&svntr_hashtab[hash].tr_lock);
10105 9879 svntrp = svntr_hashtab[hash].tr_head;
10106 9880 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
10107 9881 ASSERT(svntrp->tr_refcnt != 0);
10108 9882 if (svntrp->tr_vp == vp && svntrp->tr_valid) {
10109 9883 svntrp->tr_valid = 0;
10110 9884 }
10111 9885 }
10112 9886 mutex_exit(&svntr_hashtab[hash].tr_lock);
10113 9887 }
10114 9888
10115 9889 static void
10116 9890 segvn_trasync_thread(void)
10117 9891 {
10118 9892 callb_cpr_t cpr_info;
10119 9893 kmutex_t cpr_lock; /* just for CPR stuff */
10120 9894
10121 9895 mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
10122 9896
10123 9897 CALLB_CPR_INIT(&cpr_info, &cpr_lock,
10124 9898 callb_generic_cpr, "segvn_async");
10125 9899
10126 9900 if (segvn_update_textrepl_interval == 0) {
10127 9901 segvn_update_textrepl_interval = segvn_update_tr_time * hz;
10128 9902 } else {
10129 9903 segvn_update_textrepl_interval *= hz;
10130 9904 }
10131 9905 (void) timeout(segvn_trupdate_wakeup, NULL,
10132 9906 segvn_update_textrepl_interval);
10133 9907
10134 9908 for (;;) {
10135 9909 mutex_enter(&cpr_lock);
10136 9910 CALLB_CPR_SAFE_BEGIN(&cpr_info);
10137 9911 mutex_exit(&cpr_lock);
10138 9912 sema_p(&segvn_trasync_sem);
10139 9913 mutex_enter(&cpr_lock);
10140 9914 CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
10141 9915 mutex_exit(&cpr_lock);
10142 9916 segvn_trupdate();
10143 9917 }
10144 9918 }
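
The async thread above simply parks on segvn_trasync_sem and runs one segvn_trupdate() pass per wakeup; the CPR calls only make the sleep safe across suspend/resume. A rough userland analogue of the consumer side, with hypothetical names (trasync_sem, trupdate_pass) standing in for the kernel objects:

#include <semaphore.h>
#include <stdio.h>

/* Hypothetical analogue of segvn_trasync_sem; sem_init() is assumed elsewhere. */
static sem_t trasync_sem;

/* Stand-in for segvn_trupdate(): one placement-recheck pass. */
static void
trupdate_pass(void)
{
	puts("rechecking text replication placement");
}

/*
 * Consumer side: block until the periodic tick decides a pass is worth
 * running, then run exactly one pass.  The kernel thread additionally
 * brackets the sleep with CALLB_CPR_SAFE_BEGIN/END so the system can
 * suspend while the thread is parked; that has no userland equivalent.
 */
static void *
trasync_worker(void *arg)
{
	(void) arg;
	for (;;) {
		if (sem_wait(&trasync_sem) != 0)
			continue;	/* interrupted by a signal; retry */
		trupdate_pass();
	}
	return (NULL);
}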
10145 9919
10146 9920 static uint64_t segvn_lgrp_trthr_migrs_snpsht = 0;
10147 9921
10148 9922 static void
10149 9923 segvn_trupdate_wakeup(void *dummy)
10150 9924 {
10151 9925 uint64_t cur_lgrp_trthr_migrs = lgrp_get_trthr_migrations();
10152 9926
10153 9927 if (cur_lgrp_trthr_migrs != segvn_lgrp_trthr_migrs_snpsht) {
10154 9928 segvn_lgrp_trthr_migrs_snpsht = cur_lgrp_trthr_migrs;
10155 9929 sema_v(&segvn_trasync_sem);
10156 9930 }
10157 9931
10158 9932 if (!segvn_disable_textrepl_update &&
10159 9933 segvn_update_textrepl_interval != 0) {
10160 9934 (void) timeout(segvn_trupdate_wakeup, dummy,
10161 9935 segvn_update_textrepl_interval);
10162 9936 }
10163 9937 }
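
segvn_trupdate_wakeup() posts the semaphore only when the lgroup thread-migration counter has moved since the last tick, so idle workloads never wake the worker. Here is a sketch of that snapshot-and-compare producer, pairing with the worker sketch above; trthr_migrations and trupdate_tick() are invented stand-ins:

#include <stdatomic.h>
#include <stdint.h>
#include <semaphore.h>

/* Hypothetical counter bumped whenever a thread changes its home lgroup. */
static _Atomic uint64_t trthr_migrations;
/* Same semaphore the worker sketch above waits on. */
static sem_t trasync_sem;
/* Last counter value seen by the tick. */
static uint64_t migrs_snapshot;

/*
 * Periodic tick: wake the worker only if threads actually migrated since
 * the previous tick; otherwise stay quiet.  The caller rearms the timer,
 * as segvn_trupdate_wakeup() does via timeout().
 */
static void
trupdate_tick(void)
{
	uint64_t cur = atomic_load(&trthr_migrations);

	if (cur != migrs_snapshot) {
		migrs_snapshot = cur;
		(void) sem_post(&trasync_sem);
	}
}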
10164 9938
10165 9939 static void
10166 9940 segvn_trupdate(void)
10167 9941 {
10168 9942 ulong_t hash;
10169 9943 svntr_t *svntrp;
10170 9944 segvn_data_t *svd;
10171 9945
10172 9946 ASSERT(svntr_hashtab != NULL);
10173 9947
10174 9948 for (hash = 0; hash < svntr_hashtab_sz; hash++) {
10175 9949 mutex_enter(&svntr_hashtab[hash].tr_lock);
10176 9950 svntrp = svntr_hashtab[hash].tr_head;
10177 9951 for (; svntrp != NULL; svntrp = svntrp->tr_next) {
10178 9952 ASSERT(svntrp->tr_refcnt != 0);
10179 9953 svd = svntrp->tr_svnhead;
10180 9954 for (; svd != NULL; svd = svd->svn_trnext) {
10181 9955 segvn_trupdate_seg(svd->seg, svd, svntrp,
10182 9956 hash);
10183 9957 }
10184 9958 }
10185 9959 mutex_exit(&svntr_hashtab[hash].tr_lock);
10186 9960 }
10187 9961 }
10188 9962
10189 9963 static void
10190 9964 segvn_trupdate_seg(struct seg *seg,
10191 9965 segvn_data_t *svd,
10192 9966 svntr_t *svntrp,
10193 9967 ulong_t hash)
10194 9968 {
10195 9969 proc_t *p;
10196 9970 lgrp_id_t lgrp_id;
10197 9971 struct as *as;
10198 9972 size_t size;
10199 9973 struct anon_map *amp;
10200 9974
10201 9975 ASSERT(svd->vp != NULL);
10202 9976 ASSERT(svd->vp == svntrp->tr_vp);
10203 9977 ASSERT(svd->offset == svntrp->tr_off);
10204 9978 ASSERT(svd->offset + seg->s_size == svntrp->tr_eoff);
10205 9979 ASSERT(seg != NULL);
10206 9980 ASSERT(svd->seg == seg);
10207 9981 ASSERT(seg->s_data == (void *)svd);
10208 9982 ASSERT(seg->s_szc == svntrp->tr_szc);
10209 9983 ASSERT(svd->tr_state == SEGVN_TR_ON);
10210 9984 ASSERT(!HAT_IS_REGION_COOKIE_VALID(svd->rcookie));
10211 9985 ASSERT(svd->amp != NULL);
10212 9986 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10213 9987 ASSERT(svd->tr_policy_info.mem_lgrpid != LGRP_NONE);
10214 9988 ASSERT(svd->tr_policy_info.mem_lgrpid < NLGRPS_MAX);
10215 9989 ASSERT(svntrp->tr_amp[svd->tr_policy_info.mem_lgrpid] == svd->amp);
10216 9990 ASSERT(svntrp->tr_refcnt != 0);
10217 9991 ASSERT(mutex_owned(&svntr_hashtab[hash].tr_lock));
10218 9992
10219 9993 as = seg->s_as;
10220 9994 ASSERT(as != NULL && as != &kas);
10221 9995 p = as->a_proc;
10222 9996 ASSERT(p != NULL);
10223 9997 ASSERT(p->p_tr_lgrpid != LGRP_NONE);
10224 9998 lgrp_id = p->p_t1_lgrpid;
10225 9999 if (lgrp_id == LGRP_NONE) {
10226 10000 return;
10227 10001 }
10228 10002 ASSERT(lgrp_id < NLGRPS_MAX);
10229 10003 if (svd->tr_policy_info.mem_lgrpid == lgrp_id) {
10230 10004 return;
10231 10005 }
10232 10006
10233 10007 /*
10234 10008 * Use tryenter locking since we are locking as/seg and svntr hash
10235 10009 	 * lock in reverse from synchronous thread order.
10236 10010 */
10237 10011 if (!AS_LOCK_TRYENTER(as, &as->a_lock, RW_READER)) {
10238 10012 SEGVN_TR_ADDSTAT(nolock);
10239 10013 if (segvn_lgrp_trthr_migrs_snpsht) {
10240 10014 segvn_lgrp_trthr_migrs_snpsht = 0;
10241 10015 }
10242 10016 return;
10243 10017 }
10244 10018 if (!SEGVN_LOCK_TRYENTER(seg->s_as, &svd->lock, RW_WRITER)) {
10245 10019 AS_LOCK_EXIT(as, &as->a_lock);
10246 10020 SEGVN_TR_ADDSTAT(nolock);
10247 10021 if (segvn_lgrp_trthr_migrs_snpsht) {
10248 10022 segvn_lgrp_trthr_migrs_snpsht = 0;
10249 10023 }
10250 10024 return;
10251 10025 }
10252 10026 size = seg->s_size;
10253 10027 if (svntrp->tr_amp[lgrp_id] == NULL) {
10254 10028 size_t trmem = atomic_add_long_nv(&segvn_textrepl_bytes, size);
10255 10029 if (trmem > segvn_textrepl_max_bytes) {
10256 10030 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10257 10031 AS_LOCK_EXIT(as, &as->a_lock);
10258 10032 atomic_add_long(&segvn_textrepl_bytes, -size);
10259 10033 SEGVN_TR_ADDSTAT(normem);
10260 10034 return;
10261 10035 }
10262 10036 if (anon_try_resv_zone(size, NULL) == 0) {
10263 10037 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10264 10038 AS_LOCK_EXIT(as, &as->a_lock);
10265 10039 atomic_add_long(&segvn_textrepl_bytes, -size);
10266 10040 SEGVN_TR_ADDSTAT(noanon);
10267 10041 return;
10268 10042 }
10269 10043 amp = anonmap_alloc(size, size, KM_NOSLEEP);
10270 10044 if (amp == NULL) {
10271 10045 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10272 10046 AS_LOCK_EXIT(as, &as->a_lock);
10273 10047 atomic_add_long(&segvn_textrepl_bytes, -size);
10274 10048 anon_unresv_zone(size, NULL);
10275 10049 SEGVN_TR_ADDSTAT(nokmem);
10276 10050 return;
10277 10051 }
10278 10052 ASSERT(amp->refcnt == 1);
10279 10053 amp->a_szc = seg->s_szc;
10280 10054 svntrp->tr_amp[lgrp_id] = amp;
10281 10055 }
10282 10056 /*
10283 10057 * We don't need to drop the bucket lock but here we give other
10284 10058 * threads a chance. svntr and svd can't be unlinked as long as
10285 10059 * segment lock is held as a writer and AS held as well. After we
10286 10060 * retake bucket lock we'll continue from where we left. We'll be able
10287 10061 * to reach the end of either list since new entries are always added
10288 10062 * to the beginning of the lists.
10289 10063 */
10290 10064 mutex_exit(&svntr_hashtab[hash].tr_lock);
10291 10065 hat_unload_callback(as->a_hat, seg->s_base, size, 0, NULL);
10292 10066 mutex_enter(&svntr_hashtab[hash].tr_lock);
10293 10067
10294 10068 ASSERT(svd->tr_state == SEGVN_TR_ON);
10295 10069 ASSERT(svd->amp != NULL);
10296 10070 ASSERT(svd->tr_policy_info.mem_policy == LGRP_MEM_POLICY_NEXT_SEG);
10297 10071 ASSERT(svd->tr_policy_info.mem_lgrpid != lgrp_id);
10298 10072 ASSERT(svd->amp != svntrp->tr_amp[lgrp_id]);
10299 10073
10300 10074 svd->tr_policy_info.mem_lgrpid = lgrp_id;
10301 10075 svd->amp = svntrp->tr_amp[lgrp_id];
10302 10076 p->p_tr_lgrpid = NLGRPS_MAX;
10303 10077 SEGVN_LOCK_EXIT(seg->s_as, &svd->lock);
10304 10078 AS_LOCK_EXIT(as, &as->a_lock);
10305 10079
10306 10080 ASSERT(svntrp->tr_refcnt != 0);
10307 10081 ASSERT(svd->vp == svntrp->tr_vp);
10308 10082 ASSERT(svd->tr_policy_info.mem_lgrpid == lgrp_id);
10309 10083 ASSERT(svd->amp != NULL && svd->amp == svntrp->tr_amp[lgrp_id]);
10310 10084 ASSERT(svd->seg == seg);
10311 10085 ASSERT(svd->tr_state == SEGVN_TR_ON);
10312 10086
10313 10087 SEGVN_TR_ADDSTAT(asyncrepl);
10314 10088 }
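
Both segvn_textrepl() and segvn_trupdate_seg() charge a global budget (segvn_textrepl_bytes) optimistically and roll the charge back if the cap is exceeded or a later allocation fails. A minimal sketch of that charge/rollback pattern with C11 atomics; textrepl_charge(), textrepl_uncharge() and the cap value are assumptions for illustration only:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical analogues of segvn_textrepl_bytes / segvn_textrepl_max_bytes. */
static _Atomic size_t textrepl_bytes;
static size_t textrepl_max_bytes = 64 * 1024 * 1024;	/* arbitrary cap for the sketch */

/*
 * Charge size bytes against the replication budget: add first, then undo
 * the add if the new total busts the cap.  Returns true if the charge
 * stands.  This mirrors the atomic_add_long_nv() check followed by the
 * atomic_add_long(..., -size) rollback on the fail: path above.
 */
static bool
textrepl_charge(size_t size)
{
	size_t newtotal = atomic_fetch_add(&textrepl_bytes, size) + size;

	if (newtotal > textrepl_max_bytes) {
		atomic_fetch_sub(&textrepl_bytes, size);
		return (false);
	}
	return (true);
}

/* Undo a successful charge, e.g. when a later allocation fails. */
static void
textrepl_uncharge(size_t size)
{
	atomic_fetch_sub(&textrepl_bytes, size);
}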
[... 606 lines elided ...]