Print this page
remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory. The code is still there and, in theory, runs when we get *extremely*
low on memory. In practice it never runs, because the definition of
low-on-memory is antiquated. (XXX: spell out precisely why the threshold is
antiquated)
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
remove xhat
The xhat infrastructure was added to support hardware such as the zulu
graphics card - hardware which had on-board MMUs. The VM used the xhat code
to keep the CPU's and Zulu's page tables in sync. Since the only xhat user
was zulu (which is gone), we can safely remove it, simplifying the whole VM
subsystem.
Assorted notes:
- AS_BUSY flag was used solely by xhat
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/vm/as.h
+++ new/usr/src/uts/common/vm/as.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
28 28 */
29 29
30 30 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
31 31 /* All Rights Reserved */
32 32
33 33 /*
34 34 * University Copyright- Copyright (c) 1982, 1986, 1988
35 35 * The Regents of the University of California
36 36 * All Rights Reserved
37 37 *
38 38 * University Acknowledgment- Portions of this document are derived from
39 39 * software developed by the University of California, Berkeley, and its
40 40 * contributors.
41 41 */
42 42
43 43 #ifndef _VM_AS_H
44 44 #define _VM_AS_H
45 45
46 46 #include <sys/watchpoint.h>
47 47 #include <vm/seg.h>
48 48 #include <vm/faultcode.h>
49 49 #include <vm/hat.h>
50 50 #include <sys/avl.h>
51 51 #include <sys/proc.h>
52 52
53 53 #ifdef __cplusplus
54 54 extern "C" {
55 55 #endif
56 56
57 57 /*
58 58 * VM - Address spaces.
59 59 */
60 60
61 61 /*
62 62 * Each address space consists of a sorted list of segments
63 63 * and machine dependent address translation information.
64 64 *
65 65 * All the hard work is in the segment drivers and the
66 66 * hardware address translation code.
67 67 *
68 68 * The segment list is represented as an AVL tree.
69 69 *
70 70 * The address space lock (a_lock) is a long term lock which serializes
71 71 * access to certain operations (as_map, as_unmap) and protects the
72 72 * underlying generic segment data (seg.h) along with some fields in the
73 73 * address space structure as shown below:
74 74 *
75 75 * address space structure segment structure
76 76 *
77 77 * a_segtree s_base
78 78 * a_size s_size
79 79 * a_lastgap s_link
80 80 * a_seglast s_ops
81 81 * s_as
82 82 * s_data
83 83 *
84 84 * The address space contents lock (a_contents) is a short term
85 85 * lock that protects most of the data in the address space structure.
86 86 * This lock is always acquired after the "a_lock" in all situations
87 87 * except while dealing with AS_CLAIMGAP to avoid deadlocks.
88 88 *
89 89 * The following fields are protected by this lock:
90 90 *
91 91 * a_flags (AS_PAGLCK, AS_CLAIMGAP, etc.)
92 92 * a_unmapwait
93 93 * a_seglast
94 94 *
95 95 * The address space lock (a_lock) is always held prior to any segment
96 96 * operation. Some segment drivers use the address space lock to protect
97 97 * some or all of their segment private data, provided the version of
98 98 * "a_lock" (read vs. write) is consistent with the use of the data.
99 99 *
100 100 * The following fields are protected by the hat layer lock:
101 101 *
102 102 * a_vbits
103 103 * a_hat
104 104 * a_hrm
105 105 */
106 106
107 107 struct as {
108 108 kmutex_t a_contents; /* protect certain fields in the structure */
109 109 uchar_t a_flags; /* as attributes */
110 110 uchar_t a_vbits; /* used for collecting statistics */
111 111 kcondvar_t a_cv; /* used by as_rangelock */
112 112 struct hat *a_hat; /* hat structure */
113 113 struct hrmstat *a_hrm; /* ref and mod bits */
114 114 caddr_t a_userlimit; /* highest allowable address in this as */
115 115 struct seg *a_seglast; /* last segment hit on the addr space */
116 116 krwlock_t a_lock; /* protects segment related fields */
117 117 size_t a_size; /* total size of address space */
↓ open down ↓ |
117 lines elided |
↑ open up ↑ |
118 118 struct seg *a_lastgap; /* last seg found by as_gap() w/ AS_HI (mmap) */
119 119 struct seg *a_lastgaphl; /* last seg saved in as_gap() either for */
120 120 /* AS_HI or AS_LO used in as_addseg() */
121 121 avl_tree_t a_segtree; /* segments in this address space. (AVL tree) */
122 122 avl_tree_t a_wpage; /* watched pages (procfs) */
123 123 uchar_t a_updatedir; /* mappings changed, rebuild a_objectdir */
124 124 timespec_t a_updatetime; /* time when mappings last changed */
125 125 vnode_t **a_objectdir; /* object directory (procfs) */
126 126 size_t a_sizedir; /* size of object directory */
127 127 struct as_callback *a_callbacks; /* callback list */
128 - void *a_xhat; /* list of xhat providers */
129 128 proc_t *a_proc; /* back pointer to proc */
130 129 size_t a_resvsize; /* size of reserved part of address space */
131 130 };
132 131
133 132 #define AS_PAGLCK 0x80
134 133 #define AS_CLAIMGAP 0x40
135 134 #define AS_UNMAPWAIT 0x20
136 135 #define AS_NEEDSPURGE 0x10 /* mostly for seg_nf, see as_purge() */
137 136 #define AS_NOUNMAPWAIT 0x02
138 -#define AS_BUSY 0x01 /* needed by XHAT framework */
139 137
140 138 #define AS_ISPGLCK(as) ((as)->a_flags & AS_PAGLCK)
141 139 #define AS_ISCLAIMGAP(as) ((as)->a_flags & AS_CLAIMGAP)
142 140 #define AS_ISUNMAPWAIT(as) ((as)->a_flags & AS_UNMAPWAIT)
143 -#define AS_ISBUSY(as) ((as)->a_flags & AS_BUSY)
144 141 #define AS_ISNOUNMAPWAIT(as) ((as)->a_flags & AS_NOUNMAPWAIT)
145 142
146 143 #define AS_SETPGLCK(as) ((as)->a_flags |= AS_PAGLCK)
147 144 #define AS_SETCLAIMGAP(as) ((as)->a_flags |= AS_CLAIMGAP)
148 145 #define AS_SETUNMAPWAIT(as) ((as)->a_flags |= AS_UNMAPWAIT)
149 -#define AS_SETBUSY(as) ((as)->a_flags |= AS_BUSY)
150 146 #define AS_SETNOUNMAPWAIT(as) ((as)->a_flags |= AS_NOUNMAPWAIT)
151 147
152 148 #define AS_CLRPGLCK(as) ((as)->a_flags &= ~AS_PAGLCK)
153 149 #define AS_CLRCLAIMGAP(as) ((as)->a_flags &= ~AS_CLAIMGAP)
154 150 #define AS_CLRUNMAPWAIT(as) ((as)->a_flags &= ~AS_UNMAPWAIT)
155 -#define AS_CLRBUSY(as) ((as)->a_flags &= ~AS_BUSY)
156 151 #define AS_CLRNOUNMAPWAIT(as) ((as)->a_flags &= ~AS_NOUNMAPWAIT)
157 152
158 153 #define AS_TYPE_64BIT(as) \
159 154 (((as)->a_userlimit > (caddr_t)UINT32_MAX) ? 1 : 0)
160 155
161 156 /*
162 157 * Flags for as_map/as_map_ansegs
163 158 */
164 159 #define AS_MAP_NO_LPOOB ((uint_t)-1)
165 160 #define AS_MAP_HEAP ((uint_t)-2)
166 161 #define AS_MAP_STACK ((uint_t)-3)
167 162
168 163 /*
169 164 * The as_callback is the basic structure which supports the ability to
170 165 * inform clients of specific events pertaining to address space management.
171 166 * A user calls as_add_callback to register an address space callback
172 167 * for a range of pages, specifying the events that need to occur.
173 168 * When as_do_callbacks is called and finds a 'matching' entry, the
174 169 * callback is called once, and the callback function MUST call
175 170 * as_delete_callback when all callback activities are complete.
176 171 * The thread calling as_do_callbacks blocks until the as_delete_callback
177 172 * is called. This allows for asynchronous events to subside before the
178 173 * as_do_callbacks thread continues.
179 174 *
180 175 * An example of the need for this is a driver which has done long-term
181 176 * locking of memory. Address space management operations (events) such
182 177 * as as_free, as_unmap, and as_setprot will block indefinitely until the
183 178 * pertinent memory is unlocked. The callback mechanism provides the
184 179 * way to inform the driver of the event so that the driver may do the
185 180 * necessary unlocking.
186 181 *
187 182 * The contents of this structure is protected by a_contents lock
188 183 */
189 184 typedef void (*callback_func_t)(struct as *, void *, uint_t);
190 185 struct as_callback {
191 186 struct as_callback *ascb_next; /* list link */
192 187 uint_t ascb_events; /* event types */
193 188 callback_func_t ascb_func; /* callback function */
194 189 void *ascb_arg; /* callback argument */
195 190 caddr_t ascb_saddr; /* start address */
196 191 size_t ascb_len; /* address range */
197 192 };
198 193 /*
199 194 * Callback events
200 195 */
201 196 #define AS_FREE_EVENT 0x1
202 197 #define AS_SETPROT_EVENT 0x2
203 198 #define AS_UNMAP_EVENT 0x4
204 199 #define AS_CALLBACK_CALLED ((uint_t)(1U << (8 * sizeof (uint_t) - 1U)))
205 200 #define AS_UNMAPWAIT_EVENT \
206 201 (AS_FREE_EVENT | AS_SETPROT_EVENT | AS_UNMAP_EVENT)
207 202 #define AS_ALL_EVENT \
208 203 (AS_FREE_EVENT | AS_SETPROT_EVENT | AS_UNMAP_EVENT)
209 204
210 205
211 206 /* Return code values for as_callback_delete */
212 207 enum as_cbdelete_rc {
213 208 AS_CALLBACK_DELETED,
214 209 AS_CALLBACK_NOTFOUND,
215 210 AS_CALLBACK_DELETE_DEFERRED
216 211 };
217 212
218 213 #ifdef _KERNEL
219 214
220 215 /*
221 216 * Flags for as_gap.
222 217 */
223 218 #define AH_DIR 0x1 /* direction flag mask */
224 219 #define AH_LO 0x0 /* find lowest hole */
225 220 #define AH_HI 0x1 /* find highest hole */
226 221 #define AH_CONTAIN 0x2 /* hole must contain `addr' */
227 222
228 223 extern struct as kas; /* kernel's address space */
229 224
230 225 /*
231 226 * Macros for address space locking. Note that we use RW_READER_STARVEWRITER
232 227 * whenever we acquire the address space lock as reader to assure that it can
233 228 * be used without regard to lock order in conjunction with filesystem locks.
234 229 * This allows filesystems to safely induce user-level page faults with
235 230 * filesystem locks held while concurrently allowing filesystem entry points
236 231 * acquiring those same locks to be called with the address space lock held as
237 232 * reader. RW_READER_STARVEWRITER thus prevents reader/reader+RW_WRITE_WANTED
238 233 * deadlocks in the style of fop_write()+as_fault()/as_*()+fop_putpage() and
239 234 * fop_read()+as_fault()/as_*()+fop_getpage(). (See the Big Theory Statement
240 235 * in rwlock.c for more information on the semantics of and motivation behind
241 236 * RW_READER_STARVEWRITER.)
242 237 */
243 238 #define AS_LOCK_ENTER(as, lock, type) rw_enter((lock), \
244 239 (type) == RW_READER ? RW_READER_STARVEWRITER : (type))
245 240 #define AS_LOCK_EXIT(as, lock) rw_exit((lock))
246 241 #define AS_LOCK_DESTROY(as, lock) rw_destroy((lock))
247 242 #define AS_LOCK_TRYENTER(as, lock, type) rw_tryenter((lock), \
248 243 (type) == RW_READER ? RW_READER_STARVEWRITER : (type))
249 244
250 245 /*
251 246 * Macros to test lock states.
252 247 */
253 248 #define AS_LOCK_HELD(as, lock) RW_LOCK_HELD((lock))
254 249 #define AS_READ_HELD(as, lock) RW_READ_HELD((lock))
255 250 #define AS_WRITE_HELD(as, lock) RW_WRITE_HELD((lock))
256 251
257 252 /*
258 253 * macros to walk thru segment lists
259 254 */
260 255 #define AS_SEGFIRST(as) avl_first(&(as)->a_segtree)
261 256 #define AS_SEGNEXT(as, seg) AVL_NEXT(&(as)->a_segtree, (seg))
262 257 #define AS_SEGPREV(as, seg) AVL_PREV(&(as)->a_segtree, (seg))
263 258
264 259 void as_init(void);
265 260 void as_avlinit(struct as *);
266 261 struct seg *as_segat(struct as *as, caddr_t addr);
267 262 void as_rangelock(struct as *as);
268 263 void as_rangeunlock(struct as *as);
269 264 struct as *as_alloc();
270 265 void as_free(struct as *as);
271 266 int as_dup(struct as *as, struct proc *forkedproc);
272 267 struct seg *as_findseg(struct as *as, caddr_t addr, int tail);
273 268 int as_addseg(struct as *as, struct seg *newseg);
274 269 struct seg *as_removeseg(struct as *as, struct seg *seg);
275 270 faultcode_t as_fault(struct hat *hat, struct as *as, caddr_t addr, size_t size,
276 271 enum fault_type type, enum seg_rw rw);
277 272 faultcode_t as_faulta(struct as *as, caddr_t addr, size_t size);
278 273 int as_setprot(struct as *as, caddr_t addr, size_t size, uint_t prot);
279 274 int as_checkprot(struct as *as, caddr_t addr, size_t size, uint_t prot);
280 275 int as_unmap(struct as *as, caddr_t addr, size_t size);
↓ open down ↓ |
115 lines elided |
↑ open up ↑ |
281 276 int as_map(struct as *as, caddr_t addr, size_t size, int ((*crfp)()),
282 277 void *argsp);
283 278 void as_purge(struct as *as);
284 279 int as_gap(struct as *as, size_t minlen, caddr_t *basep, size_t *lenp,
285 280 uint_t flags, caddr_t addr);
286 281 int as_gap_aligned(struct as *as, size_t minlen, caddr_t *basep,
287 282 size_t *lenp, uint_t flags, caddr_t addr, size_t align,
288 283 size_t redzone, size_t off);
289 284
290 285 int as_memory(struct as *as, caddr_t *basep, size_t *lenp);
291 -size_t as_swapout(struct as *as);
292 286 int as_incore(struct as *as, caddr_t addr, size_t size, char *vec,
293 287 size_t *sizep);
294 288 int as_ctl(struct as *as, caddr_t addr, size_t size, int func, int attr,
295 289 uintptr_t arg, ulong_t *lock_map, size_t pos);
296 290 int as_pagelock(struct as *as, struct page ***ppp, caddr_t addr,
297 291 size_t size, enum seg_rw rw);
298 292 void as_pageunlock(struct as *as, struct page **pp, caddr_t addr,
299 293 size_t size, enum seg_rw rw);
300 294 int as_setpagesize(struct as *as, caddr_t addr, size_t size, uint_t szc,
301 295 boolean_t wait);
302 296 int as_set_default_lpsize(struct as *as, caddr_t addr, size_t size);
303 297 void as_setwatch(struct as *as);
304 298 void as_clearwatch(struct as *as);
305 299 int as_getmemid(struct as *, caddr_t, memid_t *);
306 300
307 301 int as_add_callback(struct as *, void (*)(), void *, uint_t,
308 302 caddr_t, size_t, int);
309 303 uint_t as_delete_callback(struct as *, void *);
310 304
311 305 #endif /* _KERNEL */
312 306
313 307 #ifdef __cplusplus
314 308 }
315 309 #endif
316 310
317 311 #endif /* _VM_AS_H */
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX