remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory. The code is still there, and in theory it runs when the system gets
*extremely* low on memory. In practice it never runs, because the definition
of low-on-memory is antiquated. (XXX: define what antiquated means)
You can check the number of swap-in and swap-out events with kstat(1M):
$ kstat -p ::vm:swapin ::vm:swapout
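
For reference, here is a minimal C sketch that sums the same counters across
CPUs with libkstat. It assumes the swapin/swapout statistics live in the
per-CPU cpu::vm named kstats that the command above matches; link with
-lkstat.

/*
 * Sketch: sum swapin/swapout across all cpu::vm kstat instances.
 * Assumes the counters are 64-bit named kstats; cc swapstat.c -lkstat
 */
#include <kstat.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	kstat_ctl_t *kc = kstat_open();
	uint64_t swapin = 0, swapout = 0;
	kstat_t *ksp;

	if (kc == NULL) {
		perror("kstat_open");
		return (1);
	}

	/* Walk the kstat chain and pick out every cpu::vm instance. */
	for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
		kstat_named_t *si, *so;

		if (strcmp(ksp->ks_module, "cpu") != 0 ||
		    strcmp(ksp->ks_name, "vm") != 0)
			continue;

		if (kstat_read(kc, ksp, NULL) == -1)
			continue;

		si = kstat_data_lookup(ksp, "swapin");
		so = kstat_data_lookup(ksp, "swapout");
		if (si != NULL)
			swapin += si->value.ui64;
		if (so != NULL)
			swapout += so->value.ui64;
	}

	(void) printf("swapin=%llu swapout=%llu\n",
	    (u_longlong_t)swapin, (u_longlong_t)swapout);

	(void) kstat_close(kc);
	return (0);
}

On a machine where the swapper never triggers, both counters should stay at
zero, which is the point the description above is making.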
--- old/usr/src/uts/common/sys/disp.h
+++ new/usr/src/uts/common/sys/disp.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
27 27 /* All Rights Reserved */
28 28
29 29
30 30 #ifndef _SYS_DISP_H
31 31 #define _SYS_DISP_H
32 32
33 -#pragma ident "%Z%%M% %I% %E% SMI" /* SVr4.0 1.11 */
34 -
35 33 #include <sys/priocntl.h>
36 34 #include <sys/thread.h>
37 35 #include <sys/class.h>
38 36
39 37 #ifdef __cplusplus
40 38 extern "C" {
41 39 #endif
42 40
43 41 /*
44 42 * The following is the format of a dispatcher queue entry.
45 43 */
46 44 typedef struct dispq {
47 45 kthread_t *dq_first; /* first thread on queue or NULL */
48 46 kthread_t *dq_last; /* last thread on queue or NULL */
49 47 int dq_sruncnt; /* number of loaded, runnable */
50 48 /* threads on queue */
51 49 } dispq_t;
52 50
53 51 /*
54 52 * Dispatch queue structure.
55 53 */
56 54 typedef struct _disp {
57 55 disp_lock_t disp_lock; /* protects dispatching fields */
58 56 pri_t disp_npri; /* # of priority levels in queue */
59 57 dispq_t *disp_q; /* the dispatch queue */
60 58 dispq_t *disp_q_limit; /* ptr past end of dispatch queue */
61 59 ulong_t *disp_qactmap; /* bitmap of active dispatch queues */
62 60
63 61 /*
64 62 * Priorities:
65 63 * disp_maxrunpri is the maximum run priority of runnable threads
66 64 * on this queue. It is -1 if nothing is runnable.
67 65 *
68 66 * disp_max_unbound_pri is the maximum run priority of threads on
69 67 * this dispatch queue but runnable by any CPU. This may be left
70 68 * artificially high, then corrected when some CPU tries to take
71 69 * an unbound thread. It is -1 if nothing is runnable.
72 70 */
73 71 pri_t disp_maxrunpri; /* maximum run priority */
74 72 pri_t disp_max_unbound_pri; /* max pri of unbound threads */
75 73
76 74 volatile int disp_nrunnable; /* runnable threads in cpu dispq */
77 75
78 76 struct cpu *disp_cpu; /* cpu owning this queue or NULL */
79 77 hrtime_t disp_steal; /* time when threads become stealable */
80 78 } disp_t;
81 79
82 80 #if defined(_KERNEL)
83 81
84 82 #define MAXCLSYSPRI 99
85 83 #define MINCLSYSPRI 60
86 84
87 85
88 86 /*
89 87 * Global scheduling variables.
90 88 * - See sys/cpuvar.h for CPU-local variables.
91 89 */
92 90 extern int nswapped; /* number of swapped threads */
93 91 /* nswapped protected by swap_lock */
94 92
95 93 extern pri_t minclsyspri; /* minimum level of any system class */
96 94 extern pri_t maxclsyspri; /* maximum level of any system class */
97 95 extern pri_t intr_pri; /* interrupt thread priority base level */
98 96
99 97 /*
100 98 * Minimum amount of time that a thread can remain runnable before it can
101 99 * be stolen by another CPU (in nanoseconds).
102 100 */
103 101 extern hrtime_t nosteal_nsec;
104 102
105 103 /*
106 104 * Kernel preemption occurs if a higher-priority thread is runnable with
107 105 * a priority at or above kpreemptpri.
108 106 *
109 107 * So that other processors can watch for such threads, a separate
110 108 * dispatch queue with unbound work above kpreemptpri is maintained.
111 109 * This is part of the CPU partition structure (cpupart_t).
112 110 */
113 111 extern pri_t kpreemptpri; /* level above which preemption takes place */
114 112
115 113 extern void disp_kp_alloc(disp_t *, pri_t); /* allocate kp queue */
116 114 extern void disp_kp_free(disp_t *); /* free kp queue */
117 115
118 116 /*
119 117 * Macro for use by scheduling classes to decide whether the thread is about
120 118 * to be scheduled or not. This returns the maximum run priority.
121 119 */
122 120 #define DISP_MAXRUNPRI(t) ((t)->t_disp_queue->disp_maxrunpri)
123 121
124 122 /*
125 123 * Platform callbacks for various dispatcher operations
126 124 *
127 125 * idle_cpu() is invoked when a cpu goes idle, and has nothing to do.
128 126 * disp_enq_thread() is invoked when a thread is placed on a run queue.
129 127 */
130 128 extern void (*idle_cpu)();
131 129 extern void (*disp_enq_thread)(struct cpu *, int);
132 130
133 131
134 132 extern int dispdeq(kthread_t *);
135 133 extern void dispinit(void);
136 134 extern void disp_add(sclass_t *);
137 135 extern int intr_active(struct cpu *, int);
138 136 extern int servicing_interrupt(void);
139 137 extern void preempt(void);
140 138 extern void setbackdq(kthread_t *);
141 139 extern void setfrontdq(kthread_t *);
142 140 extern void swtch(void);
143 141 extern void swtch_to(kthread_t *);
144 142 extern void swtch_from_zombie(void)
145 143 __NORETURN;
146 -extern void dq_sruninc(kthread_t *);
147 -extern void dq_srundec(kthread_t *);
148 144 extern void cpu_rechoose(kthread_t *);
149 145 extern void cpu_surrender(kthread_t *);
150 146 extern void kpreempt(int);
151 147 extern struct cpu *disp_lowpri_cpu(struct cpu *, struct lgrp_ld *, pri_t,
152 148 struct cpu *);
153 149 extern int disp_bound_threads(struct cpu *, int);
154 150 extern int disp_bound_anythreads(struct cpu *, int);
155 151 extern int disp_bound_partition(struct cpu *, int);
156 152 extern void disp_cpu_init(struct cpu *);
157 153 extern void disp_cpu_fini(struct cpu *);
158 154 extern void disp_cpu_inactive(struct cpu *);
159 155 extern void disp_adjust_unbound_pri(kthread_t *);
160 156 extern void resume(kthread_t *);
161 157 extern void resume_from_intr(kthread_t *);
162 158 extern void resume_from_zombie(kthread_t *)
163 159 __NORETURN;
164 160 extern void disp_swapped_enq(kthread_t *);
165 161 extern int disp_anywork(void);
166 162
167 163 #define KPREEMPT_SYNC (-1)
168 164 #define kpreempt_disable() \
169 165 { \
170 166 curthread->t_preempt++; \
171 167 ASSERT(curthread->t_preempt >= 1); \
172 168 }
173 169 #define kpreempt_enable() \
174 170 { \
175 171 ASSERT(curthread->t_preempt >= 1); \
176 172 if (--curthread->t_preempt == 0 && \
177 173 CPU->cpu_kprunrun) \
178 174 kpreempt(KPREEMPT_SYNC); \
179 175 }
180 176
181 177 #endif /* _KERNEL */
182 178
183 179 #ifdef __cplusplus
184 180 }
185 181 #endif
186 182
187 183 #endif /* _SYS_DISP_H */