Print this page
6583 remove whole-process swapping
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/sys/disp.h
+++ new/usr/src/uts/common/sys/disp.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 *
25 25 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 /* Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T */
29 29 /* All Rights Reserved */
30 30
31 31
32 32 #ifndef _SYS_DISP_H
33 33 #define _SYS_DISP_H
34 34
35 35 #include <sys/priocntl.h>
36 36 #include <sys/thread.h>
37 37 #include <sys/class.h>
38 38
39 39 #ifdef __cplusplus
40 40 extern "C" {
41 41 #endif
42 42
43 43 /*
44 44 * The following is the format of a dispatcher queue entry.
45 45 */
46 46 typedef struct dispq {
47 47 kthread_t *dq_first; /* head of this queue; NULL if empty */
48 48 kthread_t *dq_last; /* tail of this queue; NULL if empty */
49 49 int dq_sruncnt; /* number of loaded, runnable */
50 50 /* threads on this queue */
51 51 } dispq_t;
52 52
53 53 /*
54 54 * Dispatch queue structure.
55 55 */
56 56 typedef struct _disp {
57 57 disp_lock_t disp_lock; /* protects the dispatching fields below */
58 58 pri_t disp_npri; /* # of priority levels in queue */
59 59 dispq_t *disp_q; /* the dispatch queue (one dispq_t per pri level) */
60 60 dispq_t *disp_q_limit; /* ptr one past end of dispatch queue */
61 61 ulong_t *disp_qactmap; /* bitmap of active (occupied) dispatch queues */
62 62
63 63 /*
64 64 * Priorities:
65 65 * disp_maxrunpri is the maximum run priority of runnable threads
66 66 * on this queue. It is -1 if nothing is runnable.
67 67 *
68 68 * disp_max_unbound_pri is the maximum run priority of threads on
69 69 * this dispatch queue but runnable by any CPU. This may be left
70 70 * artificially high, then corrected when some CPU tries to take
71 71 * an unbound thread. It is -1 if nothing is runnable.
72 72 */
73 73 pri_t disp_maxrunpri; /* maximum run priority; -1 if none runnable */
74 74 pri_t disp_max_unbound_pri; /* max pri of unbound threads; -1 if none */
75 75
76 76 volatile int disp_nrunnable; /* total runnable threads in this dispq */
77 77
78 78 struct cpu *disp_cpu; /* cpu owning this queue, or NULL (e.g. a kp queue; see disp_kp_alloc) */
79 79 hrtime_t disp_steal; /* time after which threads may be stolen */
80 80 } disp_t;
81 81
82 82 #if defined(_KERNEL) || defined(_FAKE_KERNEL)
83 83
84 84 #define MAXCLSYSPRI 99 /* see maxclsyspri below: max level of any system class */
85 85 #define MINCLSYSPRI 60 /* see minclsyspri below: min level of any system class */
86 86
87 87
88 88 /*
89 89 * Global scheduling variables.
90 90 * - See sys/cpuvar.h for CPU-local variables.
91 91 */
92 92 extern int nswapped; /* number of swapped threads */
93 93 /* nswapped protected by swap_lock */
94 94
95 95 extern pri_t minclsyspri; /* minimum level of any system class */
96 96 extern pri_t maxclsyspri; /* maximum level of any system class */
97 97 extern pri_t intr_pri; /* interrupt thread priority base level */
98 98
99 99 #endif /* _KERNEL || _FAKE_KERNEL */
100 100 #if defined(_KERNEL)
101 101
102 102 /*
103 103 * Minimum amount of time that a thread can remain runnable before it can
104 104 * be stolen by another CPU (in nanoseconds).
105 105 */
106 106 extern hrtime_t nosteal_nsec;
107 107
108 108 /*
109 109 * Kernel preemption occurs if a higher-priority thread is runnable with
110 110 * a priority at or above kpreemptpri.
111 111 *
112 112 * So that other processors can watch for such threads, a separate
113 113 * dispatch queue with unbound work above kpreemptpri is maintained.
114 114 * This is part of the CPU partition structure (cpupart_t).
115 115 */
116 116 extern pri_t kpreemptpri; /* level above which preemption takes place */
117 117
118 118 extern void disp_kp_alloc(disp_t *, pri_t); /* allocate kp queue */
119 119 extern void disp_kp_free(disp_t *); /* free kp queue */
120 120
121 121 /*
122 122 * Macro for use by scheduling classes to decide whether the thread is about
123 123 * to be scheduled or not. This returns the maximum run priority.
124 124 */
125 125 #define DISP_MAXRUNPRI(t) ((t)->t_disp_queue->disp_maxrunpri)
126 126
127 127 /*
128 128 * Platform callbacks for various dispatcher operations
129 129 *
130 130 * idle_cpu() is invoked when a cpu goes idle, and has nothing to do.
131 131 * disp_enq_thread() is invoked when a thread is placed on a run queue.
132 132 */
133 133 extern void (*idle_cpu)();
134 134 extern void (*disp_enq_thread)(struct cpu *, int);
135 135
136 136
137 137 extern int dispdeq(kthread_t *);
138 138 extern void dispinit(void);
↓ open down ↓ |
138 lines elided |
↑ open up ↑ |
139 139 extern void disp_add(sclass_t *);
140 140 extern int intr_active(struct cpu *, int);
141 141 extern int servicing_interrupt(void);
142 142 extern void preempt(void);
143 143 extern void setbackdq(kthread_t *);
144 144 extern void setfrontdq(kthread_t *);
145 145 extern void swtch(void);
146 146 extern void swtch_to(kthread_t *);
147 147 extern void swtch_from_zombie(void)
148 148 __NORETURN;
149 -extern void dq_sruninc(kthread_t *);
150 -extern void dq_srundec(kthread_t *);
151 149 extern void cpu_rechoose(kthread_t *);
152 150 extern void cpu_surrender(kthread_t *);
153 151 extern void kpreempt(int);
154 152 extern struct cpu *disp_lowpri_cpu(struct cpu *, struct lgrp_ld *, pri_t,
155 153 struct cpu *);
156 154 extern int disp_bound_threads(struct cpu *, int);
157 155 extern int disp_bound_anythreads(struct cpu *, int);
158 156 extern int disp_bound_partition(struct cpu *, int);
159 157 extern void disp_cpu_init(struct cpu *);
160 158 extern void disp_cpu_fini(struct cpu *);
161 159 extern void disp_cpu_inactive(struct cpu *);
162 160 extern void disp_adjust_unbound_pri(kthread_t *);
163 161 extern void resume(kthread_t *);
164 162 extern void resume_from_intr(kthread_t *);
165 163 extern void resume_from_zombie(kthread_t *)
166 164 __NORETURN;
167 165 extern void disp_swapped_enq(kthread_t *);
168 166 extern int disp_anywork(void);
169 167
170 168 #define KPREEMPT_SYNC (-1) /* arg passed to kpreempt() by kpreempt_enable() */
171 169 #define kpreempt_disable() \
172 170 { \
173 171 curthread->t_preempt++; /* t_preempt > 0: preemption held off */ \
174 172 ASSERT(curthread->t_preempt >= 1); \
175 173 }
176 174 #define kpreempt_enable() \
177 175 { \
178 176 ASSERT(curthread->t_preempt >= 1); \
179 177 if (--curthread->t_preempt == 0 && /* last enable: honor pending */ \
180 178 CPU->cpu_kprunrun) /* preemption request, if any */ \
181 179 kpreempt(KPREEMPT_SYNC); \
182 180 }
183 181
184 182 #endif /* _KERNEL */
185 183
186 184 #ifdef __cplusplus
187 185 }
188 186 #endif
189 187
190 188 #endif /* _SYS_DISP_H */
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX