remove whole-process swapping
Long before Unix supported paging, it used whole-process swapping to reclaim
memory. The code is still there and, in theory, runs when the system gets
*extremely* low on memory. In practice it never runs, because the definition
of low-on-memory is antiquated. (XXX: define what antiquated means)
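
A rough way to see how far a modern machine sits from the old thresholds: the
swapper compares free memory against page counts such as desfree and minfree
(an assumption about the exact test; that logic is not part of this change),
and those counts are exported through the system_pages kstat. Illustrative
values from a hypothetical, mostly idle ~4 GB machine:

$ kstat -p unix:0:system_pages:freemem unix:0:system_pages:desfree unix:0:system_pages:minfree
unix:0:system_pages:freemem     1042783
unix:0:system_pages:desfree     8192
unix:0:system_pages:minfree     4096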
You can check the number of swapout/swapin events with kstats:
$ kstat -p ::vm:swapin ::vm:swapout
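
The cpu module exports these per CPU instance, so you get one line per CPU; on
a system where the swapper has never run, the counters simply stay at zero.
Hypothetical output:

$ kstat -p ::vm:swapin ::vm:swapout
cpu:0:vm:swapin 0
cpu:1:vm:swapin 0
cpu:0:vm:swapout        0
cpu:1:vm:swapout        0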
--- old/usr/src/uts/common/os/cpu.c
+++ new/usr/src/uts/common/os/cpu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Architecture-independent CPU control functions.
28 28 */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/param.h>
32 32 #include <sys/var.h>
33 33 #include <sys/thread.h>
34 34 #include <sys/cpuvar.h>
35 35 #include <sys/cpu_event.h>
36 36 #include <sys/kstat.h>
37 37 #include <sys/uadmin.h>
38 38 #include <sys/systm.h>
39 39 #include <sys/errno.h>
40 40 #include <sys/cmn_err.h>
41 41 #include <sys/procset.h>
42 42 #include <sys/processor.h>
43 43 #include <sys/debug.h>
44 44 #include <sys/cpupart.h>
45 45 #include <sys/lgrp.h>
46 46 #include <sys/pset.h>
47 47 #include <sys/pghw.h>
48 48 #include <sys/kmem.h>
49 49 #include <sys/kmem_impl.h> /* to set per-cpu kmem_cache offset */
50 50 #include <sys/atomic.h>
51 51 #include <sys/callb.h>
52 52 #include <sys/vtrace.h>
53 53 #include <sys/cyclic.h>
54 54 #include <sys/bitmap.h>
55 55 #include <sys/nvpair.h>
56 56 #include <sys/pool_pset.h>
57 57 #include <sys/msacct.h>
58 58 #include <sys/time.h>
59 59 #include <sys/archsystm.h>
60 60 #include <sys/sdt.h>
61 61 #if defined(__x86) || defined(__amd64)
62 62 #include <sys/x86_archext.h>
63 63 #endif
64 64 #include <sys/callo.h>
65 65
66 66 extern int mp_cpu_start(cpu_t *);
67 67 extern int mp_cpu_stop(cpu_t *);
68 68 extern int mp_cpu_poweron(cpu_t *);
69 69 extern int mp_cpu_poweroff(cpu_t *);
70 70 extern int mp_cpu_configure(int);
71 71 extern int mp_cpu_unconfigure(int);
72 72 extern void mp_cpu_faulted_enter(cpu_t *);
73 73 extern void mp_cpu_faulted_exit(cpu_t *);
74 74
75 75 extern int cmp_cpu_to_chip(processorid_t cpuid);
76 76 #ifdef __sparcv9
77 77 extern char *cpu_fru_fmri(cpu_t *cp);
78 78 #endif
79 79
80 80 static void cpu_add_active_internal(cpu_t *cp);
81 81 static void cpu_remove_active(cpu_t *cp);
82 82 static void cpu_info_kstat_create(cpu_t *cp);
83 83 static void cpu_info_kstat_destroy(cpu_t *cp);
84 84 static void cpu_stats_kstat_create(cpu_t *cp);
85 85 static void cpu_stats_kstat_destroy(cpu_t *cp);
86 86
87 87 static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
88 88 static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
89 89 static int cpu_stat_ks_update(kstat_t *ksp, int rw);
90 90 static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);
91 91
92 92 /*
93 93 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
94 94 * max_cpu_seqid_ever, and dispatch queue reallocations. The lock ordering with
95 95 * respect to related locks is:
96 96 *
97 97 * cpu_lock --> thread_free_lock ---> p_lock ---> thread_lock()
98 98 *
99 99 * Warning: Certain sections of code do not use the cpu_lock when
100 100 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()). Since
101 101 * all cpus are paused during modifications to this list, a solution
 102 102 	 * to protect the list is to either disable kernel preemption while
103 103 * walking the list, *or* recheck the cpu_next pointer at each
104 104 * iteration in the loop. Note that in no cases can any cached
105 105 * copies of the cpu pointers be kept as they may become invalid.
106 106 */
107 107 kmutex_t cpu_lock;
108 108 cpu_t *cpu_list; /* list of all CPUs */
109 109 cpu_t *clock_cpu_list; /* used by clock to walk CPUs */
110 110 cpu_t *cpu_active; /* list of active CPUs */
111 111 static cpuset_t cpu_available; /* set of available CPUs */
112 112 cpuset_t cpu_seqid_inuse; /* which cpu_seqids are in use */
113 113
114 114 cpu_t **cpu_seq; /* ptrs to CPUs, indexed by seq_id */
115 115
116 116 /*
117 117 * max_ncpus keeps the max cpus the system can have. Initially
118 118 * it's NCPU, but since most archs scan the devtree for cpus
119 119 * fairly early on during boot, the real max can be known before
120 120 * ncpus is set (useful for early NCPU based allocations).
121 121 */
122 122 int max_ncpus = NCPU;
123 123 /*
 124 124 	 * platforms that set max_ncpus to the maximum number of cpus that can be
125 125 * dynamically added will set boot_max_ncpus to the number of cpus found
126 126 * at device tree scan time during boot.
127 127 */
128 128 int boot_max_ncpus = -1;
129 129 int boot_ncpus = -1;
130 130 /*
131 131 * Maximum possible CPU id. This can never be >= NCPU since NCPU is
132 132 * used to size arrays that are indexed by CPU id.
133 133 */
134 134 processorid_t max_cpuid = NCPU - 1;
135 135
136 136 /*
 137 137 	 * Maximum cpu_seqid ever given out. This number can only grow and never shrink. It
138 138 * can be used to optimize NCPU loops to avoid going through CPUs which were
139 139 * never on-line.
140 140 */
141 141 processorid_t max_cpu_seqid_ever = 0;
142 142
143 143 int ncpus = 1;
144 144 int ncpus_online = 1;
145 145
146 146 /*
147 147 * CPU that we're trying to offline. Protected by cpu_lock.
148 148 */
149 149 cpu_t *cpu_inmotion;
150 150
151 151 /*
 152 152 	 * Can be raised to suppress further weakbindings, which are instead
153 153 * satisfied by disabling preemption. Must be raised/lowered under cpu_lock,
154 154 * while individual thread weakbinding synchronization is done under thread
155 155 * lock.
156 156 */
157 157 int weakbindingbarrier;
158 158
159 159 /*
160 160 * Variables used in pause_cpus().
161 161 */
162 162 static volatile char safe_list[NCPU];
163 163
164 164 static struct _cpu_pause_info {
165 165 int cp_spl; /* spl saved in pause_cpus() */
166 166 volatile int cp_go; /* Go signal sent after all ready */
167 167 int cp_count; /* # of CPUs to pause */
168 168 ksema_t cp_sem; /* synch pause_cpus & cpu_pause */
169 169 kthread_id_t cp_paused;
170 170 void *(*cp_func)(void *);
171 171 } cpu_pause_info;
172 172
173 173 static kmutex_t pause_free_mutex;
174 174 static kcondvar_t pause_free_cv;
175 175
176 176
177 177 static struct cpu_sys_stats_ks_data {
178 178 kstat_named_t cpu_ticks_idle;
179 179 kstat_named_t cpu_ticks_user;
180 180 kstat_named_t cpu_ticks_kernel;
181 181 kstat_named_t cpu_ticks_wait;
182 182 kstat_named_t cpu_nsec_idle;
183 183 kstat_named_t cpu_nsec_user;
184 184 kstat_named_t cpu_nsec_kernel;
185 185 kstat_named_t cpu_nsec_dtrace;
186 186 kstat_named_t cpu_nsec_intr;
187 187 kstat_named_t cpu_load_intr;
188 188 kstat_named_t wait_ticks_io;
189 189 kstat_named_t dtrace_probes;
190 190 kstat_named_t bread;
191 191 kstat_named_t bwrite;
192 192 kstat_named_t lread;
193 193 kstat_named_t lwrite;
194 194 kstat_named_t phread;
195 195 kstat_named_t phwrite;
196 196 kstat_named_t pswitch;
197 197 kstat_named_t trap;
198 198 kstat_named_t intr;
199 199 kstat_named_t syscall;
200 200 kstat_named_t sysread;
201 201 kstat_named_t syswrite;
202 202 kstat_named_t sysfork;
203 203 kstat_named_t sysvfork;
204 204 kstat_named_t sysexec;
205 205 kstat_named_t readch;
206 206 kstat_named_t writech;
207 207 kstat_named_t rcvint;
208 208 kstat_named_t xmtint;
209 209 kstat_named_t mdmint;
210 210 kstat_named_t rawch;
211 211 kstat_named_t canch;
212 212 kstat_named_t outch;
213 213 kstat_named_t msg;
214 214 kstat_named_t sema;
215 215 kstat_named_t namei;
216 216 kstat_named_t ufsiget;
217 217 kstat_named_t ufsdirblk;
218 218 kstat_named_t ufsipage;
219 219 kstat_named_t ufsinopage;
220 220 kstat_named_t procovf;
221 221 kstat_named_t intrthread;
222 222 kstat_named_t intrblk;
223 223 kstat_named_t intrunpin;
224 224 kstat_named_t idlethread;
225 225 kstat_named_t inv_swtch;
226 226 kstat_named_t nthreads;
227 227 kstat_named_t cpumigrate;
228 228 kstat_named_t xcalls;
229 229 kstat_named_t mutex_adenters;
230 230 kstat_named_t rw_rdfails;
231 231 kstat_named_t rw_wrfails;
232 232 kstat_named_t modload;
233 233 kstat_named_t modunload;
234 234 kstat_named_t bawrite;
235 235 kstat_named_t iowait;
236 236 } cpu_sys_stats_ks_data_template = {
237 237 { "cpu_ticks_idle", KSTAT_DATA_UINT64 },
238 238 { "cpu_ticks_user", KSTAT_DATA_UINT64 },
239 239 { "cpu_ticks_kernel", KSTAT_DATA_UINT64 },
240 240 { "cpu_ticks_wait", KSTAT_DATA_UINT64 },
241 241 { "cpu_nsec_idle", KSTAT_DATA_UINT64 },
242 242 { "cpu_nsec_user", KSTAT_DATA_UINT64 },
243 243 { "cpu_nsec_kernel", KSTAT_DATA_UINT64 },
244 244 { "cpu_nsec_dtrace", KSTAT_DATA_UINT64 },
245 245 { "cpu_nsec_intr", KSTAT_DATA_UINT64 },
246 246 { "cpu_load_intr", KSTAT_DATA_UINT64 },
247 247 { "wait_ticks_io", KSTAT_DATA_UINT64 },
248 248 { "dtrace_probes", KSTAT_DATA_UINT64 },
249 249 { "bread", KSTAT_DATA_UINT64 },
250 250 { "bwrite", KSTAT_DATA_UINT64 },
251 251 { "lread", KSTAT_DATA_UINT64 },
252 252 { "lwrite", KSTAT_DATA_UINT64 },
253 253 { "phread", KSTAT_DATA_UINT64 },
254 254 { "phwrite", KSTAT_DATA_UINT64 },
255 255 { "pswitch", KSTAT_DATA_UINT64 },
256 256 { "trap", KSTAT_DATA_UINT64 },
257 257 { "intr", KSTAT_DATA_UINT64 },
258 258 { "syscall", KSTAT_DATA_UINT64 },
259 259 { "sysread", KSTAT_DATA_UINT64 },
260 260 { "syswrite", KSTAT_DATA_UINT64 },
261 261 { "sysfork", KSTAT_DATA_UINT64 },
262 262 { "sysvfork", KSTAT_DATA_UINT64 },
263 263 { "sysexec", KSTAT_DATA_UINT64 },
264 264 { "readch", KSTAT_DATA_UINT64 },
265 265 { "writech", KSTAT_DATA_UINT64 },
266 266 { "rcvint", KSTAT_DATA_UINT64 },
267 267 { "xmtint", KSTAT_DATA_UINT64 },
268 268 { "mdmint", KSTAT_DATA_UINT64 },
269 269 { "rawch", KSTAT_DATA_UINT64 },
270 270 { "canch", KSTAT_DATA_UINT64 },
271 271 { "outch", KSTAT_DATA_UINT64 },
272 272 { "msg", KSTAT_DATA_UINT64 },
273 273 { "sema", KSTAT_DATA_UINT64 },
274 274 { "namei", KSTAT_DATA_UINT64 },
275 275 { "ufsiget", KSTAT_DATA_UINT64 },
276 276 { "ufsdirblk", KSTAT_DATA_UINT64 },
277 277 { "ufsipage", KSTAT_DATA_UINT64 },
278 278 { "ufsinopage", KSTAT_DATA_UINT64 },
279 279 { "procovf", KSTAT_DATA_UINT64 },
280 280 { "intrthread", KSTAT_DATA_UINT64 },
281 281 { "intrblk", KSTAT_DATA_UINT64 },
282 282 { "intrunpin", KSTAT_DATA_UINT64 },
283 283 { "idlethread", KSTAT_DATA_UINT64 },
284 284 { "inv_swtch", KSTAT_DATA_UINT64 },
285 285 { "nthreads", KSTAT_DATA_UINT64 },
286 286 { "cpumigrate", KSTAT_DATA_UINT64 },
287 287 { "xcalls", KSTAT_DATA_UINT64 },
288 288 { "mutex_adenters", KSTAT_DATA_UINT64 },
289 289 { "rw_rdfails", KSTAT_DATA_UINT64 },
290 290 { "rw_wrfails", KSTAT_DATA_UINT64 },
291 291 { "modload", KSTAT_DATA_UINT64 },
292 292 { "modunload", KSTAT_DATA_UINT64 },
293 293 { "bawrite", KSTAT_DATA_UINT64 },
294 294 { "iowait", KSTAT_DATA_UINT64 },
295 295 };
296 296
297 297 static struct cpu_vm_stats_ks_data {
298 298 kstat_named_t pgrec;
299 299 kstat_named_t pgfrec;
300 300 kstat_named_t pgin;
301 301 kstat_named_t pgpgin;
302 302 kstat_named_t pgout;
303 303 kstat_named_t pgpgout;
304 - kstat_named_t swapin;
305 - kstat_named_t pgswapin;
306 - kstat_named_t swapout;
307 - kstat_named_t pgswapout;
308 304 kstat_named_t zfod;
309 305 kstat_named_t dfree;
310 306 kstat_named_t scan;
311 307 kstat_named_t rev;
312 308 kstat_named_t hat_fault;
313 309 kstat_named_t as_fault;
314 310 kstat_named_t maj_fault;
315 311 kstat_named_t cow_fault;
316 312 kstat_named_t prot_fault;
317 313 kstat_named_t softlock;
318 314 kstat_named_t kernel_asflt;
319 315 kstat_named_t pgrrun;
320 316 kstat_named_t execpgin;
321 317 kstat_named_t execpgout;
322 318 kstat_named_t execfree;
323 319 kstat_named_t anonpgin;
324 320 kstat_named_t anonpgout;
325 321 kstat_named_t anonfree;
326 322 kstat_named_t fspgin;
327 323 kstat_named_t fspgout;
328 324 kstat_named_t fsfree;
329 325 } cpu_vm_stats_ks_data_template = {
330 326 { "pgrec", KSTAT_DATA_UINT64 },
331 327 { "pgfrec", KSTAT_DATA_UINT64 },
332 328 { "pgin", KSTAT_DATA_UINT64 },
333 329 { "pgpgin", KSTAT_DATA_UINT64 },
334 330 { "pgout", KSTAT_DATA_UINT64 },
335 331 { "pgpgout", KSTAT_DATA_UINT64 },
336 - { "swapin", KSTAT_DATA_UINT64 },
337 - { "pgswapin", KSTAT_DATA_UINT64 },
338 - { "swapout", KSTAT_DATA_UINT64 },
339 - { "pgswapout", KSTAT_DATA_UINT64 },
340 332 { "zfod", KSTAT_DATA_UINT64 },
341 333 { "dfree", KSTAT_DATA_UINT64 },
342 334 { "scan", KSTAT_DATA_UINT64 },
343 335 { "rev", KSTAT_DATA_UINT64 },
344 336 { "hat_fault", KSTAT_DATA_UINT64 },
345 337 { "as_fault", KSTAT_DATA_UINT64 },
346 338 { "maj_fault", KSTAT_DATA_UINT64 },
347 339 { "cow_fault", KSTAT_DATA_UINT64 },
348 340 { "prot_fault", KSTAT_DATA_UINT64 },
349 341 { "softlock", KSTAT_DATA_UINT64 },
350 342 { "kernel_asflt", KSTAT_DATA_UINT64 },
351 343 { "pgrrun", KSTAT_DATA_UINT64 },
352 344 { "execpgin", KSTAT_DATA_UINT64 },
353 345 { "execpgout", KSTAT_DATA_UINT64 },
354 346 { "execfree", KSTAT_DATA_UINT64 },
355 347 { "anonpgin", KSTAT_DATA_UINT64 },
356 348 { "anonpgout", KSTAT_DATA_UINT64 },
357 349 { "anonfree", KSTAT_DATA_UINT64 },
358 350 { "fspgin", KSTAT_DATA_UINT64 },
359 351 { "fspgout", KSTAT_DATA_UINT64 },
360 352 { "fsfree", KSTAT_DATA_UINT64 },
361 353 };
362 354
363 355 /*
364 356 * Force the specified thread to migrate to the appropriate processor.
365 357 * Called with thread lock held, returns with it dropped.
366 358 */
367 359 static void
368 360 force_thread_migrate(kthread_id_t tp)
369 361 {
370 362 ASSERT(THREAD_LOCK_HELD(tp));
371 363 if (tp == curthread) {
372 364 THREAD_TRANSITION(tp);
373 365 CL_SETRUN(tp);
374 366 thread_unlock_nopreempt(tp);
375 367 swtch();
376 368 } else {
377 369 if (tp->t_state == TS_ONPROC) {
378 370 cpu_surrender(tp);
379 371 } else if (tp->t_state == TS_RUN) {
380 372 (void) dispdeq(tp);
381 373 setbackdq(tp);
382 374 }
383 375 thread_unlock(tp);
384 376 }
385 377 }
386 378
387 379 /*
388 380 * Set affinity for a specified CPU.
389 381 * A reference count is incremented and the affinity is held until the
390 382 * reference count is decremented to zero by thread_affinity_clear().
391 383 * This is so regions of code requiring affinity can be nested.
392 384 * Caller needs to ensure that cpu_id remains valid, which can be
393 385 * done by holding cpu_lock across this call, unless the caller
394 386 * specifies CPU_CURRENT in which case the cpu_lock will be acquired
395 387 * by thread_affinity_set and CPU->cpu_id will be the target CPU.
396 388 */
397 389 void
398 390 thread_affinity_set(kthread_id_t t, int cpu_id)
399 391 {
400 392 cpu_t *cp;
401 393 int c;
402 394
403 395 ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
404 396
405 397 if ((c = cpu_id) == CPU_CURRENT) {
406 398 mutex_enter(&cpu_lock);
407 399 cpu_id = CPU->cpu_id;
408 400 }
409 401 /*
410 402 * We should be asserting that cpu_lock is held here, but
411 403 * the NCA code doesn't acquire it. The following assert
412 404 * should be uncommented when the NCA code is fixed.
413 405 *
414 406 * ASSERT(MUTEX_HELD(&cpu_lock));
415 407 */
416 408 ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
417 409 cp = cpu[cpu_id];
418 410 ASSERT(cp != NULL); /* user must provide a good cpu_id */
419 411 /*
420 412 * If there is already a hard affinity requested, and this affinity
421 413 * conflicts with that, panic.
422 414 */
423 415 thread_lock(t);
424 416 if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
425 417 panic("affinity_set: setting %p but already bound to %p",
426 418 (void *)cp, (void *)t->t_bound_cpu);
427 419 }
428 420 t->t_affinitycnt++;
429 421 t->t_bound_cpu = cp;
430 422
431 423 /*
432 424 * Make sure we're running on the right CPU.
433 425 */
434 426 if (cp != t->t_cpu || t != curthread) {
435 427 force_thread_migrate(t); /* drops thread lock */
436 428 } else {
437 429 thread_unlock(t);
438 430 }
439 431
440 432 if (c == CPU_CURRENT)
441 433 mutex_exit(&cpu_lock);
442 434 }
443 435
444 436 /*
445 437 * Wrapper for backward compatibility.
446 438 */
447 439 void
448 440 affinity_set(int cpu_id)
449 441 {
450 442 thread_affinity_set(curthread, cpu_id);
451 443 }
452 444
453 445 /*
454 446 * Decrement the affinity reservation count and if it becomes zero,
455 447 * clear the CPU affinity for the current thread, or set it to the user's
456 448 * software binding request.
457 449 */
458 450 void
459 451 thread_affinity_clear(kthread_id_t t)
460 452 {
461 453 register processorid_t binding;
462 454
463 455 thread_lock(t);
464 456 if (--t->t_affinitycnt == 0) {
465 457 if ((binding = t->t_bind_cpu) == PBIND_NONE) {
466 458 /*
467 459 * Adjust disp_max_unbound_pri if necessary.
468 460 */
469 461 disp_adjust_unbound_pri(t);
470 462 t->t_bound_cpu = NULL;
471 463 if (t->t_cpu->cpu_part != t->t_cpupart) {
472 464 force_thread_migrate(t);
473 465 return;
474 466 }
475 467 } else {
476 468 t->t_bound_cpu = cpu[binding];
477 469 /*
478 470 * Make sure the thread is running on the bound CPU.
479 471 */
480 472 if (t->t_cpu != t->t_bound_cpu) {
481 473 force_thread_migrate(t);
482 474 return; /* already dropped lock */
483 475 }
484 476 }
485 477 }
486 478 thread_unlock(t);
487 479 }
488 480
489 481 /*
490 482 * Wrapper for backward compatibility.
491 483 */
492 484 void
493 485 affinity_clear(void)
494 486 {
495 487 thread_affinity_clear(curthread);
496 488 }
497 489
498 490 /*
499 491 * Weak cpu affinity. Bind to the "current" cpu for short periods
500 492 * of time during which the thread must not block (but may be preempted).
501 493 * Use this instead of kpreempt_disable() when it is only "no migration"
502 494 * rather than "no preemption" semantics that are required - disabling
503 495 * preemption holds higher priority threads off of cpu and if the
504 496 * operation that is protected is more than momentary this is not good
505 497 * for realtime etc.
506 498 *
507 499 * Weakly bound threads will not prevent a cpu from being offlined -
508 500 * we'll only run them on the cpu to which they are weakly bound but
509 501 * (because they do not block) we'll always be able to move them on to
510 502 * another cpu at offline time if we give them just a short moment to
511 503 * run during which they will unbind. To give a cpu a chance of offlining,
512 504 * however, we require a barrier to weak bindings that may be raised for a
513 505 * given cpu (offline/move code may set this and then wait a short time for
514 506 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
515 507 *
516 508 * There are few restrictions on the calling context of thread_nomigrate.
517 509 * The caller must not hold the thread lock. Calls may be nested.
518 510 *
519 511 * After weakbinding a thread must not perform actions that may block.
520 512 * In particular it must not call thread_affinity_set; calling that when
521 513 * already weakbound is nonsensical anyway.
522 514 *
523 515 * If curthread is prevented from migrating for other reasons
524 516 * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
525 517 * then the weak binding will succeed even if this cpu is the target of an
526 518 * offline/move request.
527 519 */
528 520 void
529 521 thread_nomigrate(void)
530 522 {
531 523 cpu_t *cp;
532 524 kthread_id_t t = curthread;
533 525
534 526 again:
535 527 kpreempt_disable();
536 528 cp = CPU;
537 529
538 530 /*
539 531 * A highlevel interrupt must not modify t_nomigrate or
540 532 * t_weakbound_cpu of the thread it has interrupted. A lowlevel
541 533 * interrupt thread cannot migrate and we can avoid the
542 534 * thread_lock call below by short-circuiting here. In either
543 535 * case we can just return since no migration is possible and
544 536 * the condition will persist (ie, when we test for these again
545 537 * in thread_allowmigrate they can't have changed). Migration
546 538 * is also impossible if we're at or above DISP_LEVEL pil.
547 539 */
548 540 if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
549 541 getpil() >= DISP_LEVEL) {
550 542 kpreempt_enable();
551 543 return;
552 544 }
553 545
554 546 /*
555 547 * We must be consistent with existing weak bindings. Since we
556 548 * may be interrupted between the increment of t_nomigrate and
557 549 * the store to t_weakbound_cpu below we cannot assume that
558 550 * t_weakbound_cpu will be set if t_nomigrate is. Note that we
559 551 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
560 552 * always the case.
561 553 */
562 554 if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
563 555 if (!panicstr)
564 556 panic("thread_nomigrate: binding to %p but already "
565 557 "bound to %p", (void *)cp,
566 558 (void *)t->t_weakbound_cpu);
567 559 }
568 560
569 561 /*
570 562 * At this point we have preemption disabled and we don't yet hold
571 563 * the thread lock. So it's possible that somebody else could
572 564 * set t_bind_cpu here and not be able to force us across to the
573 565 * new cpu (since we have preemption disabled).
574 566 */
575 567 thread_lock(curthread);
576 568
577 569 /*
578 570 * If further weak bindings are being (temporarily) suppressed then
579 571 * we'll settle for disabling kernel preemption (which assures
580 572 * no migration provided the thread does not block which it is
581 573 * not allowed to if using thread_nomigrate). We must remember
582 574 * this disposition so we can take appropriate action in
583 575 * thread_allowmigrate. If this is a nested call and the
584 576 * thread is already weakbound then fall through as normal.
585 577 * We remember the decision to settle for kpreempt_disable through
586 578 * negative nesting counting in t_nomigrate. Once a thread has had one
587 579 * weakbinding request satisfied in this way any further (nested)
588 580 * requests will continue to be satisfied in the same way,
589 581 * even if weak bindings have recommenced.
590 582 */
591 583 if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
592 584 --t->t_nomigrate;
593 585 thread_unlock(curthread);
594 586 return; /* with kpreempt_disable still active */
595 587 }
596 588
597 589 /*
598 590 * We hold thread_lock so t_bind_cpu cannot change. We could,
599 591 * however, be running on a different cpu to which we are t_bound_cpu
600 592 * to (as explained above). If we grant the weak binding request
601 593 * in that case then the dispatcher must favour our weak binding
602 594 * over our strong (in which case, just as when preemption is
603 595 * disabled, we can continue to run on a cpu other than the one to
604 596 * which we are strongbound; the difference in this case is that
605 597 * this thread can be preempted and so can appear on the dispatch
606 598 * queues of a cpu other than the one it is strongbound to).
607 599 *
608 600 * If the cpu we are running on does not appear to be a current
609 601 * offline target (we check cpu_inmotion to determine this - since
610 602 * we don't hold cpu_lock we may not see a recent store to that,
611 603 * so it's possible that we at times can grant a weak binding to a
612 604 * cpu that is an offline target, but that one request will not
613 605 * prevent the offline from succeeding) then we will always grant
614 606 * the weak binding request. This includes the case above where
615 607 * we grant a weakbinding not commensurate with our strong binding.
616 608 *
617 609 * If our cpu does appear to be an offline target then we're inclined
618 610 * not to grant the weakbinding request just yet - we'd prefer to
619 611 * migrate to another cpu and grant the request there. The
620 612 * exceptions are those cases where going through preemption code
621 613 * will not result in us changing cpu:
622 614 *
623 615 * . interrupts have already bypassed this case (see above)
624 616 * . we are already weakbound to this cpu (dispatcher code will
625 617 * always return us to the weakbound cpu)
626 618 * . preemption was disabled even before we disabled it above
627 619 * . we are strongbound to this cpu (if we're strongbound to
628 620 * another and not yet running there the trip through the
629 621 * dispatcher will move us to the strongbound cpu and we
630 622 * will grant the weak binding there)
631 623 */
632 624 if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
633 625 t->t_bound_cpu == cp) {
634 626 /*
635 627 * Don't be tempted to store to t_weakbound_cpu only on
636 628 * the first nested bind request - if we're interrupted
637 629 * after the increment of t_nomigrate and before the
638 630 * store to t_weakbound_cpu and the interrupt calls
639 631 * thread_nomigrate then the assertion in thread_allowmigrate
640 632 * would fail.
641 633 */
642 634 t->t_nomigrate++;
643 635 t->t_weakbound_cpu = cp;
644 636 membar_producer();
645 637 thread_unlock(curthread);
646 638 /*
647 639 * Now that we have dropped the thread_lock another thread
648 640 * can set our t_weakbound_cpu, and will try to migrate us
649 641 * to the strongbound cpu (which will not be prevented by
650 642 * preemption being disabled since we're about to enable
651 643 * preemption). We have granted the weakbinding to the current
 652 644 	 * cpu, so again we are in the position that it is possible
653 645 * that our weak and strong bindings differ. Again this
654 646 * is catered for by dispatcher code which will favour our
655 647 * weak binding.
656 648 */
657 649 kpreempt_enable();
658 650 } else {
659 651 /*
660 652 * Move to another cpu before granting the request by
661 653 * forcing this thread through preemption code. When we
662 654 * get to set{front,back}dq called from CL_PREEMPT()
663 655 * cpu_choose() will be used to select a cpu to queue
664 656 * us on - that will see cpu_inmotion and take
665 657 * steps to avoid returning us to this cpu.
666 658 */
667 659 cp->cpu_kprunrun = 1;
668 660 thread_unlock(curthread);
669 661 kpreempt_enable(); /* will call preempt() */
670 662 goto again;
671 663 }
672 664 }
673 665
674 666 void
675 667 thread_allowmigrate(void)
676 668 {
677 669 kthread_id_t t = curthread;
678 670
679 671 ASSERT(t->t_weakbound_cpu == CPU ||
680 672 (t->t_nomigrate < 0 && t->t_preempt > 0) ||
681 673 CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
682 674 getpil() >= DISP_LEVEL);
683 675
684 676 if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
685 677 getpil() >= DISP_LEVEL)
686 678 return;
687 679
688 680 if (t->t_nomigrate < 0) {
689 681 /*
690 682 * This thread was granted "weak binding" in the
691 683 * stronger form of kernel preemption disabling.
692 684 * Undo a level of nesting for both t_nomigrate
693 685 * and t_preempt.
694 686 */
695 687 ++t->t_nomigrate;
696 688 kpreempt_enable();
697 689 } else if (--t->t_nomigrate == 0) {
698 690 /*
699 691 * Time to drop the weak binding. We need to cater
700 692 * for the case where we're weakbound to a different
701 693 * cpu than that to which we're strongbound (a very
702 694 * temporary arrangement that must only persist until
703 695 * weak binding drops). We don't acquire thread_lock
704 696 * here so even as this code executes t_bound_cpu
705 697 * may be changing. So we disable preemption and
706 698 * a) in the case that t_bound_cpu changes while we
707 699 * have preemption disabled kprunrun will be set
708 700 * asynchronously, and b) if before disabling
709 701 * preemption we were already on a different cpu to
710 702 * our t_bound_cpu then we set kprunrun ourselves
711 703 * to force a trip through the dispatcher when
712 704 * preemption is enabled.
713 705 */
714 706 kpreempt_disable();
715 707 if (t->t_bound_cpu &&
716 708 t->t_weakbound_cpu != t->t_bound_cpu)
717 709 CPU->cpu_kprunrun = 1;
718 710 t->t_weakbound_cpu = NULL;
719 711 membar_producer();
720 712 kpreempt_enable();
721 713 }
722 714 }
723 715
724 716 /*
725 717 * weakbinding_stop can be used to temporarily cause weakbindings made
726 718 * with thread_nomigrate to be satisfied through the stronger action of
727 719 * kpreempt_disable. weakbinding_start recommences normal weakbinding.
728 720 */
729 721
730 722 void
731 723 weakbinding_stop(void)
732 724 {
733 725 ASSERT(MUTEX_HELD(&cpu_lock));
734 726 weakbindingbarrier = 1;
735 727 membar_producer(); /* make visible before subsequent thread_lock */
736 728 }
737 729
738 730 void
739 731 weakbinding_start(void)
740 732 {
741 733 ASSERT(MUTEX_HELD(&cpu_lock));
742 734 weakbindingbarrier = 0;
743 735 }
744 736
745 737 void
746 738 null_xcall(void)
747 739 {
748 740 }
749 741
750 742 /*
751 743 * This routine is called to place the CPUs in a safe place so that
752 744 * one of them can be taken off line or placed on line. What we are
753 745 * trying to do here is prevent a thread from traversing the list
754 746 * of active CPUs while we are changing it or from getting placed on
755 747 * the run queue of a CPU that has just gone off line. We do this by
756 748 * creating a thread with the highest possible prio for each CPU and
757 749 * having it call this routine. The advantage of this method is that
758 750 * we can eliminate all checks for CPU_ACTIVE in the disp routines.
759 751 * This makes disp faster at the expense of making p_online() slower
760 752 * which is a good trade off.
761 753 */
762 754 static void
763 755 cpu_pause(int index)
764 756 {
765 757 int s;
766 758 struct _cpu_pause_info *cpi = &cpu_pause_info;
767 759 volatile char *safe = &safe_list[index];
768 760 long lindex = index;
769 761
770 762 ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));
771 763
772 764 while (*safe != PAUSE_DIE) {
773 765 *safe = PAUSE_READY;
774 766 membar_enter(); /* make sure stores are flushed */
775 767 sema_v(&cpi->cp_sem); /* signal requesting thread */
776 768
777 769 /*
778 770 * Wait here until all pause threads are running. That
779 771 * indicates that it's safe to do the spl. Until
780 772 * cpu_pause_info.cp_go is set, we don't want to spl
781 773 * because that might block clock interrupts needed
782 774 * to preempt threads on other CPUs.
783 775 */
784 776 while (cpi->cp_go == 0)
785 777 ;
786 778 /*
787 779 * Even though we are at the highest disp prio, we need
788 780 * to block out all interrupts below LOCK_LEVEL so that
789 781 * an intr doesn't come in, wake up a thread, and call
790 782 * setbackdq/setfrontdq.
791 783 */
792 784 s = splhigh();
793 785 /*
794 786 * if cp_func has been set then call it using index as the
795 787 * argument, currently only used by cpr_suspend_cpus().
796 788 * This function is used as the code to execute on the
797 789 * "paused" cpu's when a machine comes out of a sleep state
798 790 * and CPU's were powered off. (could also be used for
799 791 * hotplugging CPU's).
800 792 */
801 793 if (cpi->cp_func != NULL)
802 794 (*cpi->cp_func)((void *)lindex);
803 795
804 796 mach_cpu_pause(safe);
805 797
806 798 splx(s);
807 799 /*
808 800 * Waiting is at an end. Switch out of cpu_pause
809 801 * loop and resume useful work.
810 802 */
811 803 swtch();
812 804 }
813 805
814 806 mutex_enter(&pause_free_mutex);
815 807 *safe = PAUSE_DEAD;
816 808 cv_broadcast(&pause_free_cv);
817 809 mutex_exit(&pause_free_mutex);
818 810 }
819 811
820 812 /*
821 813 * Allow the cpus to start running again.
822 814 */
823 815 void
824 816 start_cpus()
825 817 {
826 818 int i;
827 819
828 820 ASSERT(MUTEX_HELD(&cpu_lock));
829 821 ASSERT(cpu_pause_info.cp_paused);
830 822 cpu_pause_info.cp_paused = NULL;
831 823 for (i = 0; i < NCPU; i++)
832 824 safe_list[i] = PAUSE_IDLE;
833 825 membar_enter(); /* make sure stores are flushed */
834 826 affinity_clear();
835 827 splx(cpu_pause_info.cp_spl);
836 828 kpreempt_enable();
837 829 }
838 830
839 831 /*
840 832 * Allocate a pause thread for a CPU.
841 833 */
842 834 static void
843 835 cpu_pause_alloc(cpu_t *cp)
844 836 {
845 837 kthread_id_t t;
846 838 long cpun = cp->cpu_id;
847 839
848 840 /*
849 841 * Note, v.v_nglobpris will not change value as long as I hold
850 842 * cpu_lock.
851 843 */
852 844 t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
853 845 0, &p0, TS_STOPPED, v.v_nglobpris - 1);
854 846 thread_lock(t);
855 847 t->t_bound_cpu = cp;
856 848 t->t_disp_queue = cp->cpu_disp;
857 849 t->t_affinitycnt = 1;
858 850 t->t_preempt = 1;
859 851 thread_unlock(t);
860 852 cp->cpu_pause_thread = t;
861 853 /*
862 854 * Registering a thread in the callback table is usually done
863 855 * in the initialization code of the thread. In this
864 856 * case, we do it right after thread creation because the
865 857 * thread itself may never run, and we need to register the
866 858 * fact that it is safe for cpr suspend.
867 859 */
868 860 CALLB_CPR_INIT_SAFE(t, "cpu_pause");
869 861 }
870 862
871 863 /*
872 864 * Free a pause thread for a CPU.
873 865 */
874 866 static void
875 867 cpu_pause_free(cpu_t *cp)
876 868 {
877 869 kthread_id_t t;
878 870 int cpun = cp->cpu_id;
879 871
880 872 ASSERT(MUTEX_HELD(&cpu_lock));
881 873 /*
882 874 * We have to get the thread and tell him to die.
883 875 */
884 876 if ((t = cp->cpu_pause_thread) == NULL) {
885 877 ASSERT(safe_list[cpun] == PAUSE_IDLE);
886 878 return;
887 879 }
888 880 thread_lock(t);
889 881 t->t_cpu = CPU; /* disp gets upset if last cpu is quiesced. */
890 882 t->t_bound_cpu = NULL; /* Must un-bind; cpu may not be running. */
891 883 t->t_pri = v.v_nglobpris - 1;
892 884 ASSERT(safe_list[cpun] == PAUSE_IDLE);
893 885 safe_list[cpun] = PAUSE_DIE;
894 886 THREAD_TRANSITION(t);
895 887 setbackdq(t);
896 888 thread_unlock_nopreempt(t);
897 889
898 890 /*
899 891 * If we don't wait for the thread to actually die, it may try to
900 892 * run on the wrong cpu as part of an actual call to pause_cpus().
901 893 */
902 894 mutex_enter(&pause_free_mutex);
903 895 while (safe_list[cpun] != PAUSE_DEAD) {
904 896 cv_wait(&pause_free_cv, &pause_free_mutex);
905 897 }
906 898 mutex_exit(&pause_free_mutex);
907 899 safe_list[cpun] = PAUSE_IDLE;
908 900
909 901 cp->cpu_pause_thread = NULL;
910 902 }
911 903
912 904 /*
913 905 * Initialize basic structures for pausing CPUs.
914 906 */
915 907 void
916 908 cpu_pause_init()
917 909 {
918 910 sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
919 911 /*
920 912 * Create initial CPU pause thread.
921 913 */
922 914 cpu_pause_alloc(CPU);
923 915 }
924 916
925 917 /*
926 918 * Start the threads used to pause another CPU.
927 919 */
928 920 static int
929 921 cpu_pause_start(processorid_t cpu_id)
930 922 {
931 923 int i;
932 924 int cpu_count = 0;
933 925
934 926 for (i = 0; i < NCPU; i++) {
935 927 cpu_t *cp;
936 928 kthread_id_t t;
937 929
938 930 cp = cpu[i];
939 931 if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
940 932 safe_list[i] = PAUSE_WAIT;
941 933 continue;
942 934 }
943 935
944 936 /*
945 937 * Skip CPU if it is quiesced or not yet started.
946 938 */
947 939 if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
948 940 safe_list[i] = PAUSE_WAIT;
949 941 continue;
950 942 }
951 943
952 944 /*
953 945 * Start this CPU's pause thread.
954 946 */
955 947 t = cp->cpu_pause_thread;
956 948 thread_lock(t);
957 949 /*
958 950 * Reset the priority, since nglobpris may have
959 951 * changed since the thread was created, if someone
960 952 * has loaded the RT (or some other) scheduling
961 953 * class.
962 954 */
963 955 t->t_pri = v.v_nglobpris - 1;
964 956 THREAD_TRANSITION(t);
965 957 setbackdq(t);
966 958 thread_unlock_nopreempt(t);
967 959 ++cpu_count;
968 960 }
969 961 return (cpu_count);
970 962 }
971 963
972 964
973 965 /*
974 966 * Pause all of the CPUs except the one we are on by creating a high
975 967 * priority thread bound to those CPUs.
976 968 *
977 969 * Note that one must be extremely careful regarding code
978 970 * executed while CPUs are paused. Since a CPU may be paused
979 971 * while a thread scheduling on that CPU is holding an adaptive
980 972 * lock, code executed with CPUs paused must not acquire adaptive
981 973 * (or low-level spin) locks. Also, such code must not block,
982 974 * since the thread that is supposed to initiate the wakeup may
983 975 * never run.
984 976 *
985 977 * With a few exceptions, the restrictions on code executed with CPUs
986 978 * paused match those for code executed at high-level interrupt
987 979 * context.
988 980 */
989 981 void
990 982 pause_cpus(cpu_t *off_cp, void *(*func)(void *))
991 983 {
992 984 processorid_t cpu_id;
993 985 int i;
994 986 struct _cpu_pause_info *cpi = &cpu_pause_info;
995 987
996 988 ASSERT(MUTEX_HELD(&cpu_lock));
997 989 ASSERT(cpi->cp_paused == NULL);
998 990 cpi->cp_count = 0;
999 991 cpi->cp_go = 0;
1000 992 for (i = 0; i < NCPU; i++)
1001 993 safe_list[i] = PAUSE_IDLE;
1002 994 kpreempt_disable();
1003 995
1004 996 cpi->cp_func = func;
1005 997
1006 998 /*
1007 999 * If running on the cpu that is going offline, get off it.
1008 1000 * This is so that it won't be necessary to rechoose a CPU
1009 1001 * when done.
1010 1002 */
1011 1003 if (CPU == off_cp)
1012 1004 cpu_id = off_cp->cpu_next_part->cpu_id;
1013 1005 else
1014 1006 cpu_id = CPU->cpu_id;
1015 1007 affinity_set(cpu_id);
1016 1008
1017 1009 /*
1018 1010 * Start the pause threads and record how many were started
1019 1011 */
1020 1012 cpi->cp_count = cpu_pause_start(cpu_id);
1021 1013
1022 1014 /*
1023 1015 * Now wait for all CPUs to be running the pause thread.
1024 1016 */
1025 1017 while (cpi->cp_count > 0) {
1026 1018 /*
1027 1019 * Spin reading the count without grabbing the disp
1028 1020 * lock to make sure we don't prevent the pause
1029 1021 * threads from getting the lock.
1030 1022 */
1031 1023 while (sema_held(&cpi->cp_sem))
1032 1024 ;
1033 1025 if (sema_tryp(&cpi->cp_sem))
1034 1026 --cpi->cp_count;
1035 1027 }
1036 1028 cpi->cp_go = 1; /* all have reached cpu_pause */
1037 1029
1038 1030 /*
1039 1031 * Now wait for all CPUs to spl. (Transition from PAUSE_READY
1040 1032 * to PAUSE_WAIT.)
1041 1033 */
1042 1034 for (i = 0; i < NCPU; i++) {
1043 1035 while (safe_list[i] != PAUSE_WAIT)
1044 1036 ;
1045 1037 }
1046 1038 cpi->cp_spl = splhigh(); /* block dispatcher on this CPU */
1047 1039 cpi->cp_paused = curthread;
1048 1040 }
1049 1041
1050 1042 /*
1051 1043 * Check whether the current thread has CPUs paused
1052 1044 */
1053 1045 int
1054 1046 cpus_paused(void)
1055 1047 {
1056 1048 if (cpu_pause_info.cp_paused != NULL) {
1057 1049 ASSERT(cpu_pause_info.cp_paused == curthread);
1058 1050 return (1);
1059 1051 }
1060 1052 return (0);
1061 1053 }
1062 1054
1063 1055 static cpu_t *
1064 1056 cpu_get_all(processorid_t cpun)
1065 1057 {
1066 1058 ASSERT(MUTEX_HELD(&cpu_lock));
1067 1059
1068 1060 if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
1069 1061 return (NULL);
1070 1062 return (cpu[cpun]);
1071 1063 }
1072 1064
1073 1065 /*
1074 1066 * Check whether cpun is a valid processor id and whether it should be
1075 1067 * visible from the current zone. If it is, return a pointer to the
1076 1068 * associated CPU structure.
1077 1069 */
1078 1070 cpu_t *
1079 1071 cpu_get(processorid_t cpun)
1080 1072 {
1081 1073 cpu_t *c;
1082 1074
1083 1075 ASSERT(MUTEX_HELD(&cpu_lock));
1084 1076 c = cpu_get_all(cpun);
1085 1077 if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
1086 1078 zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
1087 1079 return (NULL);
1088 1080 return (c);
1089 1081 }
1090 1082
1091 1083 /*
1092 1084 * The following functions should be used to check CPU states in the kernel.
1093 1085 * They should be invoked with cpu_lock held. Kernel subsystems interested
1094 1086 * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
1095 1087 * states. Those are for user-land (and system call) use only.
1096 1088 */
1097 1089
1098 1090 /*
1099 1091 * Determine whether the CPU is online and handling interrupts.
1100 1092 */
1101 1093 int
1102 1094 cpu_is_online(cpu_t *cpu)
1103 1095 {
1104 1096 ASSERT(MUTEX_HELD(&cpu_lock));
1105 1097 return (cpu_flagged_online(cpu->cpu_flags));
1106 1098 }
1107 1099
1108 1100 /*
1109 1101 * Determine whether the CPU is offline (this includes spare and faulted).
1110 1102 */
1111 1103 int
1112 1104 cpu_is_offline(cpu_t *cpu)
1113 1105 {
1114 1106 ASSERT(MUTEX_HELD(&cpu_lock));
1115 1107 return (cpu_flagged_offline(cpu->cpu_flags));
1116 1108 }
1117 1109
1118 1110 /*
1119 1111 * Determine whether the CPU is powered off.
1120 1112 */
1121 1113 int
1122 1114 cpu_is_poweredoff(cpu_t *cpu)
1123 1115 {
1124 1116 ASSERT(MUTEX_HELD(&cpu_lock));
1125 1117 return (cpu_flagged_poweredoff(cpu->cpu_flags));
1126 1118 }
1127 1119
1128 1120 /*
1129 1121 * Determine whether the CPU is handling interrupts.
1130 1122 */
1131 1123 int
1132 1124 cpu_is_nointr(cpu_t *cpu)
1133 1125 {
1134 1126 ASSERT(MUTEX_HELD(&cpu_lock));
1135 1127 return (cpu_flagged_nointr(cpu->cpu_flags));
1136 1128 }
1137 1129
1138 1130 /*
1139 1131 * Determine whether the CPU is active (scheduling threads).
1140 1132 */
1141 1133 int
1142 1134 cpu_is_active(cpu_t *cpu)
1143 1135 {
1144 1136 ASSERT(MUTEX_HELD(&cpu_lock));
1145 1137 return (cpu_flagged_active(cpu->cpu_flags));
1146 1138 }
1147 1139
1148 1140 /*
1149 1141 * Same as above, but these require cpu_flags instead of cpu_t pointers.
1150 1142 */
1151 1143 int
1152 1144 cpu_flagged_online(cpu_flag_t cpu_flags)
1153 1145 {
1154 1146 return (cpu_flagged_active(cpu_flags) &&
1155 1147 (cpu_flags & CPU_ENABLE));
1156 1148 }
1157 1149
1158 1150 int
1159 1151 cpu_flagged_offline(cpu_flag_t cpu_flags)
1160 1152 {
1161 1153 return (((cpu_flags & CPU_POWEROFF) == 0) &&
1162 1154 ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
1163 1155 }
1164 1156
1165 1157 int
1166 1158 cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
1167 1159 {
1168 1160 return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
1169 1161 }
1170 1162
1171 1163 int
1172 1164 cpu_flagged_nointr(cpu_flag_t cpu_flags)
1173 1165 {
1174 1166 return (cpu_flagged_active(cpu_flags) &&
1175 1167 (cpu_flags & CPU_ENABLE) == 0);
1176 1168 }
1177 1169
1178 1170 int
1179 1171 cpu_flagged_active(cpu_flag_t cpu_flags)
1180 1172 {
1181 1173 return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
1182 1174 ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
1183 1175 }
1184 1176
1185 1177 /*
1186 1178 * Bring the indicated CPU online.
1187 1179 */
1188 1180 int
1189 1181 cpu_online(cpu_t *cp)
1190 1182 {
1191 1183 int error = 0;
1192 1184
1193 1185 /*
1194 1186 * Handle on-line request.
1195 1187 * This code must put the new CPU on the active list before
1196 1188 * starting it because it will not be paused, and will start
1197 1189 * using the active list immediately. The real start occurs
1198 1190 * when the CPU_QUIESCED flag is turned off.
1199 1191 */
1200 1192
1201 1193 ASSERT(MUTEX_HELD(&cpu_lock));
1202 1194
1203 1195 /*
1204 1196 * Put all the cpus into a known safe place.
1205 1197 * No mutexes can be entered while CPUs are paused.
1206 1198 */
1207 1199 error = mp_cpu_start(cp); /* arch-dep hook */
1208 1200 if (error == 0) {
1209 1201 pg_cpupart_in(cp, cp->cpu_part);
1210 1202 pause_cpus(NULL, NULL);
1211 1203 cpu_add_active_internal(cp);
1212 1204 if (cp->cpu_flags & CPU_FAULTED) {
1213 1205 cp->cpu_flags &= ~CPU_FAULTED;
1214 1206 mp_cpu_faulted_exit(cp);
1215 1207 }
1216 1208 cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
1217 1209 CPU_SPARE);
1218 1210 CPU_NEW_GENERATION(cp);
1219 1211 start_cpus();
1220 1212 cpu_stats_kstat_create(cp);
1221 1213 cpu_create_intrstat(cp);
1222 1214 lgrp_kstat_create(cp);
1223 1215 cpu_state_change_notify(cp->cpu_id, CPU_ON);
1224 1216 cpu_intr_enable(cp); /* arch-dep hook */
1225 1217 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1226 1218 cpu_set_state(cp);
1227 1219 cyclic_online(cp);
1228 1220 /*
1229 1221 * This has to be called only after cyclic_online(). This
1230 1222 * function uses cyclics.
1231 1223 */
1232 1224 callout_cpu_online(cp);
1233 1225 poke_cpu(cp->cpu_id);
1234 1226 }
1235 1227
1236 1228 return (error);
1237 1229 }
1238 1230
1239 1231 /*
1240 1232 * Take the indicated CPU offline.
1241 1233 */
1242 1234 int
1243 1235 cpu_offline(cpu_t *cp, int flags)
1244 1236 {
1245 1237 cpupart_t *pp;
1246 1238 int error = 0;
1247 1239 cpu_t *ncp;
1248 1240 int intr_enable;
1249 1241 int cyclic_off = 0;
1250 1242 int callout_off = 0;
1251 1243 int loop_count;
1252 1244 int no_quiesce = 0;
1253 1245 int (*bound_func)(struct cpu *, int);
1254 1246 kthread_t *t;
1255 1247 lpl_t *cpu_lpl;
1256 1248 proc_t *p;
1257 1249 int lgrp_diff_lpl;
1258 1250 boolean_t unbind_all_threads = (flags & CPU_FORCED) != 0;
1259 1251
1260 1252 ASSERT(MUTEX_HELD(&cpu_lock));
1261 1253
1262 1254 /*
1263 1255 * If we're going from faulted or spare to offline, just
1264 1256 * clear these flags and update CPU state.
1265 1257 */
1266 1258 if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
1267 1259 if (cp->cpu_flags & CPU_FAULTED) {
1268 1260 cp->cpu_flags &= ~CPU_FAULTED;
1269 1261 mp_cpu_faulted_exit(cp);
1270 1262 }
1271 1263 cp->cpu_flags &= ~CPU_SPARE;
1272 1264 cpu_set_state(cp);
1273 1265 return (0);
1274 1266 }
1275 1267
1276 1268 /*
1277 1269 * Handle off-line request.
1278 1270 */
1279 1271 pp = cp->cpu_part;
1280 1272 /*
1281 1273 * Don't offline last online CPU in partition
1282 1274 */
1283 1275 if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
1284 1276 return (EBUSY);
1285 1277 /*
1286 1278 * Unbind all soft-bound threads bound to our CPU and hard bound threads
1287 1279 * if we were asked to.
1288 1280 */
1289 1281 error = cpu_unbind(cp->cpu_id, unbind_all_threads);
1290 1282 if (error != 0)
1291 1283 return (error);
1292 1284 /*
1293 1285 * We shouldn't be bound to this CPU ourselves.
1294 1286 */
1295 1287 if (curthread->t_bound_cpu == cp)
1296 1288 return (EBUSY);
1297 1289
1298 1290 /*
1299 1291 * Tell interested parties that this CPU is going offline.
1300 1292 */
1301 1293 CPU_NEW_GENERATION(cp);
1302 1294 cpu_state_change_notify(cp->cpu_id, CPU_OFF);
1303 1295
1304 1296 /*
1305 1297 * Tell the PG subsystem that the CPU is leaving the partition
1306 1298 */
1307 1299 pg_cpupart_out(cp, pp);
1308 1300
1309 1301 /*
1310 1302 * Take the CPU out of interrupt participation so we won't find
1311 1303 * bound kernel threads. If the architecture cannot completely
1312 1304 * shut off interrupts on the CPU, don't quiesce it, but don't
1313 1305 * run anything but interrupt thread... this is indicated by
1314 1306 * the CPU_OFFLINE flag being on but the CPU_QUIESCE flag being
1315 1307 * off.
1316 1308 */
1317 1309 intr_enable = cp->cpu_flags & CPU_ENABLE;
1318 1310 if (intr_enable)
1319 1311 no_quiesce = cpu_intr_disable(cp);
1320 1312
1321 1313 /*
1322 1314 * Record that we are aiming to offline this cpu. This acts as
1323 1315 * a barrier to further weak binding requests in thread_nomigrate
1324 1316 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
1325 1317 * lean away from this cpu. Further strong bindings are already
1326 1318 * avoided since we hold cpu_lock. Since threads that are set
1327 1319 * runnable around now and others coming off the target cpu are
1328 1320 * directed away from the target, existing strong and weak bindings
1329 1321 * (especially the latter) to the target cpu stand maximum chance of
1330 1322 * being able to unbind during the short delay loop below (if other
1331 1323 * unbound threads compete they may not see cpu in time to unbind
 1332 1324 	 * even if they would do so immediately).
1333 1325 */
1334 1326 cpu_inmotion = cp;
1335 1327 membar_enter();
1336 1328
1337 1329 /*
1338 1330 * Check for kernel threads (strong or weak) bound to that CPU.
1339 1331 * Strongly bound threads may not unbind, and we'll have to return
1340 1332 * EBUSY. Weakly bound threads should always disappear - we've
1341 1333 * stopped more weak binding with cpu_inmotion and existing
1342 1334 * bindings will drain imminently (they may not block). Nonetheless
1343 1335 * we will wait for a fixed period for all bound threads to disappear.
1344 1336 * Inactive interrupt threads are OK (they'll be in TS_FREE
1345 1337 * state). If test finds some bound threads, wait a few ticks
1346 1338 * to give short-lived threads (such as interrupts) chance to
1347 1339 * complete. Note that if no_quiesce is set, i.e. this cpu
1348 1340 * is required to service interrupts, then we take the route
1349 1341 * that permits interrupt threads to be active (or bypassed).
1350 1342 */
1351 1343 bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;
1352 1344
1353 1345 again: for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
1354 1346 if (loop_count >= 5) {
1355 1347 error = EBUSY; /* some threads still bound */
1356 1348 break;
1357 1349 }
1358 1350
1359 1351 /*
1360 1352 * If some threads were assigned, give them
1361 1353 * a chance to complete or move.
1362 1354 *
1363 1355 * This assumes that the clock_thread is not bound
1364 1356 * to any CPU, because the clock_thread is needed to
1365 1357 * do the delay(hz/100).
1366 1358 *
1367 1359 * Note: we still hold the cpu_lock while waiting for
1368 1360 * the next clock tick. This is OK since it isn't
1369 1361 * needed for anything else except processor_bind(2),
1370 1362 * and system initialization. If we drop the lock,
1371 1363 * we would risk another p_online disabling the last
1372 1364 * processor.
1373 1365 */
1374 1366 delay(hz/100);
1375 1367 }
1376 1368
1377 1369 if (error == 0 && callout_off == 0) {
1378 1370 callout_cpu_offline(cp);
1379 1371 callout_off = 1;
1380 1372 }
1381 1373
1382 1374 if (error == 0 && cyclic_off == 0) {
1383 1375 if (!cyclic_offline(cp)) {
1384 1376 /*
1385 1377 * We must have bound cyclics...
1386 1378 */
1387 1379 error = EBUSY;
1388 1380 goto out;
1389 1381 }
1390 1382 cyclic_off = 1;
1391 1383 }
1392 1384
1393 1385 /*
1394 1386 * Call mp_cpu_stop() to perform any special operations
1395 1387 * needed for this machine architecture to offline a CPU.
1396 1388 */
1397 1389 if (error == 0)
1398 1390 error = mp_cpu_stop(cp); /* arch-dep hook */
1399 1391
1400 1392 /*
1401 1393 * If that all worked, take the CPU offline and decrement
1402 1394 * ncpus_online.
1403 1395 */
1404 1396 if (error == 0) {
1405 1397 /*
1406 1398 * Put all the cpus into a known safe place.
1407 1399 * No mutexes can be entered while CPUs are paused.
1408 1400 */
1409 1401 pause_cpus(cp, NULL);
1410 1402 /*
1411 1403 * Repeat the operation, if necessary, to make sure that
1412 1404 * all outstanding low-level interrupts run to completion
1413 1405 * before we set the CPU_QUIESCED flag. It's also possible
1414 1406 * that a thread has weak bound to the cpu despite our raising
1415 1407 * cpu_inmotion above since it may have loaded that
1416 1408 * value before the barrier became visible (this would have
1417 1409 * to be the thread that was on the target cpu at the time
1418 1410 * we raised the barrier).
1419 1411 */
1420 1412 if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
1421 1413 (*bound_func)(cp, 1)) {
1422 1414 start_cpus();
1423 1415 (void) mp_cpu_start(cp);
1424 1416 goto again;
1425 1417 }
1426 1418 ncp = cp->cpu_next_part;
1427 1419 cpu_lpl = cp->cpu_lpl;
1428 1420 ASSERT(cpu_lpl != NULL);
1429 1421
1430 1422 /*
1431 1423 * Remove the CPU from the list of active CPUs.
1432 1424 */
1433 1425 cpu_remove_active(cp);
1434 1426
1435 1427 /*
1436 1428 * Walk the active process list and look for threads
1437 1429 * whose home lgroup needs to be updated, or
1438 1430 * the last CPU they run on is the one being offlined now.
1439 1431 */
1440 1432
1441 1433 ASSERT(curthread->t_cpu != cp);
1442 1434 for (p = practive; p != NULL; p = p->p_next) {
1443 1435
1444 1436 t = p->p_tlist;
1445 1437
1446 1438 if (t == NULL)
1447 1439 continue;
1448 1440
1449 1441 lgrp_diff_lpl = 0;
1450 1442
1451 1443 do {
1452 1444 ASSERT(t->t_lpl != NULL);
1453 1445 /*
1454 1446 * Taking last CPU in lpl offline
1455 1447 * Rehome thread if it is in this lpl
1456 1448 * Otherwise, update the count of how many
1457 1449 * threads are in this CPU's lgroup but have
1458 1450 * a different lpl.
1459 1451 */
1460 1452
1461 1453 if (cpu_lpl->lpl_ncpu == 0) {
1462 1454 if (t->t_lpl == cpu_lpl)
1463 1455 lgrp_move_thread(t,
1464 1456 lgrp_choose(t,
1465 1457 t->t_cpupart), 0);
1466 1458 else if (t->t_lpl->lpl_lgrpid ==
1467 1459 cpu_lpl->lpl_lgrpid)
1468 1460 lgrp_diff_lpl++;
1469 1461 }
1470 1462 ASSERT(t->t_lpl->lpl_ncpu > 0);
1471 1463
1472 1464 /*
1473 1465 * Update CPU last ran on if it was this CPU
1474 1466 */
1475 1467 if (t->t_cpu == cp && t->t_bound_cpu != cp)
1476 1468 t->t_cpu = disp_lowpri_cpu(ncp,
1477 1469 t->t_lpl, t->t_pri, NULL);
1478 1470 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1479 1471 t->t_weakbound_cpu == cp);
1480 1472
1481 1473 t = t->t_forw;
1482 1474 } while (t != p->p_tlist);
1483 1475
1484 1476 /*
1485 1477 * Didn't find any threads in the same lgroup as this
1486 1478 * CPU with a different lpl, so remove the lgroup from
1487 1479 * the process lgroup bitmask.
1488 1480 */
1489 1481
1490 1482 if (lgrp_diff_lpl == 0)
1491 1483 klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
1492 1484 }
1493 1485
1494 1486 /*
1495 1487 * Walk thread list looking for threads that need to be
1496 1488 * rehomed, since there are some threads that are not in
1497 1489 * their process's p_tlist.
1498 1490 */
1499 1491
1500 1492 t = curthread;
1501 1493 do {
1502 1494 ASSERT(t != NULL && t->t_lpl != NULL);
1503 1495
1504 1496 /*
1505 1497 * Rehome threads with same lpl as this CPU when this
1506 1498 * is the last CPU in the lpl.
1507 1499 */
1508 1500
1509 1501 if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
1510 1502 lgrp_move_thread(t,
1511 1503 lgrp_choose(t, t->t_cpupart), 1);
1512 1504
1513 1505 ASSERT(t->t_lpl->lpl_ncpu > 0);
1514 1506
1515 1507 /*
1516 1508 * Update CPU last ran on if it was this CPU
1517 1509 */
1518 1510
1519 1511 if (t->t_cpu == cp && t->t_bound_cpu != cp) {
1520 1512 t->t_cpu = disp_lowpri_cpu(ncp,
1521 1513 t->t_lpl, t->t_pri, NULL);
1522 1514 }
1523 1515 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1524 1516 t->t_weakbound_cpu == cp);
1525 1517 t = t->t_next;
1526 1518
1527 1519 } while (t != curthread);
1528 1520 ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
1529 1521 cp->cpu_flags |= CPU_OFFLINE;
1530 1522 disp_cpu_inactive(cp);
1531 1523 if (!no_quiesce)
1532 1524 cp->cpu_flags |= CPU_QUIESCED;
1533 1525 ncpus_online--;
1534 1526 cpu_set_state(cp);
1535 1527 cpu_inmotion = NULL;
1536 1528 start_cpus();
1537 1529 cpu_stats_kstat_destroy(cp);
1538 1530 cpu_delete_intrstat(cp);
1539 1531 lgrp_kstat_destroy(cp);
1540 1532 }
1541 1533
1542 1534 out:
1543 1535 cpu_inmotion = NULL;
1544 1536
1545 1537 /*
1546 1538 * If we failed, re-enable interrupts.
1547 1539 * Do this even if cpu_intr_disable returned an error, because
1548 1540 * it may have partially disabled interrupts.
1549 1541 */
1550 1542 if (error && intr_enable)
1551 1543 cpu_intr_enable(cp);
1552 1544
1553 1545 /*
1554 1546 * If we failed, but managed to offline the cyclic subsystem on this
1555 1547 * CPU, bring it back online.
1556 1548 */
1557 1549 if (error && cyclic_off)
1558 1550 cyclic_online(cp);
1559 1551
1560 1552 /*
1561 1553 * If we failed, but managed to offline callouts on this CPU,
1562 1554 * bring it back online.
1563 1555 */
1564 1556 if (error && callout_off)
1565 1557 callout_cpu_online(cp);
1566 1558
1567 1559 /*
1568 1560 * If we failed, tell the PG subsystem that the CPU is back
1569 1561 */
1570 1562 pg_cpupart_in(cp, pp);
1571 1563
1572 1564 /*
1573 1565 * If we failed, we need to notify everyone that this CPU is back on.
1574 1566 */
1575 1567 if (error != 0) {
1576 1568 CPU_NEW_GENERATION(cp);
1577 1569 cpu_state_change_notify(cp->cpu_id, CPU_ON);
1578 1570 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1579 1571 }
1580 1572
1581 1573 return (error);
1582 1574 }
1583 1575
1584 1576 /*
1585 1577 * Mark the indicated CPU as faulted, taking it offline.
1586 1578 */
1587 1579 int
1588 1580 cpu_faulted(cpu_t *cp, int flags)
1589 1581 {
1590 1582 int error = 0;
1591 1583
1592 1584 ASSERT(MUTEX_HELD(&cpu_lock));
1593 1585 ASSERT(!cpu_is_poweredoff(cp));
1594 1586
1595 1587 if (cpu_is_offline(cp)) {
1596 1588 cp->cpu_flags &= ~CPU_SPARE;
1597 1589 cp->cpu_flags |= CPU_FAULTED;
1598 1590 mp_cpu_faulted_enter(cp);
1599 1591 cpu_set_state(cp);
1600 1592 return (0);
1601 1593 }
1602 1594
1603 1595 if ((error = cpu_offline(cp, flags)) == 0) {
1604 1596 cp->cpu_flags |= CPU_FAULTED;
1605 1597 mp_cpu_faulted_enter(cp);
1606 1598 cpu_set_state(cp);
1607 1599 }
1608 1600
1609 1601 return (error);
1610 1602 }
1611 1603
1612 1604 /*
1613 1605 * Mark the indicated CPU as a spare, taking it offline.
1614 1606 */
1615 1607 int
1616 1608 cpu_spare(cpu_t *cp, int flags)
1617 1609 {
1618 1610 int error = 0;
1619 1611
1620 1612 ASSERT(MUTEX_HELD(&cpu_lock));
1621 1613 ASSERT(!cpu_is_poweredoff(cp));
1622 1614
1623 1615 if (cpu_is_offline(cp)) {
1624 1616 if (cp->cpu_flags & CPU_FAULTED) {
1625 1617 cp->cpu_flags &= ~CPU_FAULTED;
1626 1618 mp_cpu_faulted_exit(cp);
1627 1619 }
1628 1620 cp->cpu_flags |= CPU_SPARE;
1629 1621 cpu_set_state(cp);
1630 1622 return (0);
1631 1623 }
1632 1624
1633 1625 if ((error = cpu_offline(cp, flags)) == 0) {
1634 1626 cp->cpu_flags |= CPU_SPARE;
1635 1627 cpu_set_state(cp);
1636 1628 }
1637 1629
1638 1630 return (error);
1639 1631 }
1640 1632
1641 1633 /*
1642 1634 * Take the indicated CPU from poweroff to offline.
1643 1635 */
1644 1636 int
1645 1637 cpu_poweron(cpu_t *cp)
1646 1638 {
1647 1639 int error = ENOTSUP;
1648 1640
1649 1641 ASSERT(MUTEX_HELD(&cpu_lock));
1650 1642 ASSERT(cpu_is_poweredoff(cp));
1651 1643
1652 1644 error = mp_cpu_poweron(cp); /* arch-dep hook */
1653 1645 if (error == 0)
1654 1646 cpu_set_state(cp);
1655 1647
1656 1648 return (error);
1657 1649 }
1658 1650
1659 1651 /*
1660 1652 * Take the indicated CPU from any inactive state to powered off.
1661 1653 */
1662 1654 int
1663 1655 cpu_poweroff(cpu_t *cp)
1664 1656 {
1665 1657 int error = ENOTSUP;
1666 1658
1667 1659 ASSERT(MUTEX_HELD(&cpu_lock));
1668 1660 ASSERT(cpu_is_offline(cp));
1669 1661
1670 1662 if (!(cp->cpu_flags & CPU_QUIESCED))
1671 1663 return (EBUSY); /* not completely idle */
1672 1664
1673 1665 error = mp_cpu_poweroff(cp); /* arch-dep hook */
1674 1666 if (error == 0)
1675 1667 cpu_set_state(cp);
1676 1668
1677 1669 return (error);
1678 1670 }
1679 1671
1680 1672 /*
1681 1673 * Initialize the Sequential CPU id lookup table
1682 1674 */
1683 1675 void
1684 1676 cpu_seq_tbl_init()
1685 1677 {
1686 1678 cpu_t **tbl;
1687 1679
1688 1680 tbl = kmem_zalloc(sizeof (struct cpu *) * max_ncpus, KM_SLEEP);
1689 1681 tbl[0] = CPU;
1690 1682
1691 1683 cpu_seq = tbl;
1692 1684 }
1693 1685
1694 1686 /*
1695 1687 * Initialize the CPU lists for the first CPU.
1696 1688 */
1697 1689 void
1698 1690 cpu_list_init(cpu_t *cp)
1699 1691 {
1700 1692 cp->cpu_next = cp;
1701 1693 cp->cpu_prev = cp;
1702 1694 cpu_list = cp;
1703 1695 clock_cpu_list = cp;
1704 1696
1705 1697 cp->cpu_next_onln = cp;
1706 1698 cp->cpu_prev_onln = cp;
1707 1699 cpu_active = cp;
1708 1700
1709 1701 cp->cpu_seqid = 0;
1710 1702 CPUSET_ADD(cpu_seqid_inuse, 0);
1711 1703
1712 1704 /*
1713 1705 * Bootstrap cpu_seq using cpu_list
1714 1706 * The cpu_seq[] table will be dynamically allocated
1715 1707 * when kmem later becomes available (but before going MP)
1716 1708 */
1717 1709 cpu_seq = &cpu_list;
1718 1710
1719 1711 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
1720 1712 cp_default.cp_cpulist = cp;
1721 1713 cp_default.cp_ncpus = 1;
1722 1714 cp->cpu_next_part = cp;
1723 1715 cp->cpu_prev_part = cp;
1724 1716 cp->cpu_part = &cp_default;
1725 1717
1726 1718 CPUSET_ADD(cpu_available, cp->cpu_id);
1727 1719 }
1728 1720
1729 1721 /*
1730 1722 * Insert a CPU into the list of available CPUs.
1731 1723 */
1732 1724 void
1733 1725 cpu_add_unit(cpu_t *cp)
1734 1726 {
1735 1727 int seqid;
1736 1728
1737 1729 ASSERT(MUTEX_HELD(&cpu_lock));
1738 1730 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */
1739 1731
1740 1732 lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);
1741 1733
1742 1734 /*
1743 1735 * Note: most users of the cpu_list will grab the
1744 1736 * cpu_lock to insure that it isn't modified. However,
1745 1737 * certain users can't or won't do that. To allow this
1746 1738 * we pause the other cpus. Users who walk the list
1747 1739 * without cpu_lock, must disable kernel preemption
1748 1740 * to insure that the list isn't modified underneath
1749 1741 * them. Also, any cached pointers to cpu structures
1750 1742 * must be revalidated by checking to see if the
1751 1743 * cpu_next pointer points to itself. This check must
1752 1744 * be done with the cpu_lock held or kernel preemption
1753 1745 * disabled. This check relies upon the fact that
1754 1746 * old cpu structures are not free'ed or cleared after
1755 1747 	 * they are removed from the cpu_list.
1756 1748 *
1757 1749 * Note that the clock code walks the cpu list dereferencing
1758 1750 * the cpu_part pointer, so we need to initialize it before
1759 1751 * adding the cpu to the list.
1760 1752 */
1761 1753 cp->cpu_part = &cp_default;
1762 1754 pause_cpus(NULL, NULL);
1763 1755 cp->cpu_next = cpu_list;
1764 1756 cp->cpu_prev = cpu_list->cpu_prev;
1765 1757 cpu_list->cpu_prev->cpu_next = cp;
1766 1758 cpu_list->cpu_prev = cp;
1767 1759 start_cpus();
1768 1760
1769 1761 for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
1770 1762 continue;
1771 1763 CPUSET_ADD(cpu_seqid_inuse, seqid);
1772 1764 cp->cpu_seqid = seqid;
1773 1765
1774 1766 if (seqid > max_cpu_seqid_ever)
1775 1767 max_cpu_seqid_ever = seqid;
1776 1768
1777 1769 ASSERT(ncpus < max_ncpus);
1778 1770 ncpus++;
1779 1771 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
1780 1772 cpu[cp->cpu_id] = cp;
1781 1773 CPUSET_ADD(cpu_available, cp->cpu_id);
1782 1774 cpu_seq[cp->cpu_seqid] = cp;
1783 1775
1784 1776 /*
1785 1777 * allocate a pause thread for this CPU.
1786 1778 */
1787 1779 cpu_pause_alloc(cp);
1788 1780
1789 1781 /*
1790 1782 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
1791 1783 * link them into a list of just that CPU.
1792 1784 * This is so that disp_lowpri_cpu will work for thread_create in
1793 1785 * pause_cpus() when called from the startup thread in a new CPU.
1794 1786 */
1795 1787 cp->cpu_next_onln = cp;
1796 1788 cp->cpu_prev_onln = cp;
1797 1789 cpu_info_kstat_create(cp);
1798 1790 cp->cpu_next_part = cp;
1799 1791 cp->cpu_prev_part = cp;
1800 1792
1801 1793 init_cpu_mstate(cp, CMS_SYSTEM);
1802 1794
1803 1795 pool_pset_mod = gethrtime();
1804 1796 }
1805 1797
1806 1798 /*
1807 1799 * Do the opposite of cpu_add_unit().
1808 1800 */
1809 1801 void
1810 1802 cpu_del_unit(int cpuid)
1811 1803 {
1812 1804 struct cpu *cp, *cpnext;
1813 1805
1814 1806 ASSERT(MUTEX_HELD(&cpu_lock));
1815 1807 cp = cpu[cpuid];
1816 1808 ASSERT(cp != NULL);
1817 1809
1818 1810 ASSERT(cp->cpu_next_onln == cp);
1819 1811 ASSERT(cp->cpu_prev_onln == cp);
1820 1812 ASSERT(cp->cpu_next_part == cp);
1821 1813 ASSERT(cp->cpu_prev_part == cp);
1822 1814
1823 1815 /*
1824 1816 * Tear down the CPU's physical ID cache, and update any
1825 1817 * processor groups
1826 1818 */
1827 1819 pg_cpu_fini(cp, NULL);
1828 1820 pghw_physid_destroy(cp);
1829 1821
1830 1822 /*
1831 1823 * Destroy kstat stuff.
1832 1824 */
1833 1825 cpu_info_kstat_destroy(cp);
1834 1826 term_cpu_mstate(cp);
1835 1827 /*
1836 1828 * Free up pause thread.
1837 1829 */
1838 1830 cpu_pause_free(cp);
1839 1831 CPUSET_DEL(cpu_available, cp->cpu_id);
1840 1832 cpu[cp->cpu_id] = NULL;
1841 1833 cpu_seq[cp->cpu_seqid] = NULL;
1842 1834
1843 1835 /*
1844 1836 * The clock thread and mutex_vector_enter cannot hold the
1845 1837 * cpu_lock while traversing the cpu list, therefore we pause
1846 1838 * all other threads by pausing the other cpus. These, and any
1847 1839 * other routines holding cpu pointers while possibly sleeping
1848 1840 * must be sure to call kpreempt_disable before processing the
1849 1841 * list and be sure to check that the cpu has not been deleted
1850 1842 * after any sleeps (check cp->cpu_next != NULL). We guarantee
1851 1843 * to keep the deleted cpu structure around.
1852 1844 *
1853 1845 * Note that this MUST be done AFTER cpu_available
1854 1846 * has been updated so that we don't waste time
1855 1847 * trying to pause the cpu we're trying to delete.
1856 1848 */
1857 1849 pause_cpus(NULL, NULL);
1858 1850
1859 1851 cpnext = cp->cpu_next;
1860 1852 cp->cpu_prev->cpu_next = cp->cpu_next;
1861 1853 cp->cpu_next->cpu_prev = cp->cpu_prev;
1862 1854 if (cp == cpu_list)
1863 1855 cpu_list = cpnext;
1864 1856
1865 1857 /*
1866 1858 * Signals that the cpu has been deleted (see above).
1867 1859 */
1868 1860 cp->cpu_next = NULL;
1869 1861 cp->cpu_prev = NULL;
1870 1862
1871 1863 start_cpus();
1872 1864
1873 1865 CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
1874 1866 ncpus--;
1875 1867 lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);
1876 1868
1877 1869 pool_pset_mod = gethrtime();
1878 1870 }
1879 1871
1880 1872 /*
1881 1873 * Add a CPU to the list of active CPUs.
1882 1874 * This routine must not get any locks, because other CPUs are paused.
1883 1875 */
1884 1876 static void
1885 1877 cpu_add_active_internal(cpu_t *cp)
1886 1878 {
1887 1879 cpupart_t *pp = cp->cpu_part;
1888 1880
1889 1881 ASSERT(MUTEX_HELD(&cpu_lock));
1890 1882 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */
1891 1883
1892 1884 ncpus_online++;
1893 1885 cpu_set_state(cp);
1894 1886 cp->cpu_next_onln = cpu_active;
1895 1887 cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
1896 1888 cpu_active->cpu_prev_onln->cpu_next_onln = cp;
1897 1889 cpu_active->cpu_prev_onln = cp;
1898 1890
1899 1891 if (pp->cp_cpulist) {
1900 1892 cp->cpu_next_part = pp->cp_cpulist;
1901 1893 cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part;
1902 1894 pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp;
1903 1895 pp->cp_cpulist->cpu_prev_part = cp;
1904 1896 } else {
1905 1897 ASSERT(pp->cp_ncpus == 0);
1906 1898 pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
1907 1899 }
1908 1900 pp->cp_ncpus++;
1909 1901 if (pp->cp_ncpus == 1) {
1910 1902 cp_numparts_nonempty++;
1911 1903 ASSERT(cp_numparts_nonempty != 0);
1912 1904 }
1913 1905
1914 1906 pg_cpu_active(cp);
1915 1907 lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);
1916 1908
1917 1909 bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
1918 1910 }
1919 1911
1920 1912 /*
1921 1913 * Add a CPU to the list of active CPUs.
1922 1914 * This is called from machine-dependent layers when a new CPU is started.
1923 1915 */
1924 1916 void
1925 1917 cpu_add_active(cpu_t *cp)
1926 1918 {
1927 1919 pg_cpupart_in(cp, cp->cpu_part);
1928 1920
1929 1921 pause_cpus(NULL, NULL);
1930 1922 cpu_add_active_internal(cp);
1931 1923 start_cpus();
1932 1924
1933 1925 cpu_stats_kstat_create(cp);
1934 1926 cpu_create_intrstat(cp);
1935 1927 lgrp_kstat_create(cp);
1936 1928 cpu_state_change_notify(cp->cpu_id, CPU_INIT);
1937 1929 }
1938 1930
1939 1931
1940 1932 /*
1941 1933 * Remove a CPU from the list of active CPUs.
1942 1934 * This routine must not get any locks, because other CPUs are paused.
1943 1935 */
1944 1936 /* ARGSUSED */
1945 1937 static void
1946 1938 cpu_remove_active(cpu_t *cp)
1947 1939 {
1948 1940 cpupart_t *pp = cp->cpu_part;
1949 1941
1950 1942 ASSERT(MUTEX_HELD(&cpu_lock));
1951 1943 ASSERT(cp->cpu_next_onln != cp); /* not the last one */
1952 1944 ASSERT(cp->cpu_prev_onln != cp); /* not the last one */
1953 1945
1954 1946 pg_cpu_inactive(cp);
1955 1947
1956 1948 lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0);
1957 1949
1958 1950 if (cp == clock_cpu_list)
1959 1951 clock_cpu_list = cp->cpu_next_onln;
1960 1952
1961 1953 cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln;
1962 1954 cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln;
1963 1955 if (cpu_active == cp) {
1964 1956 cpu_active = cp->cpu_next_onln;
1965 1957 }
1966 1958 cp->cpu_next_onln = cp;
1967 1959 cp->cpu_prev_onln = cp;
1968 1960
1969 1961 cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
1970 1962 cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
1971 1963 if (pp->cp_cpulist == cp) {
1972 1964 pp->cp_cpulist = cp->cpu_next_part;
1973 1965 ASSERT(pp->cp_cpulist != cp);
1974 1966 }
1975 1967 cp->cpu_next_part = cp;
1976 1968 cp->cpu_prev_part = cp;
1977 1969 pp->cp_ncpus--;
1978 1970 if (pp->cp_ncpus == 0) {
1979 1971 cp_numparts_nonempty--;
1980 1972 ASSERT(cp_numparts_nonempty != 0);
1981 1973 }
1982 1974 }
1983 1975
1984 1976 /*
1985 1977 * Routine used to setup a newly inserted CPU in preparation for starting
1986 1978 * it running code.
1987 1979 */
1988 1980 int
1989 1981 cpu_configure(int cpuid)
1990 1982 {
1991 1983 int retval = 0;
1992 1984
1993 1985 ASSERT(MUTEX_HELD(&cpu_lock));
1994 1986
1995 1987 /*
1996 1988 * Some structures are statically allocated based upon
1997 1989 * the maximum number of cpus the system supports. Do not
1998 1990 * try to add anything beyond this limit.
1999 1991 */
2000 1992 if (cpuid < 0 || cpuid >= NCPU) {
2001 1993 return (EINVAL);
2002 1994 }
2003 1995
2004 1996 if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) {
2005 1997 return (EALREADY);
2006 1998 }
2007 1999
2008 2000 if ((retval = mp_cpu_configure(cpuid)) != 0) {
2009 2001 return (retval);
2010 2002 }
2011 2003
2012 2004 cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF;
2013 2005 cpu_set_state(cpu[cpuid]);
2014 2006 retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG);
2015 2007 if (retval != 0)
2016 2008 (void) mp_cpu_unconfigure(cpuid);
2017 2009
2018 2010 return (retval);
2019 2011 }
2020 2012
2021 2013 /*
2022 2014 * Routine used to cleanup a CPU that has been powered off. This will
2023 2015 * destroy all per-cpu information related to this cpu.
2024 2016 */
2025 2017 int
2026 2018 cpu_unconfigure(int cpuid)
2027 2019 {
2028 2020 int error;
2029 2021
2030 2022 ASSERT(MUTEX_HELD(&cpu_lock));
2031 2023
2032 2024 if (cpu[cpuid] == NULL) {
2033 2025 return (ENODEV);
2034 2026 }
2035 2027
2036 2028 if (cpu[cpuid]->cpu_flags == 0) {
2037 2029 return (EALREADY);
2038 2030 }
2039 2031
2040 2032 if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) {
2041 2033 return (EBUSY);
2042 2034 }
2043 2035
2044 2036 if (cpu[cpuid]->cpu_props != NULL) {
2045 2037 (void) nvlist_free(cpu[cpuid]->cpu_props);
2046 2038 cpu[cpuid]->cpu_props = NULL;
2047 2039 }
2048 2040
2049 2041 error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG);
2050 2042
2051 2043 if (error != 0)
2052 2044 return (error);
2053 2045
2054 2046 return (mp_cpu_unconfigure(cpuid));
2055 2047 }
2056 2048
2057 2049 /*
2058 2050 * Routines for registering and de-registering cpu_setup callback functions.
2059 2051 *
2060 2052 * Caller's context
2061 2053 * These routines must not be called from a driver's attach(9E) or
2062 2054 * detach(9E) entry point.
2063 2055 *
2064 2056 * NOTE: CPU callbacks should not block. They are called with cpu_lock held.
2065 2057 */
2066 2058
2067 2059 /*
2068 2060 * Ideally, these would be dynamically allocated and put into a linked
2069 2061 * list; however that is not feasible because the registration routine
2070 2062 * has to be available before the kmem allocator is working (in fact,
2071 2063 * it is called by the kmem allocator init code). In any case, there
2072 2064 * are quite a few extra entries for future users.
2073 2065 */
2074 2066 #define NCPU_SETUPS 20
2075 2067
2076 2068 struct cpu_setup {
2077 2069 cpu_setup_func_t *func;
2078 2070 void *arg;
2079 2071 } cpu_setups[NCPU_SETUPS];
2080 2072
2081 2073 void
2082 2074 register_cpu_setup_func(cpu_setup_func_t *func, void *arg)
2083 2075 {
2084 2076 int i;
2085 2077
2086 2078 ASSERT(MUTEX_HELD(&cpu_lock));
2087 2079
2088 2080 for (i = 0; i < NCPU_SETUPS; i++)
2089 2081 if (cpu_setups[i].func == NULL)
2090 2082 break;
2091 2083 if (i >= NCPU_SETUPS)
2092 2084 cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries");
2093 2085
2094 2086 cpu_setups[i].func = func;
2095 2087 cpu_setups[i].arg = arg;
2096 2088 }
2097 2089
2098 2090 void
2099 2091 unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg)
2100 2092 {
2101 2093 int i;
2102 2094
2103 2095 ASSERT(MUTEX_HELD(&cpu_lock));
2104 2096
2105 2097 for (i = 0; i < NCPU_SETUPS; i++)
2106 2098 if ((cpu_setups[i].func == func) &&
2107 2099 (cpu_setups[i].arg == arg))
2108 2100 break;
2109 2101 if (i >= NCPU_SETUPS)
2110 2102 cmn_err(CE_PANIC, "Could not find cpu_setup callback to "
2111 2103 "deregister");
2112 2104
2113 2105 cpu_setups[i].func = NULL;
2114 2106 cpu_setups[i].arg = 0;
2115 2107 }
2116 2108
2117 2109 /*
2118 2110 * Call any state change hooks for this CPU, ignore any errors.
2119 2111 */
2120 2112 void
2121 2113 cpu_state_change_notify(int id, cpu_setup_t what)
2122 2114 {
2123 2115 int i;
2124 2116
2125 2117 ASSERT(MUTEX_HELD(&cpu_lock));
2126 2118
2127 2119 for (i = 0; i < NCPU_SETUPS; i++) {
2128 2120 if (cpu_setups[i].func != NULL) {
2129 2121 cpu_setups[i].func(what, id, cpu_setups[i].arg);
2130 2122 }
2131 2123 }
2132 2124 }
2133 2125
2134 2126 /*
2135 2127 * Call any state change hooks for this CPU, undo it if error found.
2136 2128 */
2137 2129 static int
2138 2130 cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo)
2139 2131 {
2140 2132 int i;
2141 2133 int retval = 0;
2142 2134
2143 2135 ASSERT(MUTEX_HELD(&cpu_lock));
2144 2136
2145 2137 for (i = 0; i < NCPU_SETUPS; i++) {
2146 2138 if (cpu_setups[i].func != NULL) {
2147 2139 retval = cpu_setups[i].func(what, id,
2148 2140 cpu_setups[i].arg);
2149 2141 if (retval) {
2150 2142 for (i--; i >= 0; i--) {
2151 2143 if (cpu_setups[i].func != NULL)
2152 2144 cpu_setups[i].func(undo,
2153 2145 id, cpu_setups[i].arg);
2154 2146 }
2155 2147 break;
2156 2148 }
2157 2149 }
2158 2150 }
2159 2151 return (retval);
2160 2152 }
2161 2153
2162 2154 /*
2163 2155 * Export information about this CPU via the kstat mechanism.
2164 2156 */
2165 2157 static struct {
2166 2158 kstat_named_t ci_state;
2167 2159 kstat_named_t ci_state_begin;
2168 2160 kstat_named_t ci_cpu_type;
2169 2161 kstat_named_t ci_fpu_type;
2170 2162 kstat_named_t ci_clock_MHz;
2171 2163 kstat_named_t ci_chip_id;
2172 2164 kstat_named_t ci_implementation;
2173 2165 kstat_named_t ci_brandstr;
2174 2166 kstat_named_t ci_core_id;
2175 2167 kstat_named_t ci_curr_clock_Hz;
2176 2168 kstat_named_t ci_supp_freq_Hz;
2177 2169 kstat_named_t ci_pg_id;
2178 2170 #if defined(__sparcv9)
2179 2171 kstat_named_t ci_device_ID;
2180 2172 kstat_named_t ci_cpu_fru;
2181 2173 #endif
2182 2174 #if defined(__x86)
2183 2175 kstat_named_t ci_vendorstr;
2184 2176 kstat_named_t ci_family;
2185 2177 kstat_named_t ci_model;
2186 2178 kstat_named_t ci_step;
2187 2179 kstat_named_t ci_clogid;
2188 2180 kstat_named_t ci_pkg_core_id;
2189 2181 kstat_named_t ci_ncpuperchip;
2190 2182 kstat_named_t ci_ncoreperchip;
2191 2183 kstat_named_t ci_max_cstates;
2192 2184 kstat_named_t ci_curr_cstate;
2193 2185 kstat_named_t ci_cacheid;
2194 2186 kstat_named_t ci_sktstr;
2195 2187 #endif
2196 2188 } cpu_info_template = {
2197 2189 { "state", KSTAT_DATA_CHAR },
2198 2190 { "state_begin", KSTAT_DATA_LONG },
2199 2191 { "cpu_type", KSTAT_DATA_CHAR },
2200 2192 { "fpu_type", KSTAT_DATA_CHAR },
2201 2193 { "clock_MHz", KSTAT_DATA_LONG },
2202 2194 { "chip_id", KSTAT_DATA_LONG },
2203 2195 { "implementation", KSTAT_DATA_STRING },
2204 2196 { "brand", KSTAT_DATA_STRING },
2205 2197 { "core_id", KSTAT_DATA_LONG },
2206 2198 { "current_clock_Hz", KSTAT_DATA_UINT64 },
2207 2199 { "supported_frequencies_Hz", KSTAT_DATA_STRING },
2208 2200 { "pg_id", KSTAT_DATA_LONG },
2209 2201 #if defined(__sparcv9)
2210 2202 { "device_ID", KSTAT_DATA_UINT64 },
2211 2203 { "cpu_fru", KSTAT_DATA_STRING },
2212 2204 #endif
2213 2205 #if defined(__x86)
2214 2206 { "vendor_id", KSTAT_DATA_STRING },
2215 2207 { "family", KSTAT_DATA_INT32 },
2216 2208 { "model", KSTAT_DATA_INT32 },
2217 2209 { "stepping", KSTAT_DATA_INT32 },
2218 2210 { "clog_id", KSTAT_DATA_INT32 },
2219 2211 { "pkg_core_id", KSTAT_DATA_LONG },
2220 2212 { "ncpu_per_chip", KSTAT_DATA_INT32 },
2221 2213 { "ncore_per_chip", KSTAT_DATA_INT32 },
2222 2214 { "supported_max_cstates", KSTAT_DATA_INT32 },
2223 2215 { "current_cstate", KSTAT_DATA_INT32 },
2224 2216 { "cache_id", KSTAT_DATA_INT32 },
2225 2217 { "socket_type", KSTAT_DATA_STRING },
2226 2218 #endif
2227 2219 };
2228 2220
2229 2221 static kmutex_t cpu_info_template_lock;
2230 2222
2231 2223 static int
2232 2224 cpu_info_kstat_update(kstat_t *ksp, int rw)
2233 2225 {
2234 2226 cpu_t *cp = ksp->ks_private;
2235 2227 const char *pi_state;
2236 2228
2237 2229 if (rw == KSTAT_WRITE)
2238 2230 return (EACCES);
2239 2231
2240 2232 #if defined(__x86)
2241 2233 /* Is the cpu still initialising itself? */
2242 2234 if (cpuid_checkpass(cp, 1) == 0)
2243 2235 return (ENXIO);
2244 2236 #endif
2245 2237 switch (cp->cpu_type_info.pi_state) {
2246 2238 case P_ONLINE:
2247 2239 pi_state = PS_ONLINE;
2248 2240 break;
2249 2241 case P_POWEROFF:
2250 2242 pi_state = PS_POWEROFF;
2251 2243 break;
2252 2244 case P_NOINTR:
2253 2245 pi_state = PS_NOINTR;
2254 2246 break;
2255 2247 case P_FAULTED:
2256 2248 pi_state = PS_FAULTED;
2257 2249 break;
2258 2250 case P_SPARE:
2259 2251 pi_state = PS_SPARE;
2260 2252 break;
2261 2253 case P_OFFLINE:
2262 2254 pi_state = PS_OFFLINE;
2263 2255 break;
2264 2256 default:
2265 2257 pi_state = "unknown";
2266 2258 }
2267 2259 (void) strcpy(cpu_info_template.ci_state.value.c, pi_state);
2268 2260 cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin;
2269 2261 (void) strncpy(cpu_info_template.ci_cpu_type.value.c,
2270 2262 cp->cpu_type_info.pi_processor_type, 15);
2271 2263 (void) strncpy(cpu_info_template.ci_fpu_type.value.c,
2272 2264 cp->cpu_type_info.pi_fputypes, 15);
2273 2265 cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock;
2274 2266 cpu_info_template.ci_chip_id.value.l =
2275 2267 pg_plat_hw_instance_id(cp, PGHW_CHIP);
2276 2268 kstat_named_setstr(&cpu_info_template.ci_implementation,
2277 2269 cp->cpu_idstr);
2278 2270 kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr);
2279 2271 cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp);
2280 2272 cpu_info_template.ci_curr_clock_Hz.value.ui64 =
2281 2273 cp->cpu_curr_clock;
2282 2274 cpu_info_template.ci_pg_id.value.l =
2283 2275 cp->cpu_pg && cp->cpu_pg->cmt_lineage ?
2284 2276 cp->cpu_pg->cmt_lineage->pg_id : -1;
2285 2277 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz,
2286 2278 cp->cpu_supp_freqs);
2287 2279 #if defined(__sparcv9)
2288 2280 cpu_info_template.ci_device_ID.value.ui64 =
2289 2281 cpunodes[cp->cpu_id].device_id;
2290 2282 kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp));
2291 2283 #endif
2292 2284 #if defined(__x86)
2293 2285 kstat_named_setstr(&cpu_info_template.ci_vendorstr,
2294 2286 cpuid_getvendorstr(cp));
2295 2287 cpu_info_template.ci_family.value.l = cpuid_getfamily(cp);
2296 2288 cpu_info_template.ci_model.value.l = cpuid_getmodel(cp);
2297 2289 cpu_info_template.ci_step.value.l = cpuid_getstep(cp);
2298 2290 cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp);
2299 2291 cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp);
2300 2292 cpu_info_template.ci_ncoreperchip.value.l =
2301 2293 cpuid_get_ncore_per_chip(cp);
2302 2294 cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp);
2303 2295 cpu_info_template.ci_max_cstates.value.l = cp->cpu_m.max_cstates;
2304 2296 cpu_info_template.ci_curr_cstate.value.l = cpu_idle_get_cpu_state(cp);
2305 2297 cpu_info_template.ci_cacheid.value.i32 = cpuid_get_cacheid(cp);
2306 2298 kstat_named_setstr(&cpu_info_template.ci_sktstr,
2307 2299 cpuid_getsocketstr(cp));
2308 2300 #endif
2309 2301
2310 2302 return (0);
2311 2303 }
2312 2304
2313 2305 static void
2314 2306 cpu_info_kstat_create(cpu_t *cp)
2315 2307 {
2316 2308 zoneid_t zoneid;
2317 2309
2318 2310 ASSERT(MUTEX_HELD(&cpu_lock));
2319 2311
2320 2312 if (pool_pset_enabled())
2321 2313 zoneid = GLOBAL_ZONEID;
2322 2314 else
2323 2315 zoneid = ALL_ZONES;
2324 2316 if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id,
2325 2317 NULL, "misc", KSTAT_TYPE_NAMED,
2326 2318 sizeof (cpu_info_template) / sizeof (kstat_named_t),
2327 2319 KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE, zoneid)) != NULL) {
2328 2320 cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN;
2329 2321 #if defined(__sparcv9)
2330 2322 cp->cpu_info_kstat->ks_data_size +=
2331 2323 strlen(cpu_fru_fmri(cp)) + 1;
2332 2324 #endif
2333 2325 #if defined(__x86)
2334 2326 cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN;
2335 2327 #endif
2336 2328 if (cp->cpu_supp_freqs != NULL)
2337 2329 cp->cpu_info_kstat->ks_data_size +=
2338 2330 strlen(cp->cpu_supp_freqs) + 1;
2339 2331 cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock;
2340 2332 cp->cpu_info_kstat->ks_data = &cpu_info_template;
2341 2333 cp->cpu_info_kstat->ks_private = cp;
2342 2334 cp->cpu_info_kstat->ks_update = cpu_info_kstat_update;
2343 2335 kstat_install(cp->cpu_info_kstat);
2344 2336 }
2345 2337 }
2346 2338
2347 2339 static void
2348 2340 cpu_info_kstat_destroy(cpu_t *cp)
2349 2341 {
2350 2342 ASSERT(MUTEX_HELD(&cpu_lock));
2351 2343
2352 2344 kstat_delete(cp->cpu_info_kstat);
2353 2345 cp->cpu_info_kstat = NULL;
2354 2346 }
2355 2347
2356 2348 /*
2357 2349 * Create and install kstats for the boot CPU.
2358 2350 */
2359 2351 void
2360 2352 cpu_kstat_init(cpu_t *cp)
2361 2353 {
2362 2354 mutex_enter(&cpu_lock);
2363 2355 cpu_info_kstat_create(cp);
2364 2356 cpu_stats_kstat_create(cp);
2365 2357 cpu_create_intrstat(cp);
2366 2358 cpu_set_state(cp);
2367 2359 mutex_exit(&cpu_lock);
2368 2360 }
2369 2361
2370 2362 /*
2371 2363 * Make visible to the zone that subset of the cpu information that would be
2372 2364 * initialized when a cpu is configured (but still offline).
2373 2365 */
2374 2366 void
2375 2367 cpu_visibility_configure(cpu_t *cp, zone_t *zone)
2376 2368 {
2377 2369 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2378 2370
2379 2371 ASSERT(MUTEX_HELD(&cpu_lock));
2380 2372 ASSERT(pool_pset_enabled());
2381 2373 ASSERT(cp != NULL);
2382 2374
2383 2375 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2384 2376 zone->zone_ncpus++;
2385 2377 ASSERT(zone->zone_ncpus <= ncpus);
2386 2378 }
2387 2379 if (cp->cpu_info_kstat != NULL)
2388 2380 kstat_zone_add(cp->cpu_info_kstat, zoneid);
2389 2381 }
2390 2382
2391 2383 /*
2392 2384 * Make visible to the zone that subset of the cpu information that would be
2393 2385 * initialized when a previously configured cpu is onlined.
2394 2386 */
2395 2387 void
2396 2388 cpu_visibility_online(cpu_t *cp, zone_t *zone)
2397 2389 {
2398 2390 kstat_t *ksp;
2399 2391 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */
2400 2392 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2401 2393 processorid_t cpun;
2402 2394
2403 2395 ASSERT(MUTEX_HELD(&cpu_lock));
2404 2396 ASSERT(pool_pset_enabled());
2405 2397 ASSERT(cp != NULL);
2406 2398 ASSERT(cpu_is_active(cp));
2407 2399
2408 2400 cpun = cp->cpu_id;
2409 2401 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2410 2402 zone->zone_ncpus_online++;
2411 2403 ASSERT(zone->zone_ncpus_online <= ncpus_online);
2412 2404 }
2413 2405 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
2414 2406 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
2415 2407 != NULL) {
2416 2408 kstat_zone_add(ksp, zoneid);
2417 2409 kstat_rele(ksp);
2418 2410 }
2419 2411 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
2420 2412 kstat_zone_add(ksp, zoneid);
2421 2413 kstat_rele(ksp);
2422 2414 }
2423 2415 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
2424 2416 kstat_zone_add(ksp, zoneid);
2425 2417 kstat_rele(ksp);
2426 2418 }
2427 2419 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
2428 2420 NULL) {
2429 2421 kstat_zone_add(ksp, zoneid);
2430 2422 kstat_rele(ksp);
2431 2423 }
2432 2424 }
2433 2425
2434 2426 /*
2435 2427 * Update relevant kstats such that cpu is now visible to processes
2436 2428 * executing in specified zone.
2437 2429 */
2438 2430 void
2439 2431 cpu_visibility_add(cpu_t *cp, zone_t *zone)
2440 2432 {
2441 2433 cpu_visibility_configure(cp, zone);
2442 2434 if (cpu_is_active(cp))
2443 2435 cpu_visibility_online(cp, zone);
2444 2436 }
2445 2437
2446 2438 /*
2447 2439 * Make invisible to the zone that subset of the cpu information that would be
2448 2440 * torn down when a previously offlined cpu is unconfigured.
2449 2441 */
2450 2442 void
2451 2443 cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone)
2452 2444 {
2453 2445 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2454 2446
2455 2447 ASSERT(MUTEX_HELD(&cpu_lock));
2456 2448 ASSERT(pool_pset_enabled());
2457 2449 ASSERT(cp != NULL);
2458 2450
2459 2451 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2460 2452 ASSERT(zone->zone_ncpus != 0);
2461 2453 zone->zone_ncpus--;
2462 2454 }
2463 2455 if (cp->cpu_info_kstat)
2464 2456 kstat_zone_remove(cp->cpu_info_kstat, zoneid);
2465 2457 }
2466 2458
2467 2459 /*
2468 2460 * Make invisible to the zone that subset of the cpu information that would be
2469 2461 * torn down when a cpu is offlined (but still configured).
2470 2462 */
2471 2463 void
2472 2464 cpu_visibility_offline(cpu_t *cp, zone_t *zone)
2473 2465 {
2474 2466 kstat_t *ksp;
2475 2467 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */
2476 2468 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2477 2469 processorid_t cpun;
2478 2470
2479 2471 ASSERT(MUTEX_HELD(&cpu_lock));
2480 2472 ASSERT(pool_pset_enabled());
2481 2473 ASSERT(cp != NULL);
2482 2474 ASSERT(cpu_is_active(cp));
2483 2475
2484 2476 cpun = cp->cpu_id;
2485 2477 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2486 2478 ASSERT(zone->zone_ncpus_online != 0);
2487 2479 zone->zone_ncpus_online--;
2488 2480 }
2489 2481
2490 2482 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
2491 2483 NULL) {
2492 2484 kstat_zone_remove(ksp, zoneid);
2493 2485 kstat_rele(ksp);
2494 2486 }
2495 2487 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
2496 2488 kstat_zone_remove(ksp, zoneid);
2497 2489 kstat_rele(ksp);
2498 2490 }
2499 2491 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
2500 2492 kstat_zone_remove(ksp, zoneid);
2501 2493 kstat_rele(ksp);
2502 2494 }
2503 2495 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
2504 2496 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
2505 2497 != NULL) {
2506 2498 kstat_zone_remove(ksp, zoneid);
2507 2499 kstat_rele(ksp);
2508 2500 }
2509 2501 }
2510 2502
2511 2503 /*
2512 2504 * Update relevant kstats such that cpu is no longer visible to processes
2513 2505 * executing in specified zone.
2514 2506 */
2515 2507 void
2516 2508 cpu_visibility_remove(cpu_t *cp, zone_t *zone)
2517 2509 {
2518 2510 if (cpu_is_active(cp))
2519 2511 cpu_visibility_offline(cp, zone);
2520 2512 cpu_visibility_unconfigure(cp, zone);
2521 2513 }
2522 2514
2523 2515 /*
2524 2516 * Bind a thread to a CPU as requested.
2525 2517 */
2526 2518 int
2527 2519 cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind,
2528 2520 int *error)
2529 2521 {
2530 2522 processorid_t binding;
2531 2523 cpu_t *cp = NULL;
2532 2524
2533 2525 ASSERT(MUTEX_HELD(&cpu_lock));
2534 2526 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
2535 2527
2536 2528 thread_lock(tp);
2537 2529
2538 2530 /*
2539 2531 * Record old binding, but change the obind, which was initialized
2540 2532 * to PBIND_NONE, only if this thread has a binding. This avoids
2541 2533 * reporting PBIND_NONE for a process when some LWPs are bound.
2542 2534 */
2543 2535 binding = tp->t_bind_cpu;
2544 2536 if (binding != PBIND_NONE)
2545 2537 *obind = binding; /* record old binding */
2546 2538
2547 2539 switch (bind) {
2548 2540 case PBIND_QUERY:
2549 2541 /* Just return the old binding */
2550 2542 thread_unlock(tp);
2551 2543 return (0);
2552 2544
2553 2545 case PBIND_QUERY_TYPE:
2554 2546 /* Return the binding type */
2555 2547 *obind = TB_CPU_IS_SOFT(tp) ? PBIND_SOFT : PBIND_HARD;
2556 2548 thread_unlock(tp);
2557 2549 return (0);
2558 2550
2559 2551 case PBIND_SOFT:
2560 2552 /*
2561 2553 * Set soft binding for this thread and return the actual
2562 2554 * binding
2563 2555 */
2564 2556 TB_CPU_SOFT_SET(tp);
2565 2557 thread_unlock(tp);
2566 2558 return (0);
2567 2559
2568 2560 case PBIND_HARD:
2569 2561 /*
2570 2562 * Set hard binding for this thread and return the actual
2571 2563 * binding
2572 2564 */
2573 2565 TB_CPU_HARD_SET(tp);
2574 2566 thread_unlock(tp);
2575 2567 return (0);
2576 2568
2577 2569 default:
2578 2570 break;
2579 2571 }
2580 2572
2581 2573 /*
2582 2574 * If this thread/LWP cannot be bound because of permission
2583 2575 * problems, just note that and return success so that the
2584 2576 * other threads/LWPs will be bound. This is the way
2585 2577 * processor_bind() is defined to work.
2586 2578 *
2587 2579 * Binding will get EPERM if the thread is of system class
2588 2580 * or hasprocperm() fails.
2589 2581 */
2590 2582 if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) {
2591 2583 *error = EPERM;
2592 2584 thread_unlock(tp);
2593 2585 return (0);
2594 2586 }
2595 2587
2596 2588 binding = bind;
2597 2589 if (binding != PBIND_NONE) {
2598 2590 cp = cpu_get((processorid_t)binding);
2599 2591 /*
2600 2592 * Make sure binding is valid and is in right partition.
2601 2593 */
2602 2594 if (cp == NULL || tp->t_cpupart != cp->cpu_part) {
2603 2595 *error = EINVAL;
2604 2596 thread_unlock(tp);
2605 2597 return (0);
2606 2598 }
2607 2599 }
2608 2600 tp->t_bind_cpu = binding; /* set new binding */
2609 2601
2610 2602 /*
2611 2603 * If there is no system-set reason for affinity, set
2612 2604 * the t_bound_cpu field to reflect the binding.
2613 2605 */
2614 2606 if (tp->t_affinitycnt == 0) {
2615 2607 if (binding == PBIND_NONE) {
2616 2608 /*
2617 2609 * We may need to adjust disp_max_unbound_pri
2618 2610 * since we're becoming unbound.
2619 2611 */
2620 2612 disp_adjust_unbound_pri(tp);
2621 2613
2622 2614 tp->t_bound_cpu = NULL; /* set new binding */
2623 2615
2624 2616 /*
2625 2617 * Move thread to lgroup with strongest affinity
2626 2618 * after unbinding
2627 2619 */
2628 2620 if (tp->t_lgrp_affinity)
2629 2621 lgrp_move_thread(tp,
2630 2622 lgrp_choose(tp, tp->t_cpupart), 1);
2631 2623
2632 2624 if (tp->t_state == TS_ONPROC &&
2633 2625 tp->t_cpu->cpu_part != tp->t_cpupart)
2634 2626 cpu_surrender(tp);
2635 2627 } else {
2636 2628 lpl_t *lpl;
2637 2629
2638 2630 tp->t_bound_cpu = cp;
2639 2631 ASSERT(cp->cpu_lpl != NULL);
2640 2632
2641 2633 /*
2642 2634 * Set home to lgroup with most affinity containing CPU
2643 2635 * that thread is being bound or minimum bounding
2644 2636 * lgroup if no affinities set
2645 2637 */
2646 2638 if (tp->t_lgrp_affinity)
2647 2639 lpl = lgrp_affinity_best(tp, tp->t_cpupart,
2648 2640 LGRP_NONE, B_FALSE);
2649 2641 else
2650 2642 lpl = cp->cpu_lpl;
2651 2643
2652 2644 if (tp->t_lpl != lpl) {
2653 2645 /* can't grab cpu_lock */
2654 2646 lgrp_move_thread(tp, lpl, 1);
2655 2647 }
2656 2648
2657 2649 /*
2658 2650 * Make the thread switch to the bound CPU.
2659 2651 * If the thread is runnable, we need to
2660 2652 * requeue it even if t_cpu is already set
2661 2653 * to the right CPU, since it may be on a
2662 2654 * kpreempt queue and need to move to a local
2663 2655 * queue. We could check t_disp_queue to
2664 2656 * avoid unnecessary overhead if it's already
2665 2657 * on the right queue, but since this isn't
2666 2658 * a performance-critical operation it doesn't
2667 2659 * seem worth the extra code and complexity.
2668 2660 *
2669 2661 * If the thread is weakbound to the cpu then it will
2670 2662 * resist the new binding request until the weak
2671 2663 * binding drops. The cpu_surrender or requeueing
2672 2664 * below could be skipped in such cases (since it
2673 2665 * will have no effect), but that would require
2674 2666 * thread_allowmigrate to acquire thread_lock so
2675 2667 * we'll take the very occasional hit here instead.
2676 2668 */
2677 2669 if (tp->t_state == TS_ONPROC) {
2678 2670 cpu_surrender(tp);
2679 2671 } else if (tp->t_state == TS_RUN) {
2680 2672 cpu_t *ocp = tp->t_cpu;
2681 2673
2682 2674 (void) dispdeq(tp);
2683 2675 setbackdq(tp);
2684 2676 /*
2685 - * Either on the bound CPU's disp queue now,
2686 - * or swapped out or on the swap queue.
2677 + * On the bound CPU's disp queue now.
2687 2678 */
2688 2679 ASSERT(tp->t_disp_queue == cp->cpu_disp ||
2689 - tp->t_weakbound_cpu == ocp ||
2690 - (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ))
2691 - != TS_LOAD);
2680 + tp->t_weakbound_cpu == ocp);
2692 2681 }
2693 2682 }
2694 2683 }
2695 2684
2696 2685 /*
2697 2686 * Our binding has changed; set TP_CHANGEBIND.
2698 2687 */
2699 2688 tp->t_proc_flag |= TP_CHANGEBIND;
2700 2689 aston(tp);
2701 2690
2702 2691 thread_unlock(tp);
2703 2692
2704 2693 return (0);
2705 2694 }
2706 2695
2707 2696 #if CPUSET_WORDS > 1
2708 2697
2709 2698 /*
2710 2699 * Functions for implementing cpuset operations when a cpuset is more
2711 2700 * than one word. On platforms where a cpuset is a single word these
2712 2701 * are implemented as macros in cpuvar.h.
2713 2702 */
2714 2703
2715 2704 void
2716 2705 cpuset_all(cpuset_t *s)
2717 2706 {
2718 2707 int i;
2719 2708
2720 2709 for (i = 0; i < CPUSET_WORDS; i++)
2721 2710 s->cpub[i] = ~0UL;
2722 2711 }
2723 2712
2724 2713 void
2725 2714 cpuset_all_but(cpuset_t *s, uint_t cpu)
2726 2715 {
2727 2716 cpuset_all(s);
2728 2717 CPUSET_DEL(*s, cpu);
2729 2718 }
2730 2719
2731 2720 void
2732 2721 cpuset_only(cpuset_t *s, uint_t cpu)
2733 2722 {
2734 2723 CPUSET_ZERO(*s);
2735 2724 CPUSET_ADD(*s, cpu);
2736 2725 }
2737 2726
2738 2727 int
2739 2728 cpuset_isnull(cpuset_t *s)
2740 2729 {
2741 2730 int i;
2742 2731
2743 2732 for (i = 0; i < CPUSET_WORDS; i++)
2744 2733 if (s->cpub[i] != 0)
2745 2734 return (0);
2746 2735 return (1);
2747 2736 }
2748 2737
2749 2738 int
2750 2739 cpuset_cmp(cpuset_t *s1, cpuset_t *s2)
2751 2740 {
2752 2741 int i;
2753 2742
2754 2743 for (i = 0; i < CPUSET_WORDS; i++)
2755 2744 if (s1->cpub[i] != s2->cpub[i])
2756 2745 return (0);
2757 2746 return (1);
2758 2747 }
2759 2748
2760 2749 uint_t
2761 2750 cpuset_find(cpuset_t *s)
2762 2751 {
2763 2752
2764 2753 uint_t i;
2765 2754 uint_t cpu = (uint_t)-1;
2766 2755
2767 2756 /*
2768 2757 * Find a cpu in the cpuset
2769 2758 */
2770 2759 for (i = 0; i < CPUSET_WORDS; i++) {
2771 2760 cpu = (uint_t)(lowbit(s->cpub[i]) - 1);
2772 2761 if (cpu != (uint_t)-1) {
2773 2762 cpu += i * BT_NBIPUL;
2774 2763 break;
2775 2764 }
2776 2765 }
2777 2766 return (cpu);
2778 2767 }
2779 2768
2780 2769 void
2781 2770 cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid)
2782 2771 {
2783 2772 int i, j;
2784 2773 uint_t bit;
2785 2774
2786 2775 /*
2787 2776 * First, find the smallest cpu id in the set.
2788 2777 */
2789 2778 for (i = 0; i < CPUSET_WORDS; i++) {
2790 2779 if (s->cpub[i] != 0) {
2791 2780 bit = (uint_t)(lowbit(s->cpub[i]) - 1);
2792 2781 ASSERT(bit != (uint_t)-1);
2793 2782 *smallestid = bit + (i * BT_NBIPUL);
2794 2783
2795 2784 /*
2796 2785 * Now find the largest cpu id in
2797 2786 * the set and return immediately.
2798 2787 * Done in an inner loop to avoid
2799 2788 * having to break out of the first
2800 2789 * loop.
2801 2790 */
2802 2791 for (j = CPUSET_WORDS - 1; j >= i; j--) {
2803 2792 if (s->cpub[j] != 0) {
2804 2793 bit = (uint_t)(highbit(s->cpub[j]) - 1);
2805 2794 ASSERT(bit != (uint_t)-1);
2806 2795 *largestid = bit + (j * BT_NBIPUL);
2807 2796 ASSERT(*largestid >= *smallestid);
2808 2797 return;
2809 2798 }
2810 2799 }
2811 2800
2812 2801 /*
2813 2802 * If this code is reached, a
2814 2803 * smallestid was found, but not a
2815 2804 * largestid. The cpuset must have
2816 2805 * been changed during the course
2817 2806 * of this function call.
2818 2807 */
2819 2808 ASSERT(0);
2820 2809 }
2821 2810 }
2822 2811 *smallestid = *largestid = CPUSET_NOTINSET;
2823 2812 }
2824 2813
2825 2814 #endif /* CPUSET_WORDS */
2826 2815
2827 2816 /*
2828 2817 * Unbind threads bound to specified CPU.
2829 2818 *
2830 2819 * If `unbind_all_threads' is true, unbind all user threads bound to a given
2831 2820 * CPU. Otherwise unbind all soft-bound user threads.
2832 2821 */
2833 2822 int
2834 2823 cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads)
2835 2824 {
2836 2825 processorid_t obind;
2837 2826 kthread_t *tp;
2838 2827 int ret = 0;
2839 2828 proc_t *pp;
2840 2829 int err, berr = 0;
2841 2830
2842 2831 ASSERT(MUTEX_HELD(&cpu_lock));
2843 2832
2844 2833 mutex_enter(&pidlock);
2845 2834 for (pp = practive; pp != NULL; pp = pp->p_next) {
2846 2835 mutex_enter(&pp->p_lock);
2847 2836 tp = pp->p_tlist;
2848 2837 /*
2849 2838 * Skip zombies, kernel processes, and processes in
2850 2839 * other zones, if called from a non-global zone.
2851 2840 */
2852 2841 if (tp == NULL || (pp->p_flag & SSYS) ||
2853 2842 !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
2854 2843 mutex_exit(&pp->p_lock);
2855 2844 continue;
2856 2845 }
2857 2846 do {
2858 2847 if (tp->t_bind_cpu != cpu)
2859 2848 continue;
2860 2849 /*
2861 2850 * Skip threads with hard binding when
2862 2851 * `unbind_all_threads' is not specified.
2863 2852 */
2864 2853 if (!unbind_all_threads && TB_CPU_IS_HARD(tp))
2865 2854 continue;
2866 2855 err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr);
2867 2856 if (ret == 0)
2868 2857 ret = err;
2869 2858 } while ((tp = tp->t_forw) != pp->p_tlist);
2870 2859 mutex_exit(&pp->p_lock);
2871 2860 }
2872 2861 mutex_exit(&pidlock);
2873 2862 if (ret == 0)
2874 2863 ret = berr;
2875 2864 return (ret);
2876 2865 }
2877 2866
2878 2867
2879 2868 /*
2880 2869 * Destroy all remaining bound threads on a cpu.
2881 2870 */
2882 2871 void
2883 2872 cpu_destroy_bound_threads(cpu_t *cp)
2884 2873 {
2885 2874 extern id_t syscid;
2886 2875 register kthread_id_t t, tlist, tnext;
2887 2876
2888 2877 /*
2889 2878 * Destroy all remaining bound threads on the cpu. This
2890 2879 * should include both the interrupt threads and the idle thread.
2891 2880 * This requires some care, since we need to traverse the
2892 2881 * thread list with the pidlock mutex locked, but thread_free
2893 2882 * also locks the pidlock mutex. So, we collect the threads
2894 2883 * we're going to reap in a list headed by "tlist", then we
2895 2884 * unlock the pidlock mutex and traverse the tlist list,
2896 2885 	 * doing thread_free's on the threads. Simple, n'est pas?
2897 2886 * Also, this depends on thread_free not mucking with the
2898 2887 * t_next and t_prev links of the thread.
2899 2888 */
2900 2889
2901 2890 if ((t = curthread) != NULL) {
2902 2891
2903 2892 tlist = NULL;
2904 2893 mutex_enter(&pidlock);
2905 2894 do {
2906 2895 tnext = t->t_next;
2907 2896 if (t->t_bound_cpu == cp) {
2908 2897
2909 2898 /*
2910 2899 * We've found a bound thread, carefully unlink
2911 2900 * it out of the thread list, and add it to
2912 2901 * our "tlist". We "know" we don't have to
2913 2902 * worry about unlinking curthread (the thread
2914 2903 * that is executing this code).
2915 2904 */
2916 2905 t->t_next->t_prev = t->t_prev;
2917 2906 t->t_prev->t_next = t->t_next;
2918 2907 t->t_next = tlist;
2919 2908 tlist = t;
2920 2909 ASSERT(t->t_cid == syscid);
2921 2910 /* wake up anyone blocked in thread_join */
2922 2911 cv_broadcast(&t->t_joincv);
2923 2912 /*
2924 2913 * t_lwp set by interrupt threads and not
2925 2914 * cleared.
2926 2915 */
2927 2916 t->t_lwp = NULL;
2928 2917 /*
2929 2918 * Pause and idle threads always have
2930 2919 * t_state set to TS_ONPROC.
2931 2920 */
2932 2921 t->t_state = TS_FREE;
2933 2922 t->t_prev = NULL; /* Just in case */
2934 2923 }
2935 2924
2936 2925 } while ((t = tnext) != curthread);
2937 2926
2938 2927 mutex_exit(&pidlock);
2939 2928
2940 2929 mutex_sync();
2941 2930 for (t = tlist; t != NULL; t = tnext) {
2942 2931 tnext = t->t_next;
2943 2932 thread_free(t);
2944 2933 }
2945 2934 }
2946 2935 }
2947 2936
2948 2937 /*
2949 2938 * Update the cpu_supp_freqs of this cpu. This information is returned
2950 2939 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then
2951 2940 * maintain the kstat data size.
2952 2941 */
2953 2942 void
2954 2943 cpu_set_supp_freqs(cpu_t *cp, const char *freqs)
2955 2944 {
2956 2945 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */
2957 2946 const char *lfreqs = clkstr;
2958 2947 boolean_t kstat_exists = B_FALSE;
2959 2948 kstat_t *ksp;
2960 2949 size_t len;
2961 2950
2962 2951 /*
2963 2952 * A NULL pointer means we only support one speed.
2964 2953 */
2965 2954 if (freqs == NULL)
2966 2955 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64,
2967 2956 cp->cpu_curr_clock);
2968 2957 else
2969 2958 lfreqs = freqs;
2970 2959
2971 2960 /*
2972 2961 * Make sure the frequency doesn't change while a snapshot is
2973 2962 * going on. Of course, we only need to worry about this if
2974 2963 * the kstat exists.
2975 2964 */
2976 2965 if ((ksp = cp->cpu_info_kstat) != NULL) {
2977 2966 mutex_enter(ksp->ks_lock);
2978 2967 kstat_exists = B_TRUE;
2979 2968 }
2980 2969
2981 2970 /*
2982 2971 * Free any previously allocated string and if the kstat
2983 2972 * already exists, then update its data size.
2984 2973 */
2985 2974 if (cp->cpu_supp_freqs != NULL) {
2986 2975 len = strlen(cp->cpu_supp_freqs) + 1;
2987 2976 kmem_free(cp->cpu_supp_freqs, len);
2988 2977 if (kstat_exists)
2989 2978 ksp->ks_data_size -= len;
2990 2979 }
2991 2980
2992 2981 /*
2993 2982 * Allocate the new string and set the pointer.
2994 2983 */
2995 2984 len = strlen(lfreqs) + 1;
2996 2985 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP);
2997 2986 (void) strcpy(cp->cpu_supp_freqs, lfreqs);
2998 2987
2999 2988 /*
3000 2989 * If the kstat already exists then update the data size and
3001 2990 * free the lock.
3002 2991 */
3003 2992 if (kstat_exists) {
3004 2993 ksp->ks_data_size += len;
3005 2994 mutex_exit(ksp->ks_lock);
3006 2995 }
3007 2996 }
3008 2997
3009 2998 /*
3010 2999 * Indicate the current CPU's clock frequency (in Hz).
3011 3000 * The calling context must be such that CPU references are safe.
3012 3001 */
3013 3002 void
3014 3003 cpu_set_curr_clock(uint64_t new_clk)
3015 3004 {
3016 3005 uint64_t old_clk;
3017 3006
3018 3007 old_clk = CPU->cpu_curr_clock;
3019 3008 CPU->cpu_curr_clock = new_clk;
3020 3009
3021 3010 /*
3022 3011 * The cpu-change-speed DTrace probe exports the frequency in Hz
3023 3012 */
3024 3013 DTRACE_PROBE3(cpu__change__speed, processorid_t, CPU->cpu_id,
3025 3014 uint64_t, old_clk, uint64_t, new_clk);
3026 3015 }
3027 3016
3028 3017 /*
3029 3018 * processor_info(2) and p_online(2) status support functions
3030 3019 * The constants returned by the cpu_get_state() and cpu_get_state_str() are
3031 3020 * for use in communicating processor state information to userland. Kernel
3032 3021 * subsystems should only be using the cpu_flags value directly. Subsystems
3033 3022 * modifying cpu_flags should record the state change via a call to the
3034 3023 * cpu_set_state().
3035 3024 */
3036 3025
3037 3026 /*
3038 3027 * Update the pi_state of this CPU. This function provides the CPU status for
3039 3028 * the information returned by processor_info(2).
3040 3029 */
3041 3030 void
3042 3031 cpu_set_state(cpu_t *cpu)
3043 3032 {
3044 3033 ASSERT(MUTEX_HELD(&cpu_lock));
3045 3034 cpu->cpu_type_info.pi_state = cpu_get_state(cpu);
3046 3035 cpu->cpu_state_begin = gethrestime_sec();
3047 3036 pool_cpu_mod = gethrtime();
3048 3037 }
3049 3038
3050 3039 /*
3051 3040 * Return offline/online/other status for the indicated CPU. Use only for
3052 3041 * communication with user applications; cpu_flags provides the in-kernel
3053 3042 * interface.
3054 3043 */
3055 3044 int
3056 3045 cpu_get_state(cpu_t *cpu)
3057 3046 {
3058 3047 ASSERT(MUTEX_HELD(&cpu_lock));
3059 3048 if (cpu->cpu_flags & CPU_POWEROFF)
3060 3049 return (P_POWEROFF);
3061 3050 else if (cpu->cpu_flags & CPU_FAULTED)
3062 3051 return (P_FAULTED);
3063 3052 else if (cpu->cpu_flags & CPU_SPARE)
3064 3053 return (P_SPARE);
3065 3054 else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY)
3066 3055 return (P_OFFLINE);
3067 3056 else if (cpu->cpu_flags & CPU_ENABLE)
3068 3057 return (P_ONLINE);
3069 3058 else
3070 3059 return (P_NOINTR);
3071 3060 }
3072 3061
3073 3062 /*
3074 3063 * Return processor_info(2) state as a string.
3075 3064 */
3076 3065 const char *
3077 3066 cpu_get_state_str(cpu_t *cpu)
3078 3067 {
3079 3068 const char *string;
3080 3069
3081 3070 switch (cpu_get_state(cpu)) {
3082 3071 case P_ONLINE:
3083 3072 string = PS_ONLINE;
3084 3073 break;
3085 3074 case P_POWEROFF:
3086 3075 string = PS_POWEROFF;
3087 3076 break;
3088 3077 case P_NOINTR:
3089 3078 string = PS_NOINTR;
3090 3079 break;
3091 3080 case P_SPARE:
3092 3081 string = PS_SPARE;
3093 3082 break;
3094 3083 case P_FAULTED:
3095 3084 string = PS_FAULTED;
3096 3085 break;
3097 3086 case P_OFFLINE:
3098 3087 string = PS_OFFLINE;
3099 3088 break;
3100 3089 default:
3101 3090 string = "unknown";
3102 3091 break;
3103 3092 }
3104 3093 return (string);
3105 3094 }
3106 3095
3107 3096 /*
3108 3097 * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named
3109 3098 * kstats, respectively. This is done when a CPU is initialized or placed
3110 3099 * online via p_online(2).
3111 3100 */
3112 3101 static void
3113 3102 cpu_stats_kstat_create(cpu_t *cp)
3114 3103 {
3115 3104 int instance = cp->cpu_id;
3116 3105 char *module = "cpu";
3117 3106 char *class = "misc";
3118 3107 kstat_t *ksp;
3119 3108 zoneid_t zoneid;
3120 3109
3121 3110 ASSERT(MUTEX_HELD(&cpu_lock));
3122 3111
3123 3112 if (pool_pset_enabled())
3124 3113 zoneid = GLOBAL_ZONEID;
3125 3114 else
3126 3115 zoneid = ALL_ZONES;
3127 3116 /*
3128 3117 * Create named kstats
3129 3118 */
3130 3119 #define CPU_STATS_KS_CREATE(name, tsize, update_func) \
3131 3120 ksp = kstat_create_zone(module, instance, (name), class, \
3132 3121 KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \
3133 3122 zoneid); \
3134 3123 if (ksp != NULL) { \
3135 3124 ksp->ks_private = cp; \
3136 3125 ksp->ks_update = (update_func); \
3137 3126 kstat_install(ksp); \
3138 3127 } else \
3139 3128 cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \
3140 3129 module, instance, (name));
3141 3130
3142 3131 CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template),
3143 3132 cpu_sys_stats_ks_update);
3144 3133 CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template),
3145 3134 cpu_vm_stats_ks_update);
3146 3135
3147 3136 /*
3148 3137 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat.
3149 3138 */
3150 3139 ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL,
3151 3140 "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid);
3152 3141 if (ksp != NULL) {
3153 3142 ksp->ks_update = cpu_stat_ks_update;
3154 3143 ksp->ks_private = cp;
3155 3144 kstat_install(ksp);
3156 3145 }
3157 3146 }
3158 3147
3159 3148 static void
3160 3149 cpu_stats_kstat_destroy(cpu_t *cp)
3161 3150 {
3162 3151 char ks_name[KSTAT_STRLEN];
3163 3152
3164 3153 (void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id);
3165 3154 kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name);
3166 3155
3167 3156 kstat_delete_byname("cpu", cp->cpu_id, "sys");
3168 3157 kstat_delete_byname("cpu", cp->cpu_id, "vm");
3169 3158 }
3170 3159
3171 3160 static int
3172 3161 cpu_sys_stats_ks_update(kstat_t *ksp, int rw)
3173 3162 {
3174 3163 cpu_t *cp = (cpu_t *)ksp->ks_private;
3175 3164 struct cpu_sys_stats_ks_data *csskd;
3176 3165 cpu_sys_stats_t *css;
3177 3166 hrtime_t msnsecs[NCMSTATES];
3178 3167 int i;
3179 3168
3180 3169 if (rw == KSTAT_WRITE)
3181 3170 return (EACCES);
3182 3171
3183 3172 csskd = ksp->ks_data;
3184 3173 css = &cp->cpu_stats.sys;
3185 3174
3186 3175 /*
3187 3176 * Read CPU mstate, but compare with the last values we
3188 3177 * received to make sure that the returned kstats never
3189 3178 * decrease.
3190 3179 */
3191 3180
3192 3181 get_cpu_mstate(cp, msnsecs);
3193 3182 if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE])
3194 3183 msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64;
3195 3184 if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER])
3196 3185 msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64;
3197 3186 if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM])
3198 3187 msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64;
3199 3188
3200 3189 bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data,
3201 3190 sizeof (cpu_sys_stats_ks_data_template));
3202 3191
3203 3192 csskd->cpu_ticks_wait.value.ui64 = 0;
3204 3193 csskd->wait_ticks_io.value.ui64 = 0;
3205 3194
3206 3195 csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE];
3207 3196 csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER];
3208 3197 csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM];
3209 3198 csskd->cpu_ticks_idle.value.ui64 =
3210 3199 NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64);
3211 3200 csskd->cpu_ticks_user.value.ui64 =
3212 3201 NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64);
3213 3202 csskd->cpu_ticks_kernel.value.ui64 =
3214 3203 NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64);
3215 3204 csskd->cpu_nsec_dtrace.value.ui64 = cp->cpu_dtrace_nsec;
3216 3205 csskd->dtrace_probes.value.ui64 = cp->cpu_dtrace_probes;
3217 3206 csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast;
3218 3207 csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload;
3219 3208 csskd->bread.value.ui64 = css->bread;
3220 3209 csskd->bwrite.value.ui64 = css->bwrite;
3221 3210 csskd->lread.value.ui64 = css->lread;
3222 3211 csskd->lwrite.value.ui64 = css->lwrite;
3223 3212 csskd->phread.value.ui64 = css->phread;
3224 3213 csskd->phwrite.value.ui64 = css->phwrite;
3225 3214 csskd->pswitch.value.ui64 = css->pswitch;
3226 3215 csskd->trap.value.ui64 = css->trap;
3227 3216 csskd->intr.value.ui64 = 0;
3228 3217 for (i = 0; i < PIL_MAX; i++)
3229 3218 csskd->intr.value.ui64 += css->intr[i];
3230 3219 csskd->syscall.value.ui64 = css->syscall;
3231 3220 csskd->sysread.value.ui64 = css->sysread;
3232 3221 csskd->syswrite.value.ui64 = css->syswrite;
3233 3222 csskd->sysfork.value.ui64 = css->sysfork;
3234 3223 csskd->sysvfork.value.ui64 = css->sysvfork;
3235 3224 csskd->sysexec.value.ui64 = css->sysexec;
3236 3225 csskd->readch.value.ui64 = css->readch;
3237 3226 csskd->writech.value.ui64 = css->writech;
3238 3227 csskd->rcvint.value.ui64 = css->rcvint;
3239 3228 csskd->xmtint.value.ui64 = css->xmtint;
3240 3229 csskd->mdmint.value.ui64 = css->mdmint;
3241 3230 csskd->rawch.value.ui64 = css->rawch;
3242 3231 csskd->canch.value.ui64 = css->canch;
3243 3232 csskd->outch.value.ui64 = css->outch;
3244 3233 csskd->msg.value.ui64 = css->msg;
3245 3234 csskd->sema.value.ui64 = css->sema;
3246 3235 csskd->namei.value.ui64 = css->namei;
3247 3236 csskd->ufsiget.value.ui64 = css->ufsiget;
3248 3237 csskd->ufsdirblk.value.ui64 = css->ufsdirblk;
3249 3238 csskd->ufsipage.value.ui64 = css->ufsipage;
3250 3239 csskd->ufsinopage.value.ui64 = css->ufsinopage;
3251 3240 csskd->procovf.value.ui64 = css->procovf;
3252 3241 csskd->intrthread.value.ui64 = 0;
3253 3242 for (i = 0; i < LOCK_LEVEL - 1; i++)
3254 3243 csskd->intrthread.value.ui64 += css->intr[i];
3255 3244 csskd->intrblk.value.ui64 = css->intrblk;
3256 3245 csskd->intrunpin.value.ui64 = css->intrunpin;
3257 3246 csskd->idlethread.value.ui64 = css->idlethread;
3258 3247 csskd->inv_swtch.value.ui64 = css->inv_swtch;
3259 3248 csskd->nthreads.value.ui64 = css->nthreads;
3260 3249 csskd->cpumigrate.value.ui64 = css->cpumigrate;
3261 3250 csskd->xcalls.value.ui64 = css->xcalls;
3262 3251 csskd->mutex_adenters.value.ui64 = css->mutex_adenters;
3263 3252 csskd->rw_rdfails.value.ui64 = css->rw_rdfails;
3264 3253 csskd->rw_wrfails.value.ui64 = css->rw_wrfails;
3265 3254 csskd->modload.value.ui64 = css->modload;
3266 3255 csskd->modunload.value.ui64 = css->modunload;
3267 3256 csskd->bawrite.value.ui64 = css->bawrite;
3268 3257 csskd->iowait.value.ui64 = css->iowait;
3269 3258
3270 3259 return (0);
3271 3260 }
3272 3261
3273 3262 static int
3274 3263 cpu_vm_stats_ks_update(kstat_t *ksp, int rw)
3275 3264 {
3276 3265 cpu_t *cp = (cpu_t *)ksp->ks_private;
3277 3266 struct cpu_vm_stats_ks_data *cvskd;
3278 3267 cpu_vm_stats_t *cvs;
3279 3268
3280 3269 if (rw == KSTAT_WRITE)
3281 3270 return (EACCES);
3282 3271
3283 3272 cvs = &cp->cpu_stats.vm;
3284 3273 cvskd = ksp->ks_data;
3285 3274
3286 3275 bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data,
3287 3276 sizeof (cpu_vm_stats_ks_data_template));
3288 3277 cvskd->pgrec.value.ui64 = cvs->pgrec;
3289 3278 cvskd->pgfrec.value.ui64 = cvs->pgfrec;
3290 3279 cvskd->pgin.value.ui64 = cvs->pgin;
3291 3280 cvskd->pgpgin.value.ui64 = cvs->pgpgin;
3292 3281 cvskd->pgout.value.ui64 = cvs->pgout;
3293 3282 cvskd->pgpgout.value.ui64 = cvs->pgpgout;
3294 - cvskd->swapin.value.ui64 = cvs->swapin;
3295 - cvskd->pgswapin.value.ui64 = cvs->pgswapin;
3296 - cvskd->swapout.value.ui64 = cvs->swapout;
3297 - cvskd->pgswapout.value.ui64 = cvs->pgswapout;
3298 3283 cvskd->zfod.value.ui64 = cvs->zfod;
3299 3284 cvskd->dfree.value.ui64 = cvs->dfree;
3300 3285 cvskd->scan.value.ui64 = cvs->scan;
3301 3286 cvskd->rev.value.ui64 = cvs->rev;
3302 3287 cvskd->hat_fault.value.ui64 = cvs->hat_fault;
3303 3288 cvskd->as_fault.value.ui64 = cvs->as_fault;
3304 3289 cvskd->maj_fault.value.ui64 = cvs->maj_fault;
3305 3290 cvskd->cow_fault.value.ui64 = cvs->cow_fault;
3306 3291 cvskd->prot_fault.value.ui64 = cvs->prot_fault;
3307 3292 cvskd->softlock.value.ui64 = cvs->softlock;
3308 3293 cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt;
3309 3294 cvskd->pgrrun.value.ui64 = cvs->pgrrun;
3310 3295 cvskd->execpgin.value.ui64 = cvs->execpgin;
3311 3296 cvskd->execpgout.value.ui64 = cvs->execpgout;
3312 3297 cvskd->execfree.value.ui64 = cvs->execfree;
3313 3298 cvskd->anonpgin.value.ui64 = cvs->anonpgin;
3314 3299 cvskd->anonpgout.value.ui64 = cvs->anonpgout;
3315 3300 cvskd->anonfree.value.ui64 = cvs->anonfree;
3316 3301 cvskd->fspgin.value.ui64 = cvs->fspgin;
3317 3302 cvskd->fspgout.value.ui64 = cvs->fspgout;
3318 3303 cvskd->fsfree.value.ui64 = cvs->fsfree;
3319 3304
3320 3305 return (0);
3321 3306 }
3322 3307
3323 3308 static int
3324 3309 cpu_stat_ks_update(kstat_t *ksp, int rw)
3325 3310 {
3326 3311 cpu_stat_t *cso;
3327 3312 cpu_t *cp;
3328 3313 int i;
3329 3314 hrtime_t msnsecs[NCMSTATES];
3330 3315
3331 3316 cso = (cpu_stat_t *)ksp->ks_data;
3332 3317 cp = (cpu_t *)ksp->ks_private;
3333 3318
3334 3319 if (rw == KSTAT_WRITE)
3335 3320 return (EACCES);
3336 3321
3337 3322 /*
3338 3323 * Read CPU mstate, but compare with the last values we
3339 3324 * received to make sure that the returned kstats never
3340 3325 * decrease.
3341 3326 */
3342 3327
3343 3328 get_cpu_mstate(cp, msnsecs);
3344 3329 msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]);
3345 3330 msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]);
3346 3331 msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]);
3347 3332 if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE])
3348 3333 cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE];
3349 3334 if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER])
3350 3335 cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER];
3351 3336 if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM])
3352 3337 cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM];
3353 3338 cso->cpu_sysinfo.cpu[CPU_WAIT] = 0;
3354 3339 cso->cpu_sysinfo.wait[W_IO] = 0;
3355 3340 cso->cpu_sysinfo.wait[W_SWAP] = 0;
3356 3341 cso->cpu_sysinfo.wait[W_PIO] = 0;
3357 3342 cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread);
3358 3343 cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite);
3359 3344 cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread);
3360 3345 cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite);
3361 3346 cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread);
3362 3347 cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite);
3363 3348 cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch);
3364 3349 cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap);
3365 3350 cso->cpu_sysinfo.intr = 0;
3366 3351 for (i = 0; i < PIL_MAX; i++)
3367 3352 cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]);
3368 3353 cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall);
3369 3354 cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread);
3370 3355 cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite);
3371 3356 cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork);
3372 3357 cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork);
3373 3358 cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec);
3374 3359 cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch);
3375 3360 cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech);
3376 3361 cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint);
3377 3362 cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint);
3378 3363 cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint);
3379 3364 cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch);
3380 3365 cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch);
3381 3366 cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch);
3382 3367 cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg);
3383 3368 cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema);
3384 3369 cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei);
3385 3370 cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget);
3386 3371 cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk);
3387 3372 cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage);
3388 3373 cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage);
3389 3374 cso->cpu_sysinfo.inodeovf = 0;
3390 3375 cso->cpu_sysinfo.fileovf = 0;
3391 3376 cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf);
3392 3377 cso->cpu_sysinfo.intrthread = 0;
3393 3378 for (i = 0; i < LOCK_LEVEL - 1; i++)
3394 3379 cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]);
3395 3380 cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk);
3396 3381 cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread);
3397 3382 cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch);
3398 3383 cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads);
3399 3384 cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate);
3400 3385 cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls);
3401 3386 cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters);
3402 3387 cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails);
3403 3388 cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails);
3404 3389 cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload);
3405 3390 cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload);
3406 3391 cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite);
3407 3392 cso->cpu_sysinfo.rw_enters = 0;
3408 3393 cso->cpu_sysinfo.win_uo_cnt = 0;
3409 3394 cso->cpu_sysinfo.win_uu_cnt = 0;
3410 3395 cso->cpu_sysinfo.win_so_cnt = 0;
3411 3396 cso->cpu_sysinfo.win_su_cnt = 0;
3412 3397 cso->cpu_sysinfo.win_suo_cnt = 0;
3413 3398
3414 3399 cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait);
3415 3400 cso->cpu_syswait.swap = 0;
3416 3401 cso->cpu_syswait.physio = 0;
3417 3402
3418 3403 cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec);
3419 3404 cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec);
3420 3405 cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin);
3421 3406 cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin);
3422 3407 cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout);
3423 3408 cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout);
3424 - cso->cpu_vminfo.swapin = CPU_STATS(cp, vm.swapin);
3425 - cso->cpu_vminfo.pgswapin = CPU_STATS(cp, vm.pgswapin);
3426 - cso->cpu_vminfo.swapout = CPU_STATS(cp, vm.swapout);
3427 - cso->cpu_vminfo.pgswapout = CPU_STATS(cp, vm.pgswapout);
3428 3409 cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod);
3429 3410 cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree);
3430 3411 cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan);
3431 3412 cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev);
3432 3413 cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault);
3433 3414 cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault);
3434 3415 cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault);
3435 3416 cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault);
3436 3417 cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault);
3437 3418 cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock);
3438 3419 cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt);
3439 3420 cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun);
3440 3421 cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin);
3441 3422 cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout);
3442 3423 cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree);
3443 3424 cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin);
3444 3425 cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout);
3445 3426 cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree);
3446 3427 cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin);
3447 3428 cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout);
3448 3429 cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree);
3449 3430
3450 3431 return (0);
3451 3432 }
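With the hunks above applied, the per-CPU cpu:<instance>:vm kstat no longer publishes the swapin, pgswapin, swapout, and pgswapout counters, and the legacy cpu_stat_t view reports the corresponding fields as zero. The snippet below is only a minimal, illustrative libkstat sketch (it is not part of this webrev) showing how a user-level consumer might probe for one of the removed statistics; build it against -lkstat.

	#include <kstat.h>
	#include <string.h>
	#include <stdio.h>

	int
	main(void)
	{
		kstat_ctl_t *kc;
		kstat_t *ksp;

		if ((kc = kstat_open()) == NULL)
			return (1);

		/* Walk every per-CPU cpu:<instance>:vm kstat. */
		for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
			if (strcmp(ksp->ks_module, "cpu") != 0 ||
			    strcmp(ksp->ks_name, "vm") != 0)
				continue;
			if (kstat_read(kc, ksp, NULL) == -1)
				continue;
			/* After this change, "swapin" is no longer present. */
			(void) printf("cpu%d swapin %s\n", ksp->ks_instance,
			    kstat_data_lookup(ksp, "swapin") == NULL ?
			    "removed" : "present");
		}
		(void) kstat_close(kc);
		return (0);
	}

Tools that iterate over the named kstat with kstat_data_lookup(), as sketched here, simply see the lookup fail; only consumers that assume a fixed cpu_vminfo_t layout would need updating.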