patch remove-load-flag
patch remove-on-swapq-flag
--- old/usr/src/uts/common/os/cpu.c
+++ new/usr/src/uts/common/os/cpu.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1991, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2012 by Delphix. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * Architecture-independent CPU control functions.
28 28 */
29 29
30 30 #include <sys/types.h>
31 31 #include <sys/param.h>
32 32 #include <sys/var.h>
33 33 #include <sys/thread.h>
34 34 #include <sys/cpuvar.h>
35 35 #include <sys/cpu_event.h>
36 36 #include <sys/kstat.h>
37 37 #include <sys/uadmin.h>
38 38 #include <sys/systm.h>
39 39 #include <sys/errno.h>
40 40 #include <sys/cmn_err.h>
41 41 #include <sys/procset.h>
42 42 #include <sys/processor.h>
43 43 #include <sys/debug.h>
44 44 #include <sys/cpupart.h>
45 45 #include <sys/lgrp.h>
46 46 #include <sys/pset.h>
47 47 #include <sys/pghw.h>
48 48 #include <sys/kmem.h>
49 49 #include <sys/kmem_impl.h> /* to set per-cpu kmem_cache offset */
50 50 #include <sys/atomic.h>
51 51 #include <sys/callb.h>
52 52 #include <sys/vtrace.h>
53 53 #include <sys/cyclic.h>
54 54 #include <sys/bitmap.h>
55 55 #include <sys/nvpair.h>
56 56 #include <sys/pool_pset.h>
57 57 #include <sys/msacct.h>
58 58 #include <sys/time.h>
59 59 #include <sys/archsystm.h>
60 60 #include <sys/sdt.h>
61 61 #if defined(__x86) || defined(__amd64)
62 62 #include <sys/x86_archext.h>
63 63 #endif
64 64 #include <sys/callo.h>
65 65
66 66 extern int mp_cpu_start(cpu_t *);
67 67 extern int mp_cpu_stop(cpu_t *);
68 68 extern int mp_cpu_poweron(cpu_t *);
69 69 extern int mp_cpu_poweroff(cpu_t *);
70 70 extern int mp_cpu_configure(int);
71 71 extern int mp_cpu_unconfigure(int);
72 72 extern void mp_cpu_faulted_enter(cpu_t *);
73 73 extern void mp_cpu_faulted_exit(cpu_t *);
74 74
75 75 extern int cmp_cpu_to_chip(processorid_t cpuid);
76 76 #ifdef __sparcv9
77 77 extern char *cpu_fru_fmri(cpu_t *cp);
78 78 #endif
79 79
80 80 static void cpu_add_active_internal(cpu_t *cp);
81 81 static void cpu_remove_active(cpu_t *cp);
82 82 static void cpu_info_kstat_create(cpu_t *cp);
83 83 static void cpu_info_kstat_destroy(cpu_t *cp);
84 84 static void cpu_stats_kstat_create(cpu_t *cp);
85 85 static void cpu_stats_kstat_destroy(cpu_t *cp);
86 86
87 87 static int cpu_sys_stats_ks_update(kstat_t *ksp, int rw);
88 88 static int cpu_vm_stats_ks_update(kstat_t *ksp, int rw);
89 89 static int cpu_stat_ks_update(kstat_t *ksp, int rw);
90 90 static int cpu_state_change_hooks(int, cpu_setup_t, cpu_setup_t);
91 91
92 92 /*
93 93 * cpu_lock protects ncpus, ncpus_online, cpu_flag, cpu_list, cpu_active,
94 94 * max_cpu_seqid_ever, and dispatch queue reallocations. The lock ordering with
95 95 * respect to related locks is:
96 96 *
97 97 * cpu_lock --> thread_free_lock ---> p_lock ---> thread_lock()
98 98 *
99 99 * Warning: Certain sections of code do not use the cpu_lock when
100 100 * traversing the cpu_list (e.g. mutex_vector_enter(), clock()). Since
101 101 * all cpus are paused during modifications to this list, a solution
102 102  * to protect the list is to either disable kernel preemption while
103 103 * walking the list, *or* recheck the cpu_next pointer at each
104 104 * iteration in the loop. Note that in no cases can any cached
105 105 * copies of the cpu pointers be kept as they may become invalid.
106 106 */
107 107 kmutex_t cpu_lock;
108 108 cpu_t *cpu_list; /* list of all CPUs */
109 109 cpu_t *clock_cpu_list; /* used by clock to walk CPUs */
110 110 cpu_t *cpu_active; /* list of active CPUs */
111 111 static cpuset_t cpu_available; /* set of available CPUs */
112 112 cpuset_t cpu_seqid_inuse; /* which cpu_seqids are in use */
113 113
114 114 cpu_t **cpu_seq; /* ptrs to CPUs, indexed by seq_id */
115 115
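/*
 * A minimal sketch (illustrative only, not part of this file) of the
 * preemption-disabled cpu_list walk described above; process_cpu is a
 * hypothetical callback.
 */
#if 0
static void
walk_cpu_list(void (*process_cpu)(cpu_t *))
{
	cpu_t *cp;

	kpreempt_disable();	/* list updates pause CPUs, so we're safe */
	cp = cpu_list;
	do {
		process_cpu(cp);
		cp = cp->cpu_next;	/* re-read each step; never cache */
	} while (cp != cpu_list);
	kpreempt_enable();
}
#endif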
116 116 /*
117 117  * max_ncpus keeps the maximum number of cpus the system can have. Initially
118 118 * it's NCPU, but since most archs scan the devtree for cpus
119 119 * fairly early on during boot, the real max can be known before
120 120 * ncpus is set (useful for early NCPU based allocations).
121 121 */
122 122 int max_ncpus = NCPU;
123 123 /*
124 124  * platforms that set max_ncpus to the maximum number of cpus that can be
125 125 * dynamically added will set boot_max_ncpus to the number of cpus found
126 126 * at device tree scan time during boot.
127 127 */
128 128 int boot_max_ncpus = -1;
129 129 int boot_ncpus = -1;
130 130 /*
131 131 * Maximum possible CPU id. This can never be >= NCPU since NCPU is
132 132 * used to size arrays that are indexed by CPU id.
133 133 */
134 134 processorid_t max_cpuid = NCPU - 1;
135 135
136 136 /*
137 137  * Maximum cpu_seqid ever given. This number can only grow and never shrink. It
138 138 * can be used to optimize NCPU loops to avoid going through CPUs which were
139 139 * never on-line.
140 140 */
141 141 processorid_t max_cpu_seqid_ever = 0;
142 142
143 143 int ncpus = 1;
144 144 int ncpus_online = 1;
145 145
146 146 /*
147 147 * CPU that we're trying to offline. Protected by cpu_lock.
148 148 */
149 149 cpu_t *cpu_inmotion;
150 150
151 151 /*
152 152  * Can be raised to suppress further weakbindings, which are instead
153 153 * satisfied by disabling preemption. Must be raised/lowered under cpu_lock,
154 154 * while individual thread weakbinding synchronization is done under thread
155 155 * lock.
156 156 */
157 157 int weakbindingbarrier;
158 158
159 159 /*
160 160 * Variables used in pause_cpus().
161 161 */
162 162 static volatile char safe_list[NCPU];
163 163
164 164 static struct _cpu_pause_info {
165 165 int cp_spl; /* spl saved in pause_cpus() */
166 166 volatile int cp_go; /* Go signal sent after all ready */
167 167 int cp_count; /* # of CPUs to pause */
168 168 ksema_t cp_sem; /* synch pause_cpus & cpu_pause */
169 169 kthread_id_t cp_paused;
170 170 } cpu_pause_info;
171 171
172 172 static kmutex_t pause_free_mutex;
173 173 static kcondvar_t pause_free_cv;
174 174
175 175 void *(*cpu_pause_func)(void *) = NULL;
176 176
177 177
178 178 static struct cpu_sys_stats_ks_data {
179 179 kstat_named_t cpu_ticks_idle;
180 180 kstat_named_t cpu_ticks_user;
181 181 kstat_named_t cpu_ticks_kernel;
182 182 kstat_named_t cpu_ticks_wait;
183 183 kstat_named_t cpu_nsec_idle;
184 184 kstat_named_t cpu_nsec_user;
185 185 kstat_named_t cpu_nsec_kernel;
186 186 kstat_named_t cpu_nsec_dtrace;
187 187 kstat_named_t cpu_nsec_intr;
188 188 kstat_named_t cpu_load_intr;
189 189 kstat_named_t wait_ticks_io;
190 190 kstat_named_t dtrace_probes;
191 191 kstat_named_t bread;
192 192 kstat_named_t bwrite;
193 193 kstat_named_t lread;
194 194 kstat_named_t lwrite;
195 195 kstat_named_t phread;
196 196 kstat_named_t phwrite;
197 197 kstat_named_t pswitch;
198 198 kstat_named_t trap;
199 199 kstat_named_t intr;
200 200 kstat_named_t syscall;
201 201 kstat_named_t sysread;
202 202 kstat_named_t syswrite;
203 203 kstat_named_t sysfork;
204 204 kstat_named_t sysvfork;
205 205 kstat_named_t sysexec;
206 206 kstat_named_t readch;
207 207 kstat_named_t writech;
208 208 kstat_named_t rcvint;
209 209 kstat_named_t xmtint;
210 210 kstat_named_t mdmint;
211 211 kstat_named_t rawch;
212 212 kstat_named_t canch;
213 213 kstat_named_t outch;
214 214 kstat_named_t msg;
215 215 kstat_named_t sema;
216 216 kstat_named_t namei;
217 217 kstat_named_t ufsiget;
218 218 kstat_named_t ufsdirblk;
219 219 kstat_named_t ufsipage;
220 220 kstat_named_t ufsinopage;
221 221 kstat_named_t procovf;
222 222 kstat_named_t intrthread;
223 223 kstat_named_t intrblk;
224 224 kstat_named_t intrunpin;
225 225 kstat_named_t idlethread;
226 226 kstat_named_t inv_swtch;
227 227 kstat_named_t nthreads;
228 228 kstat_named_t cpumigrate;
229 229 kstat_named_t xcalls;
230 230 kstat_named_t mutex_adenters;
231 231 kstat_named_t rw_rdfails;
232 232 kstat_named_t rw_wrfails;
233 233 kstat_named_t modload;
234 234 kstat_named_t modunload;
235 235 kstat_named_t bawrite;
236 236 kstat_named_t iowait;
237 237 } cpu_sys_stats_ks_data_template = {
238 238 { "cpu_ticks_idle", KSTAT_DATA_UINT64 },
239 239 { "cpu_ticks_user", KSTAT_DATA_UINT64 },
240 240 { "cpu_ticks_kernel", KSTAT_DATA_UINT64 },
241 241 { "cpu_ticks_wait", KSTAT_DATA_UINT64 },
242 242 { "cpu_nsec_idle", KSTAT_DATA_UINT64 },
243 243 { "cpu_nsec_user", KSTAT_DATA_UINT64 },
244 244 { "cpu_nsec_kernel", KSTAT_DATA_UINT64 },
245 245 { "cpu_nsec_dtrace", KSTAT_DATA_UINT64 },
246 246 { "cpu_nsec_intr", KSTAT_DATA_UINT64 },
247 247 { "cpu_load_intr", KSTAT_DATA_UINT64 },
248 248 { "wait_ticks_io", KSTAT_DATA_UINT64 },
249 249 { "dtrace_probes", KSTAT_DATA_UINT64 },
250 250 { "bread", KSTAT_DATA_UINT64 },
251 251 { "bwrite", KSTAT_DATA_UINT64 },
252 252 { "lread", KSTAT_DATA_UINT64 },
253 253 { "lwrite", KSTAT_DATA_UINT64 },
254 254 { "phread", KSTAT_DATA_UINT64 },
255 255 { "phwrite", KSTAT_DATA_UINT64 },
256 256 { "pswitch", KSTAT_DATA_UINT64 },
257 257 { "trap", KSTAT_DATA_UINT64 },
258 258 { "intr", KSTAT_DATA_UINT64 },
259 259 { "syscall", KSTAT_DATA_UINT64 },
260 260 { "sysread", KSTAT_DATA_UINT64 },
261 261 { "syswrite", KSTAT_DATA_UINT64 },
262 262 { "sysfork", KSTAT_DATA_UINT64 },
263 263 { "sysvfork", KSTAT_DATA_UINT64 },
264 264 { "sysexec", KSTAT_DATA_UINT64 },
265 265 { "readch", KSTAT_DATA_UINT64 },
266 266 { "writech", KSTAT_DATA_UINT64 },
267 267 { "rcvint", KSTAT_DATA_UINT64 },
268 268 { "xmtint", KSTAT_DATA_UINT64 },
269 269 { "mdmint", KSTAT_DATA_UINT64 },
270 270 { "rawch", KSTAT_DATA_UINT64 },
271 271 { "canch", KSTAT_DATA_UINT64 },
272 272 { "outch", KSTAT_DATA_UINT64 },
273 273 { "msg", KSTAT_DATA_UINT64 },
274 274 { "sema", KSTAT_DATA_UINT64 },
275 275 { "namei", KSTAT_DATA_UINT64 },
276 276 { "ufsiget", KSTAT_DATA_UINT64 },
277 277 { "ufsdirblk", KSTAT_DATA_UINT64 },
278 278 { "ufsipage", KSTAT_DATA_UINT64 },
279 279 { "ufsinopage", KSTAT_DATA_UINT64 },
280 280 { "procovf", KSTAT_DATA_UINT64 },
281 281 { "intrthread", KSTAT_DATA_UINT64 },
282 282 { "intrblk", KSTAT_DATA_UINT64 },
283 283 { "intrunpin", KSTAT_DATA_UINT64 },
284 284 { "idlethread", KSTAT_DATA_UINT64 },
285 285 { "inv_swtch", KSTAT_DATA_UINT64 },
286 286 { "nthreads", KSTAT_DATA_UINT64 },
287 287 { "cpumigrate", KSTAT_DATA_UINT64 },
288 288 { "xcalls", KSTAT_DATA_UINT64 },
289 289 { "mutex_adenters", KSTAT_DATA_UINT64 },
290 290 { "rw_rdfails", KSTAT_DATA_UINT64 },
291 291 { "rw_wrfails", KSTAT_DATA_UINT64 },
292 292 { "modload", KSTAT_DATA_UINT64 },
293 293 { "modunload", KSTAT_DATA_UINT64 },
294 294 { "bawrite", KSTAT_DATA_UINT64 },
295 295 { "iowait", KSTAT_DATA_UINT64 },
296 296 };
297 297
298 298 static struct cpu_vm_stats_ks_data {
299 299 kstat_named_t pgrec;
300 300 kstat_named_t pgfrec;
301 301 kstat_named_t pgin;
302 302 kstat_named_t pgpgin;
303 303 kstat_named_t pgout;
304 304 kstat_named_t pgpgout;
305 305 kstat_named_t swapin;
306 306 kstat_named_t pgswapin;
307 307 kstat_named_t swapout;
308 308 kstat_named_t pgswapout;
309 309 kstat_named_t zfod;
310 310 kstat_named_t dfree;
311 311 kstat_named_t scan;
312 312 kstat_named_t rev;
313 313 kstat_named_t hat_fault;
314 314 kstat_named_t as_fault;
315 315 kstat_named_t maj_fault;
316 316 kstat_named_t cow_fault;
317 317 kstat_named_t prot_fault;
318 318 kstat_named_t softlock;
319 319 kstat_named_t kernel_asflt;
320 320 kstat_named_t pgrrun;
321 321 kstat_named_t execpgin;
322 322 kstat_named_t execpgout;
323 323 kstat_named_t execfree;
324 324 kstat_named_t anonpgin;
325 325 kstat_named_t anonpgout;
326 326 kstat_named_t anonfree;
327 327 kstat_named_t fspgin;
328 328 kstat_named_t fspgout;
329 329 kstat_named_t fsfree;
330 330 } cpu_vm_stats_ks_data_template = {
331 331 { "pgrec", KSTAT_DATA_UINT64 },
332 332 { "pgfrec", KSTAT_DATA_UINT64 },
333 333 { "pgin", KSTAT_DATA_UINT64 },
334 334 { "pgpgin", KSTAT_DATA_UINT64 },
335 335 { "pgout", KSTAT_DATA_UINT64 },
336 336 { "pgpgout", KSTAT_DATA_UINT64 },
337 337 { "swapin", KSTAT_DATA_UINT64 },
338 338 { "pgswapin", KSTAT_DATA_UINT64 },
339 339 { "swapout", KSTAT_DATA_UINT64 },
340 340 { "pgswapout", KSTAT_DATA_UINT64 },
341 341 { "zfod", KSTAT_DATA_UINT64 },
342 342 { "dfree", KSTAT_DATA_UINT64 },
343 343 { "scan", KSTAT_DATA_UINT64 },
344 344 { "rev", KSTAT_DATA_UINT64 },
345 345 { "hat_fault", KSTAT_DATA_UINT64 },
346 346 { "as_fault", KSTAT_DATA_UINT64 },
347 347 { "maj_fault", KSTAT_DATA_UINT64 },
348 348 { "cow_fault", KSTAT_DATA_UINT64 },
349 349 { "prot_fault", KSTAT_DATA_UINT64 },
350 350 { "softlock", KSTAT_DATA_UINT64 },
351 351 { "kernel_asflt", KSTAT_DATA_UINT64 },
352 352 { "pgrrun", KSTAT_DATA_UINT64 },
353 353 { "execpgin", KSTAT_DATA_UINT64 },
354 354 { "execpgout", KSTAT_DATA_UINT64 },
355 355 { "execfree", KSTAT_DATA_UINT64 },
356 356 { "anonpgin", KSTAT_DATA_UINT64 },
357 357 { "anonpgout", KSTAT_DATA_UINT64 },
358 358 { "anonfree", KSTAT_DATA_UINT64 },
359 359 { "fspgin", KSTAT_DATA_UINT64 },
360 360 { "fspgout", KSTAT_DATA_UINT64 },
361 361 { "fsfree", KSTAT_DATA_UINT64 },
362 362 };
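/*
 * A minimal userland sketch (illustrative only, not part of this file)
 * showing how the named kstats published from the templates above can be
 * read through libkstat(3LIB); it assumes the "cpu:0:sys" kstat exists.
 */
#if 0
#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc = kstat_open();
	kstat_t *ksp;
	kstat_named_t *kn;

	if (kc == NULL)
		return (1);
	ksp = kstat_lookup(kc, "cpu", 0, "sys");	/* CPU 0 sys stats */
	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1) {
		kn = kstat_data_lookup(ksp, "cpu_nsec_idle");
		if (kn != NULL)
			(void) printf("cpu0 idle nsec: %llu\n",
			    (u_longlong_t)kn->value.ui64);
	}
	(void) kstat_close(kc);
	return (0);
}
#endif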
363 363
364 364 /*
365 365 * Force the specified thread to migrate to the appropriate processor.
366 366 * Called with thread lock held, returns with it dropped.
367 367 */
368 368 static void
369 369 force_thread_migrate(kthread_id_t tp)
370 370 {
371 371 ASSERT(THREAD_LOCK_HELD(tp));
372 372 if (tp == curthread) {
373 373 THREAD_TRANSITION(tp);
374 374 CL_SETRUN(tp);
375 375 thread_unlock_nopreempt(tp);
376 376 swtch();
377 377 } else {
378 378 if (tp->t_state == TS_ONPROC) {
379 379 cpu_surrender(tp);
380 380 } else if (tp->t_state == TS_RUN) {
381 381 (void) dispdeq(tp);
382 382 setbackdq(tp);
383 383 }
384 384 thread_unlock(tp);
385 385 }
386 386 }
387 387
388 388 /*
389 389 * Set affinity for a specified CPU.
390 390 * A reference count is incremented and the affinity is held until the
391 391 * reference count is decremented to zero by thread_affinity_clear().
392 392 * This is so regions of code requiring affinity can be nested.
393 393 * Caller needs to ensure that cpu_id remains valid, which can be
394 394 * done by holding cpu_lock across this call, unless the caller
395 395 * specifies CPU_CURRENT in which case the cpu_lock will be acquired
396 396 * by thread_affinity_set and CPU->cpu_id will be the target CPU.
397 397 */
398 398 void
399 399 thread_affinity_set(kthread_id_t t, int cpu_id)
400 400 {
401 401 cpu_t *cp;
402 402 int c;
403 403
404 404 ASSERT(!(t == curthread && t->t_weakbound_cpu != NULL));
405 405
406 406 if ((c = cpu_id) == CPU_CURRENT) {
407 407 mutex_enter(&cpu_lock);
408 408 cpu_id = CPU->cpu_id;
409 409 }
410 410 /*
411 411 * We should be asserting that cpu_lock is held here, but
412 412 * the NCA code doesn't acquire it. The following assert
413 413 * should be uncommented when the NCA code is fixed.
414 414 *
415 415 * ASSERT(MUTEX_HELD(&cpu_lock));
416 416 */
417 417 ASSERT((cpu_id >= 0) && (cpu_id < NCPU));
418 418 cp = cpu[cpu_id];
419 419 ASSERT(cp != NULL); /* user must provide a good cpu_id */
420 420 /*
421 421 * If there is already a hard affinity requested, and this affinity
422 422 * conflicts with that, panic.
423 423 */
424 424 thread_lock(t);
425 425 if (t->t_affinitycnt > 0 && t->t_bound_cpu != cp) {
426 426 panic("affinity_set: setting %p but already bound to %p",
427 427 (void *)cp, (void *)t->t_bound_cpu);
428 428 }
429 429 t->t_affinitycnt++;
430 430 t->t_bound_cpu = cp;
431 431
432 432 /*
433 433 * Make sure we're running on the right CPU.
434 434 */
435 435 if (cp != t->t_cpu || t != curthread) {
436 436 force_thread_migrate(t); /* drops thread lock */
437 437 } else {
438 438 thread_unlock(t);
439 439 }
440 440
441 441 if (c == CPU_CURRENT)
442 442 mutex_exit(&cpu_lock);
443 443 }
444 444
445 445 /*
446 446 * Wrapper for backward compatibility.
447 447 */
448 448 void
449 449 affinity_set(int cpu_id)
450 450 {
451 451 thread_affinity_set(curthread, cpu_id);
452 452 }
453 453
454 454 /*
455 455 * Decrement the affinity reservation count and if it becomes zero,
456 456 * clear the CPU affinity for the current thread, or set it to the user's
457 457 * software binding request.
458 458 */
459 459 void
460 460 thread_affinity_clear(kthread_id_t t)
461 461 {
462 462 register processorid_t binding;
463 463
464 464 thread_lock(t);
465 465 if (--t->t_affinitycnt == 0) {
466 466 if ((binding = t->t_bind_cpu) == PBIND_NONE) {
467 467 /*
468 468 * Adjust disp_max_unbound_pri if necessary.
469 469 */
470 470 disp_adjust_unbound_pri(t);
471 471 t->t_bound_cpu = NULL;
472 472 if (t->t_cpu->cpu_part != t->t_cpupart) {
473 473 force_thread_migrate(t);
474 474 return;
475 475 }
476 476 } else {
477 477 t->t_bound_cpu = cpu[binding];
478 478 /*
479 479 * Make sure the thread is running on the bound CPU.
480 480 */
481 481 if (t->t_cpu != t->t_bound_cpu) {
482 482 force_thread_migrate(t);
483 483 return; /* already dropped lock */
484 484 }
485 485 }
486 486 }
487 487 thread_unlock(t);
488 488 }
489 489
490 490 /*
491 491 * Wrapper for backward compatibility.
492 492 */
493 493 void
494 494 affinity_clear(void)
495 495 {
496 496 thread_affinity_clear(curthread);
497 497 }
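/*
 * A minimal sketch (illustrative only, not part of this file) of nesting
 * affinity reservations through the reference count described above.
 */
#if 0
static void
affinity_nesting_example(processorid_t cpu_id)
{
	mutex_enter(&cpu_lock);			/* keep cpu_id valid */
	thread_affinity_set(curthread, cpu_id);	/* count 0 -> 1 */
	thread_affinity_set(curthread, cpu_id);	/* nested, 1 -> 2 */

	/* ... code that must stay on cpu[cpu_id] ... */

	thread_affinity_clear(curthread);	/* 2 -> 1, still bound */
	thread_affinity_clear(curthread);	/* 1 -> 0, binding drops */
	mutex_exit(&cpu_lock);
}
#endif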
498 498
499 499 /*
500 500 * Weak cpu affinity. Bind to the "current" cpu for short periods
501 501 * of time during which the thread must not block (but may be preempted).
502 502 * Use this instead of kpreempt_disable() when it is only "no migration"
503 503  * rather than "no preemption" semantics that are required; disabling
504 504  * preemption holds higher-priority threads off the cpu, and if the
505 505  * operation that is protected is more than momentary this is not good
506 506  * for realtime etc.
507 507 *
508 508 * Weakly bound threads will not prevent a cpu from being offlined -
509 509 * we'll only run them on the cpu to which they are weakly bound but
510 510 * (because they do not block) we'll always be able to move them on to
511 511 * another cpu at offline time if we give them just a short moment to
512 512 * run during which they will unbind. To give a cpu a chance of offlining,
513 513 * however, we require a barrier to weak bindings that may be raised for a
514 514 * given cpu (offline/move code may set this and then wait a short time for
515 515 * existing weak bindings to drop); the cpu_inmotion pointer is that barrier.
516 516 *
517 517 * There are few restrictions on the calling context of thread_nomigrate.
518 518 * The caller must not hold the thread lock. Calls may be nested.
519 519 *
520 520 * After weakbinding a thread must not perform actions that may block.
521 521 * In particular it must not call thread_affinity_set; calling that when
522 522 * already weakbound is nonsensical anyway.
523 523 *
524 524 * If curthread is prevented from migrating for other reasons
525 525 * (kernel preemption disabled; high pil; strongly bound; interrupt thread)
526 526 * then the weak binding will succeed even if this cpu is the target of an
527 527 * offline/move request.
528 528 */
529 529 void
530 530 thread_nomigrate(void)
531 531 {
532 532 cpu_t *cp;
533 533 kthread_id_t t = curthread;
534 534
535 535 again:
536 536 kpreempt_disable();
537 537 cp = CPU;
538 538
539 539 /*
540 540 * A highlevel interrupt must not modify t_nomigrate or
541 541 * t_weakbound_cpu of the thread it has interrupted. A lowlevel
542 542 * interrupt thread cannot migrate and we can avoid the
543 543 * thread_lock call below by short-circuiting here. In either
544 544 * case we can just return since no migration is possible and
545 545  * the condition will persist (i.e., when we test for these again
546 546 * in thread_allowmigrate they can't have changed). Migration
547 547 * is also impossible if we're at or above DISP_LEVEL pil.
548 548 */
549 549 if (CPU_ON_INTR(cp) || t->t_flag & T_INTR_THREAD ||
550 550 getpil() >= DISP_LEVEL) {
551 551 kpreempt_enable();
552 552 return;
553 553 }
554 554
555 555 /*
556 556 * We must be consistent with existing weak bindings. Since we
557 557 * may be interrupted between the increment of t_nomigrate and
558 558 * the store to t_weakbound_cpu below we cannot assume that
559 559 * t_weakbound_cpu will be set if t_nomigrate is. Note that we
560 560 * cannot assert t_weakbound_cpu == t_bind_cpu since that is not
561 561 * always the case.
562 562 */
563 563 if (t->t_nomigrate && t->t_weakbound_cpu && t->t_weakbound_cpu != cp) {
564 564 if (!panicstr)
565 565 panic("thread_nomigrate: binding to %p but already "
566 566 "bound to %p", (void *)cp,
567 567 (void *)t->t_weakbound_cpu);
568 568 }
569 569
570 570 /*
571 571 * At this point we have preemption disabled and we don't yet hold
572 572 * the thread lock. So it's possible that somebody else could
573 573 * set t_bind_cpu here and not be able to force us across to the
574 574 * new cpu (since we have preemption disabled).
575 575 */
576 576 thread_lock(curthread);
577 577
578 578 /*
579 579 * If further weak bindings are being (temporarily) suppressed then
580 580 * we'll settle for disabling kernel preemption (which assures
581 581 * no migration provided the thread does not block which it is
582 582 * not allowed to if using thread_nomigrate). We must remember
583 583 * this disposition so we can take appropriate action in
584 584 * thread_allowmigrate. If this is a nested call and the
585 585 * thread is already weakbound then fall through as normal.
586 586 * We remember the decision to settle for kpreempt_disable through
587 587 * negative nesting counting in t_nomigrate. Once a thread has had one
588 588 * weakbinding request satisfied in this way any further (nested)
589 589 * requests will continue to be satisfied in the same way,
590 590 * even if weak bindings have recommenced.
591 591 */
592 592 if (t->t_nomigrate < 0 || weakbindingbarrier && t->t_nomigrate == 0) {
593 593 --t->t_nomigrate;
594 594 thread_unlock(curthread);
595 595 return; /* with kpreempt_disable still active */
596 596 }
597 597
598 598 /*
599 599 * We hold thread_lock so t_bind_cpu cannot change. We could,
600 600 * however, be running on a different cpu to which we are t_bound_cpu
601 601 * to (as explained above). If we grant the weak binding request
602 602 * in that case then the dispatcher must favour our weak binding
603 603 * over our strong (in which case, just as when preemption is
604 604 * disabled, we can continue to run on a cpu other than the one to
605 605 * which we are strongbound; the difference in this case is that
606 606 * this thread can be preempted and so can appear on the dispatch
607 607 * queues of a cpu other than the one it is strongbound to).
608 608 *
609 609 * If the cpu we are running on does not appear to be a current
610 610 * offline target (we check cpu_inmotion to determine this - since
611 611 * we don't hold cpu_lock we may not see a recent store to that,
612 612 * so it's possible that we at times can grant a weak binding to a
613 613 * cpu that is an offline target, but that one request will not
614 614 * prevent the offline from succeeding) then we will always grant
615 615 * the weak binding request. This includes the case above where
616 616 * we grant a weakbinding not commensurate with our strong binding.
617 617 *
618 618 * If our cpu does appear to be an offline target then we're inclined
619 619 * not to grant the weakbinding request just yet - we'd prefer to
620 620 * migrate to another cpu and grant the request there. The
621 621 * exceptions are those cases where going through preemption code
622 622 * will not result in us changing cpu:
623 623 *
624 624 * . interrupts have already bypassed this case (see above)
625 625 * . we are already weakbound to this cpu (dispatcher code will
626 626 * always return us to the weakbound cpu)
627 627 * . preemption was disabled even before we disabled it above
628 628 * . we are strongbound to this cpu (if we're strongbound to
629 629 * another and not yet running there the trip through the
630 630 * dispatcher will move us to the strongbound cpu and we
631 631 * will grant the weak binding there)
632 632 */
633 633 if (cp != cpu_inmotion || t->t_nomigrate > 0 || t->t_preempt > 1 ||
634 634 t->t_bound_cpu == cp) {
635 635 /*
636 636 * Don't be tempted to store to t_weakbound_cpu only on
637 637 * the first nested bind request - if we're interrupted
638 638 * after the increment of t_nomigrate and before the
639 639 * store to t_weakbound_cpu and the interrupt calls
640 640 * thread_nomigrate then the assertion in thread_allowmigrate
641 641 * would fail.
642 642 */
643 643 t->t_nomigrate++;
644 644 t->t_weakbound_cpu = cp;
645 645 membar_producer();
646 646 thread_unlock(curthread);
647 647 /*
648 648 * Now that we have dropped the thread_lock another thread
649 649  * can set our t_bind_cpu, and will try to migrate us
650 650 * to the strongbound cpu (which will not be prevented by
651 651 * preemption being disabled since we're about to enable
652 652 * preemption). We have granted the weakbinding to the current
653 653  * cpu, so again we are in the position that it is possible
654 654 * that our weak and strong bindings differ. Again this
655 655 * is catered for by dispatcher code which will favour our
656 656 * weak binding.
657 657 */
658 658 kpreempt_enable();
659 659 } else {
660 660 /*
661 661 * Move to another cpu before granting the request by
662 662 * forcing this thread through preemption code. When we
663 663 * get to set{front,back}dq called from CL_PREEMPT()
664 664 * cpu_choose() will be used to select a cpu to queue
665 665 * us on - that will see cpu_inmotion and take
666 666 * steps to avoid returning us to this cpu.
667 667 */
668 668 cp->cpu_kprunrun = 1;
669 669 thread_unlock(curthread);
670 670 kpreempt_enable(); /* will call preempt() */
671 671 goto again;
672 672 }
673 673 }
674 674
675 675 void
676 676 thread_allowmigrate(void)
677 677 {
678 678 kthread_id_t t = curthread;
679 679
680 680 ASSERT(t->t_weakbound_cpu == CPU ||
681 681 (t->t_nomigrate < 0 && t->t_preempt > 0) ||
682 682 CPU_ON_INTR(CPU) || t->t_flag & T_INTR_THREAD ||
683 683 getpil() >= DISP_LEVEL);
684 684
685 685 if (CPU_ON_INTR(CPU) || (t->t_flag & T_INTR_THREAD) ||
686 686 getpil() >= DISP_LEVEL)
687 687 return;
688 688
689 689 if (t->t_nomigrate < 0) {
690 690 /*
691 691 * This thread was granted "weak binding" in the
692 692 * stronger form of kernel preemption disabling.
693 693 * Undo a level of nesting for both t_nomigrate
694 694 * and t_preempt.
695 695 */
696 696 ++t->t_nomigrate;
697 697 kpreempt_enable();
698 698 } else if (--t->t_nomigrate == 0) {
699 699 /*
700 700 * Time to drop the weak binding. We need to cater
701 701 * for the case where we're weakbound to a different
702 702 * cpu than that to which we're strongbound (a very
703 703 * temporary arrangement that must only persist until
704 704 * weak binding drops). We don't acquire thread_lock
705 705 * here so even as this code executes t_bound_cpu
706 706 * may be changing. So we disable preemption and
707 707 * a) in the case that t_bound_cpu changes while we
708 708 * have preemption disabled kprunrun will be set
709 709 * asynchronously, and b) if before disabling
710 710 * preemption we were already on a different cpu to
711 711 * our t_bound_cpu then we set kprunrun ourselves
712 712 * to force a trip through the dispatcher when
713 713 * preemption is enabled.
714 714 */
715 715 kpreempt_disable();
716 716 if (t->t_bound_cpu &&
717 717 t->t_weakbound_cpu != t->t_bound_cpu)
718 718 CPU->cpu_kprunrun = 1;
719 719 t->t_weakbound_cpu = NULL;
720 720 membar_producer();
721 721 kpreempt_enable();
722 722 }
723 723 }
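/*
 * A minimal sketch (illustrative only, not part of this file) of guarding
 * a short, non-blocking section with weak binding as described above.
 */
#if 0
static void
weakbind_example(void)
{
	processorid_t id;

	thread_nomigrate();	/* may be preempted, but not migrated */
	id = CPU->cpu_id;	/* stable for the duration of the binding */
	/* ... short, non-blocking work against this cpu's state ... */
	ASSERT(CPU->cpu_id == id);
	thread_allowmigrate();
}
#endif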
724 724
725 725 /*
726 726 * weakbinding_stop can be used to temporarily cause weakbindings made
727 727 * with thread_nomigrate to be satisfied through the stronger action of
728 728 * kpreempt_disable. weakbinding_start recommences normal weakbinding.
729 729 */
730 730
731 731 void
732 732 weakbinding_stop(void)
733 733 {
734 734 ASSERT(MUTEX_HELD(&cpu_lock));
735 735 weakbindingbarrier = 1;
736 736 membar_producer(); /* make visible before subsequent thread_lock */
737 737 }
738 738
739 739 void
740 740 weakbinding_start(void)
741 741 {
742 742 ASSERT(MUTEX_HELD(&cpu_lock));
743 743 weakbindingbarrier = 0;
744 744 }
745 745
746 746 void
747 747 null_xcall(void)
748 748 {
749 749 }
750 750
751 751 /*
752 752 * This routine is called to place the CPUs in a safe place so that
753 753 * one of them can be taken off line or placed on line. What we are
754 754 * trying to do here is prevent a thread from traversing the list
755 755 * of active CPUs while we are changing it or from getting placed on
756 756 * the run queue of a CPU that has just gone off line. We do this by
757 757 * creating a thread with the highest possible prio for each CPU and
758 758 * having it call this routine. The advantage of this method is that
759 759 * we can eliminate all checks for CPU_ACTIVE in the disp routines.
760 760 * This makes disp faster at the expense of making p_online() slower
761 761 * which is a good trade off.
762 762 */
763 763 static void
764 764 cpu_pause(int index)
765 765 {
766 766 int s;
767 767 struct _cpu_pause_info *cpi = &cpu_pause_info;
768 768 volatile char *safe = &safe_list[index];
769 769 long lindex = index;
770 770
771 771 ASSERT((curthread->t_bound_cpu != NULL) || (*safe == PAUSE_DIE));
772 772
773 773 while (*safe != PAUSE_DIE) {
774 774 *safe = PAUSE_READY;
775 775 membar_enter(); /* make sure stores are flushed */
776 776 sema_v(&cpi->cp_sem); /* signal requesting thread */
777 777
778 778 /*
779 779 * Wait here until all pause threads are running. That
780 780 * indicates that it's safe to do the spl. Until
781 781 * cpu_pause_info.cp_go is set, we don't want to spl
782 782 * because that might block clock interrupts needed
783 783 * to preempt threads on other CPUs.
784 784 */
785 785 while (cpi->cp_go == 0)
786 786 ;
787 787 /*
788 788 * Even though we are at the highest disp prio, we need
789 789 * to block out all interrupts below LOCK_LEVEL so that
790 790 * an intr doesn't come in, wake up a thread, and call
791 791 * setbackdq/setfrontdq.
792 792 */
793 793 s = splhigh();
794 794 /*
795 795  * If cpu_pause_func() has been set, call it with index as the
796 796  * argument; currently it is only used by cpr_suspend_cpus().
797 797  * This function supplies the code to execute on the "paused"
798 798  * CPUs when a machine comes out of a sleep state in which CPUs
799 799  * were powered off. (It could also be used for hotplugging
800 800  * CPUs.)
801 801 */
802 802 if (cpu_pause_func != NULL)
803 803 (*cpu_pause_func)((void *)lindex);
804 804
805 805 mach_cpu_pause(safe);
806 806
807 807 splx(s);
808 808 /*
809 809 * Waiting is at an end. Switch out of cpu_pause
810 810 * loop and resume useful work.
811 811 */
812 812 swtch();
813 813 }
814 814
815 815 mutex_enter(&pause_free_mutex);
816 816 *safe = PAUSE_DEAD;
817 817 cv_broadcast(&pause_free_cv);
818 818 mutex_exit(&pause_free_mutex);
819 819 }
820 820
821 821 /*
822 822 * Allow the cpus to start running again.
823 823 */
824 824 void
825 825 start_cpus()
826 826 {
827 827 int i;
828 828
829 829 ASSERT(MUTEX_HELD(&cpu_lock));
830 830 ASSERT(cpu_pause_info.cp_paused);
831 831 cpu_pause_info.cp_paused = NULL;
832 832 for (i = 0; i < NCPU; i++)
833 833 safe_list[i] = PAUSE_IDLE;
834 834 membar_enter(); /* make sure stores are flushed */
835 835 affinity_clear();
836 836 splx(cpu_pause_info.cp_spl);
837 837 kpreempt_enable();
838 838 }
839 839
840 840 /*
841 841 * Allocate a pause thread for a CPU.
842 842 */
843 843 static void
844 844 cpu_pause_alloc(cpu_t *cp)
845 845 {
846 846 kthread_id_t t;
847 847 long cpun = cp->cpu_id;
848 848
849 849 /*
850 850  * Note: v.v_nglobpris will not change value as long as
851 851  * cpu_lock is held.
852 852 */
853 853 t = thread_create(NULL, 0, cpu_pause, (void *)cpun,
854 854 0, &p0, TS_STOPPED, v.v_nglobpris - 1);
855 855 thread_lock(t);
856 856 t->t_bound_cpu = cp;
857 857 t->t_disp_queue = cp->cpu_disp;
858 858 t->t_affinitycnt = 1;
859 859 t->t_preempt = 1;
860 860 thread_unlock(t);
861 861 cp->cpu_pause_thread = t;
862 862 /*
863 863 * Registering a thread in the callback table is usually done
864 864 * in the initialization code of the thread. In this
865 865 * case, we do it right after thread creation because the
866 866 * thread itself may never run, and we need to register the
867 867 * fact that it is safe for cpr suspend.
868 868 */
869 869 CALLB_CPR_INIT_SAFE(t, "cpu_pause");
870 870 }
871 871
872 872 /*
873 873 * Free a pause thread for a CPU.
874 874 */
875 875 static void
876 876 cpu_pause_free(cpu_t *cp)
877 877 {
878 878 kthread_id_t t;
879 879 int cpun = cp->cpu_id;
880 880
881 881 ASSERT(MUTEX_HELD(&cpu_lock));
882 882 /*
883 883  * We have to get the thread and tell it to die.
884 884 */
885 885 if ((t = cp->cpu_pause_thread) == NULL) {
886 886 ASSERT(safe_list[cpun] == PAUSE_IDLE);
887 887 return;
888 888 }
889 889 thread_lock(t);
890 890 t->t_cpu = CPU; /* disp gets upset if last cpu is quiesced. */
891 891 t->t_bound_cpu = NULL; /* Must un-bind; cpu may not be running. */
892 892 t->t_pri = v.v_nglobpris - 1;
893 893 ASSERT(safe_list[cpun] == PAUSE_IDLE);
894 894 safe_list[cpun] = PAUSE_DIE;
895 895 THREAD_TRANSITION(t);
896 896 setbackdq(t);
897 897 thread_unlock_nopreempt(t);
898 898
899 899 /*
900 900 * If we don't wait for the thread to actually die, it may try to
901 901 * run on the wrong cpu as part of an actual call to pause_cpus().
902 902 */
903 903 mutex_enter(&pause_free_mutex);
904 904 while (safe_list[cpun] != PAUSE_DEAD) {
905 905 cv_wait(&pause_free_cv, &pause_free_mutex);
906 906 }
907 907 mutex_exit(&pause_free_mutex);
908 908 safe_list[cpun] = PAUSE_IDLE;
909 909
910 910 cp->cpu_pause_thread = NULL;
911 911 }
912 912
913 913 /*
914 914 * Initialize basic structures for pausing CPUs.
915 915 */
916 916 void
917 917 cpu_pause_init()
918 918 {
919 919 sema_init(&cpu_pause_info.cp_sem, 0, NULL, SEMA_DEFAULT, NULL);
920 920 /*
921 921 * Create initial CPU pause thread.
922 922 */
923 923 cpu_pause_alloc(CPU);
924 924 }
925 925
926 926 /*
927 927 * Start the threads used to pause another CPU.
928 928 */
929 929 static int
930 930 cpu_pause_start(processorid_t cpu_id)
931 931 {
932 932 int i;
933 933 int cpu_count = 0;
934 934
935 935 for (i = 0; i < NCPU; i++) {
936 936 cpu_t *cp;
937 937 kthread_id_t t;
938 938
939 939 cp = cpu[i];
940 940 if (!CPU_IN_SET(cpu_available, i) || (i == cpu_id)) {
941 941 safe_list[i] = PAUSE_WAIT;
942 942 continue;
943 943 }
944 944
945 945 /*
946 946 * Skip CPU if it is quiesced or not yet started.
947 947 */
948 948 if ((cp->cpu_flags & (CPU_QUIESCED | CPU_READY)) != CPU_READY) {
949 949 safe_list[i] = PAUSE_WAIT;
950 950 continue;
951 951 }
952 952
953 953 /*
954 954 * Start this CPU's pause thread.
955 955 */
956 956 t = cp->cpu_pause_thread;
957 957 thread_lock(t);
958 958 /*
959 959 * Reset the priority, since nglobpris may have
960 960 * changed since the thread was created, if someone
961 961 * has loaded the RT (or some other) scheduling
962 962 * class.
963 963 */
964 964 t->t_pri = v.v_nglobpris - 1;
965 965 THREAD_TRANSITION(t);
966 966 setbackdq(t);
967 967 thread_unlock_nopreempt(t);
968 968 ++cpu_count;
969 969 }
970 970 return (cpu_count);
971 971 }
972 972
973 973
974 974 /*
975 975 * Pause all of the CPUs except the one we are on by creating a high
976 976 * priority thread bound to those CPUs.
977 977 *
978 978 * Note that one must be extremely careful regarding code
979 979 * executed while CPUs are paused. Since a CPU may be paused
980 980 * while a thread scheduling on that CPU is holding an adaptive
981 981 * lock, code executed with CPUs paused must not acquire adaptive
982 982 * (or low-level spin) locks. Also, such code must not block,
983 983 * since the thread that is supposed to initiate the wakeup may
984 984 * never run.
985 985 *
986 986 * With a few exceptions, the restrictions on code executed with CPUs
987 987 * paused match those for code executed at high-level interrupt
988 988 * context.
989 989 */
990 990 void
991 991 pause_cpus(cpu_t *off_cp)
992 992 {
993 993 processorid_t cpu_id;
994 994 int i;
995 995 struct _cpu_pause_info *cpi = &cpu_pause_info;
996 996
997 997 ASSERT(MUTEX_HELD(&cpu_lock));
998 998 ASSERT(cpi->cp_paused == NULL);
999 999 cpi->cp_count = 0;
1000 1000 cpi->cp_go = 0;
1001 1001 for (i = 0; i < NCPU; i++)
1002 1002 safe_list[i] = PAUSE_IDLE;
1003 1003 kpreempt_disable();
1004 1004
1005 1005 /*
1006 1006 * If running on the cpu that is going offline, get off it.
1007 1007 * This is so that it won't be necessary to rechoose a CPU
1008 1008 * when done.
1009 1009 */
1010 1010 if (CPU == off_cp)
1011 1011 cpu_id = off_cp->cpu_next_part->cpu_id;
1012 1012 else
1013 1013 cpu_id = CPU->cpu_id;
1014 1014 affinity_set(cpu_id);
1015 1015
1016 1016 /*
1017 1017 * Start the pause threads and record how many were started
1018 1018 */
1019 1019 cpi->cp_count = cpu_pause_start(cpu_id);
1020 1020
1021 1021 /*
1022 1022 * Now wait for all CPUs to be running the pause thread.
1023 1023 */
1024 1024 while (cpi->cp_count > 0) {
1025 1025 /*
1026 1026 * Spin reading the count without grabbing the disp
1027 1027 * lock to make sure we don't prevent the pause
1028 1028 * threads from getting the lock.
1029 1029 */
1030 1030 while (sema_held(&cpi->cp_sem))
1031 1031 ;
1032 1032 if (sema_tryp(&cpi->cp_sem))
1033 1033 --cpi->cp_count;
1034 1034 }
1035 1035 cpi->cp_go = 1; /* all have reached cpu_pause */
1036 1036
1037 1037 /*
1038 1038 * Now wait for all CPUs to spl. (Transition from PAUSE_READY
1039 1039 * to PAUSE_WAIT.)
1040 1040 */
1041 1041 for (i = 0; i < NCPU; i++) {
1042 1042 while (safe_list[i] != PAUSE_WAIT)
1043 1043 ;
1044 1044 }
1045 1045 cpi->cp_spl = splhigh(); /* block dispatcher on this CPU */
1046 1046 cpi->cp_paused = curthread;
1047 1047 }
1048 1048
1049 1049 /*
1050 1050 * Check whether the current thread has CPUs paused
1051 1051 */
1052 1052 int
1053 1053 cpus_paused(void)
1054 1054 {
1055 1055 if (cpu_pause_info.cp_paused != NULL) {
1056 1056 ASSERT(cpu_pause_info.cp_paused == curthread);
1057 1057 return (1);
1058 1058 }
1059 1059 return (0);
1060 1060 }
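/*
 * A minimal sketch (illustrative only, not part of this file) of the
 * pause protocol: everything done between pause_cpus() and start_cpus()
 * must not acquire adaptive locks or block, per the restrictions above.
 */
#if 0
static void
pause_protocol_example(void)
{
	mutex_enter(&cpu_lock);
	pause_cpus(NULL);	/* other CPUs spin in cpu_pause() */

	/* ... modify cpu_list or other pause-protected state ... */

	start_cpus();		/* release the pause threads */
	mutex_exit(&cpu_lock);
}
#endif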
1061 1061
1062 1062 static cpu_t *
1063 1063 cpu_get_all(processorid_t cpun)
1064 1064 {
1065 1065 ASSERT(MUTEX_HELD(&cpu_lock));
1066 1066
1067 1067 if (cpun >= NCPU || cpun < 0 || !CPU_IN_SET(cpu_available, cpun))
1068 1068 return (NULL);
1069 1069 return (cpu[cpun]);
1070 1070 }
1071 1071
1072 1072 /*
1073 1073 * Check whether cpun is a valid processor id and whether it should be
1074 1074 * visible from the current zone. If it is, return a pointer to the
1075 1075 * associated CPU structure.
1076 1076 */
1077 1077 cpu_t *
1078 1078 cpu_get(processorid_t cpun)
1079 1079 {
1080 1080 cpu_t *c;
1081 1081
1082 1082 ASSERT(MUTEX_HELD(&cpu_lock));
1083 1083 c = cpu_get_all(cpun);
1084 1084 if (c != NULL && !INGLOBALZONE(curproc) && pool_pset_enabled() &&
1085 1085 zone_pset_get(curproc->p_zone) != cpupart_query_cpu(c))
1086 1086 return (NULL);
1087 1087 return (c);
1088 1088 }
1089 1089
1090 1090 /*
1091 1091 * The following functions should be used to check CPU states in the kernel.
1092 1092 * They should be invoked with cpu_lock held. Kernel subsystems interested
1093 1093 * in CPU states should *not* use cpu_get_state() and various P_ONLINE/etc
1094 1094 * states. Those are for user-land (and system call) use only.
1095 1095 */
1096 1096
1097 1097 /*
1098 1098 * Determine whether the CPU is online and handling interrupts.
1099 1099 */
1100 1100 int
1101 1101 cpu_is_online(cpu_t *cpu)
1102 1102 {
1103 1103 ASSERT(MUTEX_HELD(&cpu_lock));
1104 1104 return (cpu_flagged_online(cpu->cpu_flags));
1105 1105 }
1106 1106
1107 1107 /*
1108 1108 * Determine whether the CPU is offline (this includes spare and faulted).
1109 1109 */
1110 1110 int
1111 1111 cpu_is_offline(cpu_t *cpu)
1112 1112 {
1113 1113 ASSERT(MUTEX_HELD(&cpu_lock));
1114 1114 return (cpu_flagged_offline(cpu->cpu_flags));
1115 1115 }
1116 1116
1117 1117 /*
1118 1118 * Determine whether the CPU is powered off.
1119 1119 */
1120 1120 int
1121 1121 cpu_is_poweredoff(cpu_t *cpu)
1122 1122 {
1123 1123 ASSERT(MUTEX_HELD(&cpu_lock));
1124 1124 return (cpu_flagged_poweredoff(cpu->cpu_flags));
1125 1125 }
1126 1126
1127 1127 /*
1128 1128  * Determine whether the CPU is active but not handling interrupts.
1129 1129 */
1130 1130 int
1131 1131 cpu_is_nointr(cpu_t *cpu)
1132 1132 {
1133 1133 ASSERT(MUTEX_HELD(&cpu_lock));
1134 1134 return (cpu_flagged_nointr(cpu->cpu_flags));
1135 1135 }
1136 1136
1137 1137 /*
1138 1138 * Determine whether the CPU is active (scheduling threads).
1139 1139 */
1140 1140 int
1141 1141 cpu_is_active(cpu_t *cpu)
1142 1142 {
1143 1143 ASSERT(MUTEX_HELD(&cpu_lock));
1144 1144 return (cpu_flagged_active(cpu->cpu_flags));
1145 1145 }
1146 1146
1147 1147 /*
1148 1148 * Same as above, but these require cpu_flags instead of cpu_t pointers.
1149 1149 */
1150 1150 int
1151 1151 cpu_flagged_online(cpu_flag_t cpu_flags)
1152 1152 {
1153 1153 return (cpu_flagged_active(cpu_flags) &&
1154 1154 (cpu_flags & CPU_ENABLE));
1155 1155 }
1156 1156
1157 1157 int
1158 1158 cpu_flagged_offline(cpu_flag_t cpu_flags)
1159 1159 {
1160 1160 return (((cpu_flags & CPU_POWEROFF) == 0) &&
1161 1161 ((cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY));
1162 1162 }
1163 1163
1164 1164 int
1165 1165 cpu_flagged_poweredoff(cpu_flag_t cpu_flags)
1166 1166 {
1167 1167 return ((cpu_flags & CPU_POWEROFF) == CPU_POWEROFF);
1168 1168 }
1169 1169
1170 1170 int
1171 1171 cpu_flagged_nointr(cpu_flag_t cpu_flags)
1172 1172 {
1173 1173 return (cpu_flagged_active(cpu_flags) &&
1174 1174 (cpu_flags & CPU_ENABLE) == 0);
1175 1175 }
1176 1176
1177 1177 int
1178 1178 cpu_flagged_active(cpu_flag_t cpu_flags)
1179 1179 {
1180 1180 return (((cpu_flags & (CPU_POWEROFF | CPU_FAULTED | CPU_SPARE)) == 0) &&
1181 1181 ((cpu_flags & (CPU_READY | CPU_OFFLINE)) == CPU_READY));
1182 1182 }
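/*
 * A minimal sketch (illustrative only, not part of this file) of querying
 * CPU state with the predicates above; cpu_lock must be held.
 */
#if 0
static int
count_online_cpus_example(void)
{
	cpu_t *cp;
	int n = 0;

	mutex_enter(&cpu_lock);
	cp = cpu_list;
	do {
		if (cpu_is_online(cp))
			n++;
		cp = cp->cpu_next;
	} while (cp != cpu_list);
	mutex_exit(&cpu_lock);
	return (n);
}
#endif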
1183 1183
1184 1184 /*
1185 1185 * Bring the indicated CPU online.
1186 1186 */
1187 1187 int
1188 1188 cpu_online(cpu_t *cp)
1189 1189 {
1190 1190 int error = 0;
1191 1191
1192 1192 /*
1193 1193 * Handle on-line request.
1194 1194 * This code must put the new CPU on the active list before
1195 1195 * starting it because it will not be paused, and will start
1196 1196 * using the active list immediately. The real start occurs
1197 1197 * when the CPU_QUIESCED flag is turned off.
1198 1198 */
1199 1199
1200 1200 ASSERT(MUTEX_HELD(&cpu_lock));
1201 1201
1202 1202 /*
1203 1203 * Put all the cpus into a known safe place.
1204 1204 * No mutexes can be entered while CPUs are paused.
1205 1205 */
1206 1206 error = mp_cpu_start(cp); /* arch-dep hook */
1207 1207 if (error == 0) {
1208 1208 pg_cpupart_in(cp, cp->cpu_part);
1209 1209 pause_cpus(NULL);
1210 1210 cpu_add_active_internal(cp);
1211 1211 if (cp->cpu_flags & CPU_FAULTED) {
1212 1212 cp->cpu_flags &= ~CPU_FAULTED;
1213 1213 mp_cpu_faulted_exit(cp);
1214 1214 }
1215 1215 cp->cpu_flags &= ~(CPU_QUIESCED | CPU_OFFLINE | CPU_FROZEN |
1216 1216 CPU_SPARE);
1217 1217 CPU_NEW_GENERATION(cp);
1218 1218 start_cpus();
1219 1219 cpu_stats_kstat_create(cp);
1220 1220 cpu_create_intrstat(cp);
1221 1221 lgrp_kstat_create(cp);
1222 1222 cpu_state_change_notify(cp->cpu_id, CPU_ON);
1223 1223 cpu_intr_enable(cp); /* arch-dep hook */
1224 1224 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1225 1225 cpu_set_state(cp);
1226 1226 cyclic_online(cp);
1227 1227 /*
1228 1228 * This has to be called only after cyclic_online(). This
1229 1229 * function uses cyclics.
1230 1230 */
1231 1231 callout_cpu_online(cp);
1232 1232 poke_cpu(cp->cpu_id);
1233 1233 }
1234 1234
1235 1235 return (error);
1236 1236 }
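/*
 * A minimal sketch (illustrative only, not part of this file) of a caller
 * driving the online transition above; cpu_lock must be held.
 */
#if 0
static int
bring_cpu_online_example(processorid_t cid)
{
	cpu_t *cp;
	int err = EINVAL;

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cid)) != NULL && cpu_is_offline(cp))
		err = cpu_online(cp);
	mutex_exit(&cpu_lock);
	return (err);
}
#endif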
1237 1237
1238 1238 /*
1239 1239 * Take the indicated CPU offline.
1240 1240 */
1241 1241 int
1242 1242 cpu_offline(cpu_t *cp, int flags)
1243 1243 {
1244 1244 cpupart_t *pp;
1245 1245 int error = 0;
1246 1246 cpu_t *ncp;
1247 1247 int intr_enable;
1248 1248 int cyclic_off = 0;
1249 1249 int callout_off = 0;
1250 1250 int loop_count;
1251 1251 int no_quiesce = 0;
1252 1252 int (*bound_func)(struct cpu *, int);
1253 1253 kthread_t *t;
1254 1254 lpl_t *cpu_lpl;
1255 1255 proc_t *p;
1256 1256 int lgrp_diff_lpl;
1257 1257 boolean_t unbind_all_threads = (flags & CPU_FORCED) != 0;
1258 1258
1259 1259 ASSERT(MUTEX_HELD(&cpu_lock));
1260 1260
1261 1261 /*
1262 1262 * If we're going from faulted or spare to offline, just
1263 1263 * clear these flags and update CPU state.
1264 1264 */
1265 1265 if (cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) {
1266 1266 if (cp->cpu_flags & CPU_FAULTED) {
1267 1267 cp->cpu_flags &= ~CPU_FAULTED;
1268 1268 mp_cpu_faulted_exit(cp);
1269 1269 }
1270 1270 cp->cpu_flags &= ~CPU_SPARE;
1271 1271 cpu_set_state(cp);
1272 1272 return (0);
1273 1273 }
1274 1274
1275 1275 /*
1276 1276 * Handle off-line request.
1277 1277 */
1278 1278 pp = cp->cpu_part;
1279 1279 /*
1280 1280  * Don't offline the last online CPU in a partition.
1281 1281 */
1282 1282 if (ncpus_online <= 1 || pp->cp_ncpus <= 1 || cpu_intr_count(cp) < 2)
1283 1283 return (EBUSY);
1284 1284 /*
1285 1285  * Unbind all soft-bound threads bound to our CPU, and hard-bound
1286 1286  * threads if we were asked to.
1287 1287 */
1288 1288 error = cpu_unbind(cp->cpu_id, unbind_all_threads);
1289 1289 if (error != 0)
1290 1290 return (error);
1291 1291 /*
1292 1292 * We shouldn't be bound to this CPU ourselves.
1293 1293 */
1294 1294 if (curthread->t_bound_cpu == cp)
1295 1295 return (EBUSY);
1296 1296
1297 1297 /*
1298 1298 * Tell interested parties that this CPU is going offline.
1299 1299 */
1300 1300 CPU_NEW_GENERATION(cp);
1301 1301 cpu_state_change_notify(cp->cpu_id, CPU_OFF);
1302 1302
1303 1303 /*
1304 1304 * Tell the PG subsystem that the CPU is leaving the partition
1305 1305 */
1306 1306 pg_cpupart_out(cp, pp);
1307 1307
1308 1308 /*
1309 1309 * Take the CPU out of interrupt participation so we won't find
1310 1310 * bound kernel threads. If the architecture cannot completely
1311 1311 * shut off interrupts on the CPU, don't quiesce it, but don't
1312 1312 * run anything but interrupt thread... this is indicated by
1313 1313  * the CPU_OFFLINE flag being on but the CPU_QUIESCED flag being
1314 1314 * off.
1315 1315 */
1316 1316 intr_enable = cp->cpu_flags & CPU_ENABLE;
1317 1317 if (intr_enable)
1318 1318 no_quiesce = cpu_intr_disable(cp);
1319 1319
1320 1320 /*
1321 1321 * Record that we are aiming to offline this cpu. This acts as
1322 1322 * a barrier to further weak binding requests in thread_nomigrate
1323 1323 * and also causes cpu_choose, disp_lowpri_cpu and setfrontdq to
1324 1324 * lean away from this cpu. Further strong bindings are already
1325 1325 * avoided since we hold cpu_lock. Since threads that are set
1326 1326 * runnable around now and others coming off the target cpu are
1327 1327 * directed away from the target, existing strong and weak bindings
1328 1328 * (especially the latter) to the target cpu stand maximum chance of
1329 1329 * being able to unbind during the short delay loop below (if other
1330 1330 * unbound threads compete they may not see cpu in time to unbind
1331 1331  * even if they would do so immediately).
1332 1332 */
1333 1333 cpu_inmotion = cp;
1334 1334 membar_enter();
1335 1335
1336 1336 /*
1337 1337 * Check for kernel threads (strong or weak) bound to that CPU.
1338 1338 * Strongly bound threads may not unbind, and we'll have to return
1339 1339 * EBUSY. Weakly bound threads should always disappear - we've
1340 1340 * stopped more weak binding with cpu_inmotion and existing
1341 1341 * bindings will drain imminently (they may not block). Nonetheless
1342 1342 * we will wait for a fixed period for all bound threads to disappear.
1343 1343 * Inactive interrupt threads are OK (they'll be in TS_FREE
1344 1344  * state). If the test finds some bound threads, wait a few ticks
1345 1345  * to give short-lived threads (such as interrupts) a chance to
1346 1346 * complete. Note that if no_quiesce is set, i.e. this cpu
1347 1347 * is required to service interrupts, then we take the route
1348 1348 * that permits interrupt threads to be active (or bypassed).
1349 1349 */
1350 1350 bound_func = no_quiesce ? disp_bound_threads : disp_bound_anythreads;
1351 1351
1352 1352 again: for (loop_count = 0; (*bound_func)(cp, 0); loop_count++) {
1353 1353 if (loop_count >= 5) {
1354 1354 error = EBUSY; /* some threads still bound */
1355 1355 break;
1356 1356 }
1357 1357
1358 1358 /*
1359 1359 * If some threads were assigned, give them
1360 1360 * a chance to complete or move.
1361 1361 *
1362 1362 * This assumes that the clock_thread is not bound
1363 1363 * to any CPU, because the clock_thread is needed to
1364 1364 * do the delay(hz/100).
1365 1365 *
1366 1366 * Note: we still hold the cpu_lock while waiting for
1367 1367 * the next clock tick. This is OK since it isn't
1368 1368 * needed for anything else except processor_bind(2),
1369 1369 * and system initialization. If we drop the lock,
1370 1370 * we would risk another p_online disabling the last
1371 1371 * processor.
1372 1372 */
1373 1373 delay(hz/100);
1374 1374 }
1375 1375
1376 1376 if (error == 0 && callout_off == 0) {
1377 1377 callout_cpu_offline(cp);
1378 1378 callout_off = 1;
1379 1379 }
1380 1380
1381 1381 if (error == 0 && cyclic_off == 0) {
1382 1382 if (!cyclic_offline(cp)) {
1383 1383 /*
1384 1384 * We must have bound cyclics...
1385 1385 */
1386 1386 error = EBUSY;
1387 1387 goto out;
1388 1388 }
1389 1389 cyclic_off = 1;
1390 1390 }
1391 1391
1392 1392 /*
1393 1393 * Call mp_cpu_stop() to perform any special operations
1394 1394 * needed for this machine architecture to offline a CPU.
1395 1395 */
1396 1396 if (error == 0)
1397 1397 error = mp_cpu_stop(cp); /* arch-dep hook */
1398 1398
1399 1399 /*
1400 1400 * If that all worked, take the CPU offline and decrement
1401 1401 * ncpus_online.
1402 1402 */
1403 1403 if (error == 0) {
1404 1404 /*
1405 1405 * Put all the cpus into a known safe place.
1406 1406 * No mutexes can be entered while CPUs are paused.
1407 1407 */
1408 1408 pause_cpus(cp);
1409 1409 /*
1410 1410 * Repeat the operation, if necessary, to make sure that
1411 1411 * all outstanding low-level interrupts run to completion
1412 1412 * before we set the CPU_QUIESCED flag. It's also possible
1413 1413  * that a thread has weakly bound to the cpu despite our raising
1414 1414 * cpu_inmotion above since it may have loaded that
1415 1415 * value before the barrier became visible (this would have
1416 1416 * to be the thread that was on the target cpu at the time
1417 1417 * we raised the barrier).
1418 1418 */
1419 1419 if ((!no_quiesce && cp->cpu_intr_actv != 0) ||
1420 1420 (*bound_func)(cp, 1)) {
1421 1421 start_cpus();
1422 1422 (void) mp_cpu_start(cp);
1423 1423 goto again;
1424 1424 }
1425 1425 ncp = cp->cpu_next_part;
1426 1426 cpu_lpl = cp->cpu_lpl;
1427 1427 ASSERT(cpu_lpl != NULL);
1428 1428
1429 1429 /*
1430 1430 * Remove the CPU from the list of active CPUs.
1431 1431 */
1432 1432 cpu_remove_active(cp);
1433 1433
1434 1434 /*
1435 1435 * Walk the active process list and look for threads
1436 1436  * whose home lgroup needs to be updated, or whose
1437 1437  * last CPU is the one being offlined now.
1438 1438 */
1439 1439
1440 1440 ASSERT(curthread->t_cpu != cp);
1441 1441 for (p = practive; p != NULL; p = p->p_next) {
1442 1442
1443 1443 t = p->p_tlist;
1444 1444
1445 1445 if (t == NULL)
1446 1446 continue;
1447 1447
1448 1448 lgrp_diff_lpl = 0;
1449 1449
1450 1450 do {
1451 1451 ASSERT(t->t_lpl != NULL);
1452 1452 /*
1453 1453 * Taking last CPU in lpl offline
1454 1454 * Rehome thread if it is in this lpl
1455 1455 * Otherwise, update the count of how many
1456 1456 * threads are in this CPU's lgroup but have
1457 1457 * a different lpl.
1458 1458 */
1459 1459
1460 1460 if (cpu_lpl->lpl_ncpu == 0) {
1461 1461 if (t->t_lpl == cpu_lpl)
1462 1462 lgrp_move_thread(t,
1463 1463 lgrp_choose(t,
1464 1464 t->t_cpupart), 0);
1465 1465 else if (t->t_lpl->lpl_lgrpid ==
1466 1466 cpu_lpl->lpl_lgrpid)
1467 1467 lgrp_diff_lpl++;
1468 1468 }
1469 1469 ASSERT(t->t_lpl->lpl_ncpu > 0);
1470 1470
1471 1471 /*
1472 1472 * Update CPU last ran on if it was this CPU
1473 1473 */
1474 1474 if (t->t_cpu == cp && t->t_bound_cpu != cp)
1475 1475 t->t_cpu = disp_lowpri_cpu(ncp,
1476 1476 t->t_lpl, t->t_pri, NULL);
1477 1477 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1478 1478 t->t_weakbound_cpu == cp);
1479 1479
1480 1480 t = t->t_forw;
1481 1481 } while (t != p->p_tlist);
1482 1482
1483 1483 /*
1484 1484 * Didn't find any threads in the same lgroup as this
1485 1485 * CPU with a different lpl, so remove the lgroup from
1486 1486 * the process lgroup bitmask.
1487 1487 */
1488 1488
1489 1489 if (lgrp_diff_lpl == 0)
1490 1490 klgrpset_del(p->p_lgrpset, cpu_lpl->lpl_lgrpid);
1491 1491 }
1492 1492
1493 1493 /*
1494 1494 * Walk thread list looking for threads that need to be
1495 1495 * rehomed, since there are some threads that are not in
1496 1496 * their process's p_tlist.
1497 1497 */
1498 1498
1499 1499 t = curthread;
1500 1500 do {
1501 1501 ASSERT(t != NULL && t->t_lpl != NULL);
1502 1502
1503 1503 /*
1504 1504 * Rehome threads with same lpl as this CPU when this
1505 1505 * is the last CPU in the lpl.
1506 1506 */
1507 1507
1508 1508 if ((cpu_lpl->lpl_ncpu == 0) && (t->t_lpl == cpu_lpl))
1509 1509 lgrp_move_thread(t,
1510 1510 lgrp_choose(t, t->t_cpupart), 1);
1511 1511
1512 1512 ASSERT(t->t_lpl->lpl_ncpu > 0);
1513 1513
1514 1514 /*
1515 1515 * Update CPU last ran on if it was this CPU
1516 1516 */
1517 1517
1518 1518 if (t->t_cpu == cp && t->t_bound_cpu != cp) {
1519 1519 t->t_cpu = disp_lowpri_cpu(ncp,
1520 1520 t->t_lpl, t->t_pri, NULL);
1521 1521 }
1522 1522 ASSERT(t->t_cpu != cp || t->t_bound_cpu == cp ||
1523 1523 t->t_weakbound_cpu == cp);
1524 1524 t = t->t_next;
1525 1525
1526 1526 } while (t != curthread);
1527 1527 ASSERT((cp->cpu_flags & (CPU_FAULTED | CPU_SPARE)) == 0);
1528 1528 cp->cpu_flags |= CPU_OFFLINE;
1529 1529 disp_cpu_inactive(cp);
1530 1530 if (!no_quiesce)
1531 1531 cp->cpu_flags |= CPU_QUIESCED;
1532 1532 ncpus_online--;
1533 1533 cpu_set_state(cp);
1534 1534 cpu_inmotion = NULL;
1535 1535 start_cpus();
1536 1536 cpu_stats_kstat_destroy(cp);
1537 1537 cpu_delete_intrstat(cp);
1538 1538 lgrp_kstat_destroy(cp);
1539 1539 }
1540 1540
1541 1541 out:
1542 1542 cpu_inmotion = NULL;
1543 1543
1544 1544 /*
1545 1545 * If we failed, re-enable interrupts.
1546 1546 * Do this even if cpu_intr_disable returned an error, because
1547 1547 * it may have partially disabled interrupts.
1548 1548 */
1549 1549 if (error && intr_enable)
1550 1550 cpu_intr_enable(cp);
1551 1551
1552 1552 /*
1553 1553 * If we failed, but managed to offline the cyclic subsystem on this
1554 1554 * CPU, bring it back online.
1555 1555 */
1556 1556 if (error && cyclic_off)
1557 1557 cyclic_online(cp);
1558 1558
1559 1559 /*
1560 1560 * If we failed, but managed to offline callouts on this CPU,
1561 1561 * bring it back online.
1562 1562 */
1563 1563 if (error && callout_off)
1564 1564 callout_cpu_online(cp);
1565 1565
1566 1566 /*
1567 1567 * If we failed, tell the PG subsystem that the CPU is back
1568 1568 */
1569 1569 pg_cpupart_in(cp, pp);
1570 1570
1571 1571 /*
1572 1572 * If we failed, we need to notify everyone that this CPU is back on.
1573 1573 */
1574 1574 if (error != 0) {
1575 1575 CPU_NEW_GENERATION(cp);
1576 1576 cpu_state_change_notify(cp->cpu_id, CPU_ON);
1577 1577 cpu_state_change_notify(cp->cpu_id, CPU_INTR_ON);
1578 1578 }
1579 1579
1580 1580 return (error);
1581 1581 }
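/*
 * A minimal sketch (illustrative only, not part of this file) of the
 * offline path above; passing CPU_FORCED also unbinds hard-bound
 * threads, as described at the top of cpu_offline().
 */
#if 0
static int
take_cpu_offline_example(processorid_t cid, boolean_t force)
{
	cpu_t *cp;
	int err = EINVAL;

	mutex_enter(&cpu_lock);
	if ((cp = cpu_get(cid)) != NULL)
		err = cpu_offline(cp, force ? CPU_FORCED : 0);
	mutex_exit(&cpu_lock);
	return (err);
}
#endif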
1582 1582
1583 1583 /*
1584 1584 * Mark the indicated CPU as faulted, taking it offline.
1585 1585 */
1586 1586 int
1587 1587 cpu_faulted(cpu_t *cp, int flags)
1588 1588 {
1589 1589 int error = 0;
1590 1590
1591 1591 ASSERT(MUTEX_HELD(&cpu_lock));
1592 1592 ASSERT(!cpu_is_poweredoff(cp));
1593 1593
1594 1594 if (cpu_is_offline(cp)) {
1595 1595 cp->cpu_flags &= ~CPU_SPARE;
1596 1596 cp->cpu_flags |= CPU_FAULTED;
1597 1597 mp_cpu_faulted_enter(cp);
1598 1598 cpu_set_state(cp);
1599 1599 return (0);
1600 1600 }
1601 1601
1602 1602 if ((error = cpu_offline(cp, flags)) == 0) {
1603 1603 cp->cpu_flags |= CPU_FAULTED;
1604 1604 mp_cpu_faulted_enter(cp);
1605 1605 cpu_set_state(cp);
1606 1606 }
1607 1607
1608 1608 return (error);
1609 1609 }
1610 1610
1611 1611 /*
1612 1612 * Mark the indicated CPU as a spare, taking it offline.
1613 1613 */
1614 1614 int
1615 1615 cpu_spare(cpu_t *cp, int flags)
1616 1616 {
1617 1617 int error = 0;
1618 1618
1619 1619 ASSERT(MUTEX_HELD(&cpu_lock));
1620 1620 ASSERT(!cpu_is_poweredoff(cp));
1621 1621
1622 1622 if (cpu_is_offline(cp)) {
1623 1623 if (cp->cpu_flags & CPU_FAULTED) {
1624 1624 cp->cpu_flags &= ~CPU_FAULTED;
1625 1625 mp_cpu_faulted_exit(cp);
1626 1626 }
1627 1627 cp->cpu_flags |= CPU_SPARE;
1628 1628 cpu_set_state(cp);
1629 1629 return (0);
1630 1630 }
1631 1631
1632 1632 if ((error = cpu_offline(cp, flags)) == 0) {
1633 1633 cp->cpu_flags |= CPU_SPARE;
1634 1634 cpu_set_state(cp);
1635 1635 }
1636 1636
1637 1637 return (error);
1638 1638 }
1639 1639
1640 1640 /*
1641 1641 * Take the indicated CPU from poweroff to offline.
1642 1642 */
1643 1643 int
1644 1644 cpu_poweron(cpu_t *cp)
1645 1645 {
1646 1646 int error = ENOTSUP;
1647 1647
1648 1648 ASSERT(MUTEX_HELD(&cpu_lock));
1649 1649 ASSERT(cpu_is_poweredoff(cp));
1650 1650
1651 1651 error = mp_cpu_poweron(cp); /* arch-dep hook */
1652 1652 if (error == 0)
1653 1653 cpu_set_state(cp);
1654 1654
1655 1655 return (error);
1656 1656 }
1657 1657
1658 1658 /*
1659 1659 * Take the indicated CPU from any inactive state to powered off.
1660 1660 */
1661 1661 int
1662 1662 cpu_poweroff(cpu_t *cp)
1663 1663 {
1664 1664 int error = ENOTSUP;
1665 1665
1666 1666 ASSERT(MUTEX_HELD(&cpu_lock));
1667 1667 ASSERT(cpu_is_offline(cp));
1668 1668
1669 1669 if (!(cp->cpu_flags & CPU_QUIESCED))
1670 1670 return (EBUSY); /* not completely idle */
1671 1671
1672 1672 error = mp_cpu_poweroff(cp); /* arch-dep hook */
1673 1673 if (error == 0)
1674 1674 cpu_set_state(cp);
1675 1675
1676 1676 return (error);
1677 1677 }
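
These state routines back the p_online(2) system call; the sketch below is a hedged userland illustration (the CPU id comes from argv and is assumed valid, and a P_SPARE request lands in cpu_spare() above):

    #include <sys/types.h>
    #include <sys/processor.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(int argc, char **argv)
    {
            processorid_t cpu;
            int old;

            if (argc < 2)
                    return (2);
            cpu = (processorid_t)atoi(argv[1]);

            /* P_STATUS queries the state without changing it. */
            if ((old = p_online(cpu, P_STATUS)) == -1) {
                    perror("p_online");
                    return (1);
            }
            (void) printf("cpu %d was in state %d\n", (int)cpu, old);

            /* Retire the CPU as a spare; cpu_spare() does the work. */
            if (p_online(cpu, P_SPARE) == -1)
                    perror("p_online(P_SPARE)");
            return (0);
    }
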
1678 1678
1679 1679 /*
1680 1680 * Initialize the Sequential CPU id lookup table
1681 1681 */
1682 1682 void
1683 1683 cpu_seq_tbl_init()
1684 1684 {
1685 1685 cpu_t **tbl;
1686 1686
1687 1687 tbl = kmem_zalloc(sizeof (struct cpu *) * max_ncpus, KM_SLEEP);
1688 1688 tbl[0] = CPU;
1689 1689
1690 1690 cpu_seq = tbl;
1691 1691 }
1692 1692
1693 1693 /*
1694 1694 * Initialize the CPU lists for the first CPU.
1695 1695 */
1696 1696 void
1697 1697 cpu_list_init(cpu_t *cp)
1698 1698 {
1699 1699 cp->cpu_next = cp;
1700 1700 cp->cpu_prev = cp;
1701 1701 cpu_list = cp;
1702 1702 clock_cpu_list = cp;
1703 1703
1704 1704 cp->cpu_next_onln = cp;
1705 1705 cp->cpu_prev_onln = cp;
1706 1706 cpu_active = cp;
1707 1707
1708 1708 cp->cpu_seqid = 0;
1709 1709 CPUSET_ADD(cpu_seqid_inuse, 0);
1710 1710
1711 1711 /*
1712 1712 * Bootstrap cpu_seq using cpu_list
1713 1713 * The cpu_seq[] table will be dynamically allocated
1714 1714 * when kmem later becomes available (but before going MP)
1715 1715 */
1716 1716 cpu_seq = &cpu_list;
1717 1717
1718 1718 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
1719 1719 cp_default.cp_cpulist = cp;
1720 1720 cp_default.cp_ncpus = 1;
1721 1721 cp->cpu_next_part = cp;
1722 1722 cp->cpu_prev_part = cp;
1723 1723 cp->cpu_part = &cp_default;
1724 1724
1725 1725 CPUSET_ADD(cpu_available, cp->cpu_id);
1726 1726 }
1727 1727
1728 1728 /*
1729 1729 * Insert a CPU into the list of available CPUs.
1730 1730 */
1731 1731 void
1732 1732 cpu_add_unit(cpu_t *cp)
1733 1733 {
1734 1734 int seqid;
1735 1735
1736 1736 ASSERT(MUTEX_HELD(&cpu_lock));
1737 1737 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */
1738 1738
1739 1739 lgrp_config(LGRP_CONFIG_CPU_ADD, (uintptr_t)cp, 0);
1740 1740
1741 1741 /*
1742 1742 * Note: most users of the cpu_list will grab the
1743 1743 	 * cpu_lock to ensure that it isn't modified. However,
1744 1744 	 * certain users can't or won't do that. To allow this
1745 1745 	 * we pause the other cpus. Users who walk the list
1746 1746 	 * without cpu_lock must disable kernel preemption
1747 1747 	 * to ensure that the list isn't modified underneath
1748 1748 	 * them. Also, any cached pointers to cpu structures
1749 1749 	 * must be revalidated by checking to see if the
1750 1750 	 * cpu_next pointer points to itself. This check must
1751 1751 	 * be done with the cpu_lock held or kernel preemption
1752 1752 	 * disabled. This check relies upon the fact that
1753 1753 	 * old cpu structures are not freed or cleared after
1754 1754 	 * they are removed from the cpu_list.
1755 1755 *
1756 1756 * Note that the clock code walks the cpu list dereferencing
1757 1757 * the cpu_part pointer, so we need to initialize it before
1758 1758 * adding the cpu to the list.
1759 1759 */
1760 1760 cp->cpu_part = &cp_default;
1761 1761 (void) pause_cpus(NULL);
1762 1762 cp->cpu_next = cpu_list;
1763 1763 cp->cpu_prev = cpu_list->cpu_prev;
1764 1764 cpu_list->cpu_prev->cpu_next = cp;
1765 1765 cpu_list->cpu_prev = cp;
1766 1766 start_cpus();
1767 1767
1768 1768 for (seqid = 0; CPU_IN_SET(cpu_seqid_inuse, seqid); seqid++)
1769 1769 continue;
1770 1770 CPUSET_ADD(cpu_seqid_inuse, seqid);
1771 1771 cp->cpu_seqid = seqid;
1772 1772
1773 1773 if (seqid > max_cpu_seqid_ever)
1774 1774 max_cpu_seqid_ever = seqid;
1775 1775
1776 1776 ASSERT(ncpus < max_ncpus);
1777 1777 ncpus++;
1778 1778 cp->cpu_cache_offset = KMEM_CPU_CACHE_OFFSET(cp->cpu_seqid);
1779 1779 cpu[cp->cpu_id] = cp;
1780 1780 CPUSET_ADD(cpu_available, cp->cpu_id);
1781 1781 cpu_seq[cp->cpu_seqid] = cp;
1782 1782
1783 1783 /*
1784 1784 	 * Allocate a pause thread for this CPU.
1785 1785 */
1786 1786 cpu_pause_alloc(cp);
1787 1787
1788 1788 /*
1789 1789 * So that new CPUs won't have NULL prev_onln and next_onln pointers,
1790 1790 * link them into a list of just that CPU.
1791 1791 * This is so that disp_lowpri_cpu will work for thread_create in
1792 1792 * pause_cpus() when called from the startup thread in a new CPU.
1793 1793 */
1794 1794 cp->cpu_next_onln = cp;
1795 1795 cp->cpu_prev_onln = cp;
1796 1796 cpu_info_kstat_create(cp);
1797 1797 cp->cpu_next_part = cp;
1798 1798 cp->cpu_prev_part = cp;
1799 1799
1800 1800 init_cpu_mstate(cp, CMS_SYSTEM);
1801 1801
1802 1802 pool_pset_mod = gethrtime();
1803 1803 }
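
To make the locking contract above concrete, here is a minimal sketch of a walker that follows those rules; walk_cpus_nolock() and cpu_still_listed() are illustrative names, not functions in this file, and the revalidation test uses the cpu_next != NULL convention spelled out in cpu_del_unit() below:

    /*
     * Hypothetical walker: no cpu_lock, so kernel preemption stays
     * disabled for the whole traversal to keep the list shape fixed.
     */
    static void
    walk_cpus_nolock(void (*func)(cpu_t *))
    {
            cpu_t *cp;

            kpreempt_disable();
            cp = cpu_list;
            do {
                    func(cp);
                    cp = cp->cpu_next;
            } while (cp != cpu_list);
            kpreempt_enable();
    }

    /*
     * Hypothetical revalidation of a cached cpu_t: deleted CPUs keep
     * their structure around but have cpu_next cleared by
     * cpu_del_unit(), so a NULL cpu_next means "no longer listed".
     */
    static boolean_t
    cpu_still_listed(cpu_t *cp)
    {
            boolean_t listed;

            kpreempt_disable();
            listed = (cp->cpu_next != NULL);
            kpreempt_enable();
            return (listed);
    }
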
1804 1804
1805 1805 /*
1806 1806 * Do the opposite of cpu_add_unit().
1807 1807 */
1808 1808 void
1809 1809 cpu_del_unit(int cpuid)
1810 1810 {
1811 1811 struct cpu *cp, *cpnext;
1812 1812
1813 1813 ASSERT(MUTEX_HELD(&cpu_lock));
1814 1814 cp = cpu[cpuid];
1815 1815 ASSERT(cp != NULL);
1816 1816
1817 1817 ASSERT(cp->cpu_next_onln == cp);
1818 1818 ASSERT(cp->cpu_prev_onln == cp);
1819 1819 ASSERT(cp->cpu_next_part == cp);
1820 1820 ASSERT(cp->cpu_prev_part == cp);
1821 1821
1822 1822 /*
1823 1823 * Tear down the CPU's physical ID cache, and update any
1824 1824 * processor groups
1825 1825 */
1826 1826 pg_cpu_fini(cp, NULL);
1827 1827 pghw_physid_destroy(cp);
1828 1828
1829 1829 /*
1830 1830 * Destroy kstat stuff.
1831 1831 */
1832 1832 cpu_info_kstat_destroy(cp);
1833 1833 term_cpu_mstate(cp);
1834 1834 /*
1835 1835 * Free up pause thread.
1836 1836 */
1837 1837 cpu_pause_free(cp);
1838 1838 CPUSET_DEL(cpu_available, cp->cpu_id);
1839 1839 cpu[cp->cpu_id] = NULL;
1840 1840 cpu_seq[cp->cpu_seqid] = NULL;
1841 1841
1842 1842 /*
1843 1843 * The clock thread and mutex_vector_enter cannot hold the
1844 1844 	 * cpu_lock while traversing the cpu list; therefore we pause
1845 1845 	 * all other threads by pausing the other cpus. These, and any
1846 1846 	 * other routines that hold cpu pointers while possibly sleeping,
1847 1847 	 * must be sure to call kpreempt_disable before processing the
1848 1848 * list and be sure to check that the cpu has not been deleted
1849 1849 * after any sleeps (check cp->cpu_next != NULL). We guarantee
1850 1850 * to keep the deleted cpu structure around.
1851 1851 *
1852 1852 * Note that this MUST be done AFTER cpu_available
1853 1853 * has been updated so that we don't waste time
1854 1854 * trying to pause the cpu we're trying to delete.
1855 1855 */
1856 1856 (void) pause_cpus(NULL);
1857 1857
1858 1858 cpnext = cp->cpu_next;
1859 1859 cp->cpu_prev->cpu_next = cp->cpu_next;
1860 1860 cp->cpu_next->cpu_prev = cp->cpu_prev;
1861 1861 if (cp == cpu_list)
1862 1862 cpu_list = cpnext;
1863 1863
1864 1864 /*
1865 1865 * Signals that the cpu has been deleted (see above).
1866 1866 */
1867 1867 cp->cpu_next = NULL;
1868 1868 cp->cpu_prev = NULL;
1869 1869
1870 1870 start_cpus();
1871 1871
1872 1872 CPUSET_DEL(cpu_seqid_inuse, cp->cpu_seqid);
1873 1873 ncpus--;
1874 1874 lgrp_config(LGRP_CONFIG_CPU_DEL, (uintptr_t)cp, 0);
1875 1875
1876 1876 pool_pset_mod = gethrtime();
1877 1877 }
1878 1878
1879 1879 /*
1880 1880 * Add a CPU to the list of active CPUs.
1881 1881 * This routine must not get any locks, because other CPUs are paused.
1882 1882 */
1883 1883 static void
1884 1884 cpu_add_active_internal(cpu_t *cp)
1885 1885 {
1886 1886 cpupart_t *pp = cp->cpu_part;
1887 1887
1888 1888 ASSERT(MUTEX_HELD(&cpu_lock));
1889 1889 ASSERT(cpu_list != NULL); /* list started in cpu_list_init */
1890 1890
1891 1891 ncpus_online++;
1892 1892 cpu_set_state(cp);
1893 1893 cp->cpu_next_onln = cpu_active;
1894 1894 cp->cpu_prev_onln = cpu_active->cpu_prev_onln;
1895 1895 cpu_active->cpu_prev_onln->cpu_next_onln = cp;
1896 1896 cpu_active->cpu_prev_onln = cp;
1897 1897
1898 1898 if (pp->cp_cpulist) {
1899 1899 cp->cpu_next_part = pp->cp_cpulist;
1900 1900 cp->cpu_prev_part = pp->cp_cpulist->cpu_prev_part;
1901 1901 pp->cp_cpulist->cpu_prev_part->cpu_next_part = cp;
1902 1902 pp->cp_cpulist->cpu_prev_part = cp;
1903 1903 } else {
1904 1904 ASSERT(pp->cp_ncpus == 0);
1905 1905 pp->cp_cpulist = cp->cpu_next_part = cp->cpu_prev_part = cp;
1906 1906 }
1907 1907 pp->cp_ncpus++;
1908 1908 if (pp->cp_ncpus == 1) {
1909 1909 cp_numparts_nonempty++;
1910 1910 ASSERT(cp_numparts_nonempty != 0);
1911 1911 }
1912 1912
1913 1913 pg_cpu_active(cp);
1914 1914 lgrp_config(LGRP_CONFIG_CPU_ONLINE, (uintptr_t)cp, 0);
1915 1915
1916 1916 bzero(&cp->cpu_loadavg, sizeof (cp->cpu_loadavg));
1917 1917 }
1918 1918
1919 1919 /*
1920 1920 * Add a CPU to the list of active CPUs.
1921 1921 * This is called from machine-dependent layers when a new CPU is started.
1922 1922 */
1923 1923 void
1924 1924 cpu_add_active(cpu_t *cp)
1925 1925 {
1926 1926 pg_cpupart_in(cp, cp->cpu_part);
1927 1927
1928 1928 pause_cpus(NULL);
1929 1929 cpu_add_active_internal(cp);
1930 1930 start_cpus();
1931 1931
1932 1932 cpu_stats_kstat_create(cp);
1933 1933 cpu_create_intrstat(cp);
1934 1934 lgrp_kstat_create(cp);
1935 1935 cpu_state_change_notify(cp->cpu_id, CPU_INIT);
1936 1936 }
1937 1937
1938 1938
1939 1939 /*
1940 1940 * Remove a CPU from the list of active CPUs.
1941 1941 * This routine must not get any locks, because other CPUs are paused.
1942 1942 */
1943 1943 /* ARGSUSED */
1944 1944 static void
1945 1945 cpu_remove_active(cpu_t *cp)
1946 1946 {
1947 1947 cpupart_t *pp = cp->cpu_part;
1948 1948
1949 1949 ASSERT(MUTEX_HELD(&cpu_lock));
1950 1950 ASSERT(cp->cpu_next_onln != cp); /* not the last one */
1951 1951 ASSERT(cp->cpu_prev_onln != cp); /* not the last one */
1952 1952
1953 1953 pg_cpu_inactive(cp);
1954 1954
1955 1955 lgrp_config(LGRP_CONFIG_CPU_OFFLINE, (uintptr_t)cp, 0);
1956 1956
1957 1957 if (cp == clock_cpu_list)
1958 1958 clock_cpu_list = cp->cpu_next_onln;
1959 1959
1960 1960 cp->cpu_prev_onln->cpu_next_onln = cp->cpu_next_onln;
1961 1961 cp->cpu_next_onln->cpu_prev_onln = cp->cpu_prev_onln;
1962 1962 if (cpu_active == cp) {
1963 1963 cpu_active = cp->cpu_next_onln;
1964 1964 }
1965 1965 cp->cpu_next_onln = cp;
1966 1966 cp->cpu_prev_onln = cp;
1967 1967
1968 1968 cp->cpu_prev_part->cpu_next_part = cp->cpu_next_part;
1969 1969 cp->cpu_next_part->cpu_prev_part = cp->cpu_prev_part;
1970 1970 if (pp->cp_cpulist == cp) {
1971 1971 pp->cp_cpulist = cp->cpu_next_part;
1972 1972 ASSERT(pp->cp_cpulist != cp);
1973 1973 }
1974 1974 cp->cpu_next_part = cp;
1975 1975 cp->cpu_prev_part = cp;
1976 1976 pp->cp_ncpus--;
1977 1977 if (pp->cp_ncpus == 0) {
1978 1978 cp_numparts_nonempty--;
1979 1979 ASSERT(cp_numparts_nonempty != 0);
1980 1980 }
1981 1981 }
1982 1982
1983 1983 /*
1984 1984 	 * Routine used to set up a newly inserted CPU in preparation for starting
1985 1985 * it running code.
1986 1986 */
1987 1987 int
1988 1988 cpu_configure(int cpuid)
1989 1989 {
1990 1990 int retval = 0;
1991 1991
1992 1992 ASSERT(MUTEX_HELD(&cpu_lock));
1993 1993
1994 1994 /*
1995 1995 * Some structures are statically allocated based upon
1996 1996 * the maximum number of cpus the system supports. Do not
1997 1997 * try to add anything beyond this limit.
1998 1998 */
1999 1999 if (cpuid < 0 || cpuid >= NCPU) {
2000 2000 return (EINVAL);
2001 2001 }
2002 2002
2003 2003 if ((cpu[cpuid] != NULL) && (cpu[cpuid]->cpu_flags != 0)) {
2004 2004 return (EALREADY);
2005 2005 }
2006 2006
2007 2007 if ((retval = mp_cpu_configure(cpuid)) != 0) {
2008 2008 return (retval);
2009 2009 }
2010 2010
2011 2011 cpu[cpuid]->cpu_flags = CPU_QUIESCED | CPU_OFFLINE | CPU_POWEROFF;
2012 2012 cpu_set_state(cpu[cpuid]);
2013 2013 retval = cpu_state_change_hooks(cpuid, CPU_CONFIG, CPU_UNCONFIG);
2014 2014 if (retval != 0)
2015 2015 (void) mp_cpu_unconfigure(cpuid);
2016 2016
2017 2017 return (retval);
2018 2018 }
2019 2019
2020 2020 /*
2021 2021 	 * Routine used to clean up a CPU that has been powered off. This will
2022 2022 * destroy all per-cpu information related to this cpu.
2023 2023 */
2024 2024 int
2025 2025 cpu_unconfigure(int cpuid)
2026 2026 {
2027 2027 int error;
2028 2028
2029 2029 ASSERT(MUTEX_HELD(&cpu_lock));
2030 2030
2031 2031 if (cpu[cpuid] == NULL) {
2032 2032 return (ENODEV);
2033 2033 }
2034 2034
2035 2035 if (cpu[cpuid]->cpu_flags == 0) {
2036 2036 return (EALREADY);
2037 2037 }
2038 2038
2039 2039 if ((cpu[cpuid]->cpu_flags & CPU_POWEROFF) == 0) {
2040 2040 return (EBUSY);
2041 2041 }
2042 2042
2043 2043 if (cpu[cpuid]->cpu_props != NULL) {
2044 2044 (void) nvlist_free(cpu[cpuid]->cpu_props);
2045 2045 cpu[cpuid]->cpu_props = NULL;
2046 2046 }
2047 2047
2048 2048 error = cpu_state_change_hooks(cpuid, CPU_UNCONFIG, CPU_CONFIG);
2049 2049
2050 2050 if (error != 0)
2051 2051 return (error);
2052 2052
2053 2053 return (mp_cpu_unconfigure(cpuid));
2054 2054 }
2055 2055
2056 2056 /*
2057 2057 * Routines for registering and de-registering cpu_setup callback functions.
2058 2058 *
2059 2059 * Caller's context
2060 2060 * These routines must not be called from a driver's attach(9E) or
2061 2061 * detach(9E) entry point.
2062 2062 *
2063 2063 * NOTE: CPU callbacks should not block. They are called with cpu_lock held.
2064 2064 */
2065 2065
2066 2066 /*
2067 2067 * Ideally, these would be dynamically allocated and put into a linked
2068 2068 * list; however that is not feasible because the registration routine
2069 2069 * has to be available before the kmem allocator is working (in fact,
2070 2070 * it is called by the kmem allocator init code). In any case, there
2071 2071 * are quite a few extra entries for future users.
2072 2072 */
2073 2073 #define NCPU_SETUPS 20
2074 2074
2075 2075 struct cpu_setup {
2076 2076 cpu_setup_func_t *func;
2077 2077 void *arg;
2078 2078 } cpu_setups[NCPU_SETUPS];
2079 2079
2080 2080 void
2081 2081 register_cpu_setup_func(cpu_setup_func_t *func, void *arg)
2082 2082 {
2083 2083 int i;
2084 2084
2085 2085 ASSERT(MUTEX_HELD(&cpu_lock));
2086 2086
2087 2087 for (i = 0; i < NCPU_SETUPS; i++)
2088 2088 if (cpu_setups[i].func == NULL)
2089 2089 break;
2090 2090 if (i >= NCPU_SETUPS)
2091 2091 cmn_err(CE_PANIC, "Ran out of cpu_setup callback entries");
2092 2092
2093 2093 cpu_setups[i].func = func;
2094 2094 cpu_setups[i].arg = arg;
2095 2095 }
2096 2096
2097 2097 void
2098 2098 unregister_cpu_setup_func(cpu_setup_func_t *func, void *arg)
2099 2099 {
2100 2100 int i;
2101 2101
2102 2102 ASSERT(MUTEX_HELD(&cpu_lock));
2103 2103
2104 2104 for (i = 0; i < NCPU_SETUPS; i++)
2105 2105 if ((cpu_setups[i].func == func) &&
2106 2106 (cpu_setups[i].arg == arg))
2107 2107 break;
2108 2108 if (i >= NCPU_SETUPS)
2109 2109 cmn_err(CE_PANIC, "Could not find cpu_setup callback to "
2110 2110 "deregister");
2111 2111
2112 2112 cpu_setups[i].func = NULL;
2113 2113 cpu_setups[i].arg = 0;
2114 2114 }
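
A typical client of this pair looks like the sketch below; my_cpu_event() and my_init() are illustrative names. Per the note above, the callback runs with cpu_lock held and must not block, and a nonzero return is treated as a veto by cpu_state_change_hooks():

    /* Hypothetical cpu_setup client. */
    static int
    my_cpu_event(cpu_setup_t what, int id, void *arg)
    {
            switch (what) {
            case CPU_ON:
                    /* prime per-CPU state for cpu "id" here */
                    break;
            case CPU_OFF:
                    /* ... and quiesce it here */
                    break;
            default:
                    break;
            }
            return (0);             /* nonzero would veto the change */
    }

    static void
    my_init(void)
    {
            mutex_enter(&cpu_lock);
            register_cpu_setup_func(my_cpu_event, NULL);
            mutex_exit(&cpu_lock);
    }
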
2115 2115
2116 2116 /*
2117 2117 	 * Call any state change hooks for this CPU, ignoring any errors.
2118 2118 */
2119 2119 void
2120 2120 cpu_state_change_notify(int id, cpu_setup_t what)
2121 2121 {
2122 2122 int i;
2123 2123
2124 2124 ASSERT(MUTEX_HELD(&cpu_lock));
2125 2125
2126 2126 for (i = 0; i < NCPU_SETUPS; i++) {
2127 2127 if (cpu_setups[i].func != NULL) {
2128 2128 cpu_setups[i].func(what, id, cpu_setups[i].arg);
2129 2129 }
2130 2130 }
2131 2131 }
2132 2132
2133 2133 /*
2134 2134 	 * Call any state change hooks for this CPU, undoing them if an error is found.
2135 2135 */
2136 2136 static int
2137 2137 cpu_state_change_hooks(int id, cpu_setup_t what, cpu_setup_t undo)
2138 2138 {
2139 2139 int i;
2140 2140 int retval = 0;
2141 2141
2142 2142 ASSERT(MUTEX_HELD(&cpu_lock));
2143 2143
2144 2144 for (i = 0; i < NCPU_SETUPS; i++) {
2145 2145 if (cpu_setups[i].func != NULL) {
2146 2146 retval = cpu_setups[i].func(what, id,
2147 2147 cpu_setups[i].arg);
2148 2148 if (retval) {
2149 2149 for (i--; i >= 0; i--) {
2150 2150 if (cpu_setups[i].func != NULL)
2151 2151 cpu_setups[i].func(undo,
2152 2152 id, cpu_setups[i].arg);
2153 2153 }
2154 2154 break;
2155 2155 }
2156 2156 }
2157 2157 }
2158 2158 return (retval);
2159 2159 }
2160 2160
2161 2161 /*
2162 2162 * Export information about this CPU via the kstat mechanism.
2163 2163 */
2164 2164 static struct {
2165 2165 kstat_named_t ci_state;
2166 2166 kstat_named_t ci_state_begin;
2167 2167 kstat_named_t ci_cpu_type;
2168 2168 kstat_named_t ci_fpu_type;
2169 2169 kstat_named_t ci_clock_MHz;
2170 2170 kstat_named_t ci_chip_id;
2171 2171 kstat_named_t ci_implementation;
2172 2172 kstat_named_t ci_brandstr;
2173 2173 kstat_named_t ci_core_id;
2174 2174 kstat_named_t ci_curr_clock_Hz;
2175 2175 kstat_named_t ci_supp_freq_Hz;
2176 2176 kstat_named_t ci_pg_id;
2177 2177 #if defined(__sparcv9)
2178 2178 kstat_named_t ci_device_ID;
2179 2179 kstat_named_t ci_cpu_fru;
2180 2180 #endif
2181 2181 #if defined(__x86)
2182 2182 kstat_named_t ci_vendorstr;
2183 2183 kstat_named_t ci_family;
2184 2184 kstat_named_t ci_model;
2185 2185 kstat_named_t ci_step;
2186 2186 kstat_named_t ci_clogid;
2187 2187 kstat_named_t ci_pkg_core_id;
2188 2188 kstat_named_t ci_ncpuperchip;
2189 2189 kstat_named_t ci_ncoreperchip;
2190 2190 kstat_named_t ci_max_cstates;
2191 2191 kstat_named_t ci_curr_cstate;
2192 2192 kstat_named_t ci_cacheid;
2193 2193 kstat_named_t ci_sktstr;
2194 2194 #endif
2195 2195 } cpu_info_template = {
2196 2196 { "state", KSTAT_DATA_CHAR },
2197 2197 { "state_begin", KSTAT_DATA_LONG },
2198 2198 { "cpu_type", KSTAT_DATA_CHAR },
2199 2199 { "fpu_type", KSTAT_DATA_CHAR },
2200 2200 { "clock_MHz", KSTAT_DATA_LONG },
2201 2201 { "chip_id", KSTAT_DATA_LONG },
2202 2202 { "implementation", KSTAT_DATA_STRING },
2203 2203 { "brand", KSTAT_DATA_STRING },
2204 2204 { "core_id", KSTAT_DATA_LONG },
2205 2205 { "current_clock_Hz", KSTAT_DATA_UINT64 },
2206 2206 { "supported_frequencies_Hz", KSTAT_DATA_STRING },
2207 2207 { "pg_id", KSTAT_DATA_LONG },
2208 2208 #if defined(__sparcv9)
2209 2209 { "device_ID", KSTAT_DATA_UINT64 },
2210 2210 { "cpu_fru", KSTAT_DATA_STRING },
2211 2211 #endif
2212 2212 #if defined(__x86)
2213 2213 { "vendor_id", KSTAT_DATA_STRING },
2214 2214 { "family", KSTAT_DATA_INT32 },
2215 2215 { "model", KSTAT_DATA_INT32 },
2216 2216 { "stepping", KSTAT_DATA_INT32 },
2217 2217 { "clog_id", KSTAT_DATA_INT32 },
2218 2218 { "pkg_core_id", KSTAT_DATA_LONG },
2219 2219 { "ncpu_per_chip", KSTAT_DATA_INT32 },
2220 2220 { "ncore_per_chip", KSTAT_DATA_INT32 },
2221 2221 { "supported_max_cstates", KSTAT_DATA_INT32 },
2222 2222 { "current_cstate", KSTAT_DATA_INT32 },
2223 2223 { "cache_id", KSTAT_DATA_INT32 },
2224 2224 { "socket_type", KSTAT_DATA_STRING },
2225 2225 #endif
2226 2226 };
2227 2227
2228 2228 static kmutex_t cpu_info_template_lock;
2229 2229
2230 2230 static int
2231 2231 cpu_info_kstat_update(kstat_t *ksp, int rw)
2232 2232 {
2233 2233 cpu_t *cp = ksp->ks_private;
2234 2234 const char *pi_state;
2235 2235
2236 2236 if (rw == KSTAT_WRITE)
2237 2237 return (EACCES);
2238 2238
2239 2239 #if defined(__x86)
2240 2240 /* Is the cpu still initialising itself? */
2241 2241 if (cpuid_checkpass(cp, 1) == 0)
2242 2242 return (ENXIO);
2243 2243 #endif
2244 2244 switch (cp->cpu_type_info.pi_state) {
2245 2245 case P_ONLINE:
2246 2246 pi_state = PS_ONLINE;
2247 2247 break;
2248 2248 case P_POWEROFF:
2249 2249 pi_state = PS_POWEROFF;
2250 2250 break;
2251 2251 case P_NOINTR:
2252 2252 pi_state = PS_NOINTR;
2253 2253 break;
2254 2254 case P_FAULTED:
2255 2255 pi_state = PS_FAULTED;
2256 2256 break;
2257 2257 case P_SPARE:
2258 2258 pi_state = PS_SPARE;
2259 2259 break;
2260 2260 case P_OFFLINE:
2261 2261 pi_state = PS_OFFLINE;
2262 2262 break;
2263 2263 default:
2264 2264 pi_state = "unknown";
2265 2265 }
2266 2266 (void) strcpy(cpu_info_template.ci_state.value.c, pi_state);
2267 2267 cpu_info_template.ci_state_begin.value.l = cp->cpu_state_begin;
2268 2268 (void) strncpy(cpu_info_template.ci_cpu_type.value.c,
2269 2269 cp->cpu_type_info.pi_processor_type, 15);
2270 2270 (void) strncpy(cpu_info_template.ci_fpu_type.value.c,
2271 2271 cp->cpu_type_info.pi_fputypes, 15);
2272 2272 cpu_info_template.ci_clock_MHz.value.l = cp->cpu_type_info.pi_clock;
2273 2273 cpu_info_template.ci_chip_id.value.l =
2274 2274 pg_plat_hw_instance_id(cp, PGHW_CHIP);
2275 2275 kstat_named_setstr(&cpu_info_template.ci_implementation,
2276 2276 cp->cpu_idstr);
2277 2277 kstat_named_setstr(&cpu_info_template.ci_brandstr, cp->cpu_brandstr);
2278 2278 cpu_info_template.ci_core_id.value.l = pg_plat_get_core_id(cp);
2279 2279 cpu_info_template.ci_curr_clock_Hz.value.ui64 =
2280 2280 cp->cpu_curr_clock;
2281 2281 cpu_info_template.ci_pg_id.value.l =
2282 2282 cp->cpu_pg && cp->cpu_pg->cmt_lineage ?
2283 2283 cp->cpu_pg->cmt_lineage->pg_id : -1;
2284 2284 kstat_named_setstr(&cpu_info_template.ci_supp_freq_Hz,
2285 2285 cp->cpu_supp_freqs);
2286 2286 #if defined(__sparcv9)
2287 2287 cpu_info_template.ci_device_ID.value.ui64 =
2288 2288 cpunodes[cp->cpu_id].device_id;
2289 2289 kstat_named_setstr(&cpu_info_template.ci_cpu_fru, cpu_fru_fmri(cp));
2290 2290 #endif
2291 2291 #if defined(__x86)
2292 2292 kstat_named_setstr(&cpu_info_template.ci_vendorstr,
2293 2293 cpuid_getvendorstr(cp));
2294 2294 cpu_info_template.ci_family.value.l = cpuid_getfamily(cp);
2295 2295 cpu_info_template.ci_model.value.l = cpuid_getmodel(cp);
2296 2296 cpu_info_template.ci_step.value.l = cpuid_getstep(cp);
2297 2297 cpu_info_template.ci_clogid.value.l = cpuid_get_clogid(cp);
2298 2298 cpu_info_template.ci_ncpuperchip.value.l = cpuid_get_ncpu_per_chip(cp);
2299 2299 cpu_info_template.ci_ncoreperchip.value.l =
2300 2300 cpuid_get_ncore_per_chip(cp);
2301 2301 cpu_info_template.ci_pkg_core_id.value.l = cpuid_get_pkgcoreid(cp);
2302 2302 cpu_info_template.ci_max_cstates.value.l = cp->cpu_m.max_cstates;
2303 2303 cpu_info_template.ci_curr_cstate.value.l = cpu_idle_get_cpu_state(cp);
2304 2304 cpu_info_template.ci_cacheid.value.i32 = cpuid_get_cacheid(cp);
2305 2305 kstat_named_setstr(&cpu_info_template.ci_sktstr,
2306 2306 cpuid_getsocketstr(cp));
2307 2307 #endif
2308 2308
2309 2309 return (0);
2310 2310 }
2311 2311
2312 2312 static void
2313 2313 cpu_info_kstat_create(cpu_t *cp)
2314 2314 {
2315 2315 zoneid_t zoneid;
2316 2316
2317 2317 ASSERT(MUTEX_HELD(&cpu_lock));
2318 2318
2319 2319 if (pool_pset_enabled())
2320 2320 zoneid = GLOBAL_ZONEID;
2321 2321 else
2322 2322 zoneid = ALL_ZONES;
2323 2323 if ((cp->cpu_info_kstat = kstat_create_zone("cpu_info", cp->cpu_id,
2324 2324 NULL, "misc", KSTAT_TYPE_NAMED,
2325 2325 sizeof (cpu_info_template) / sizeof (kstat_named_t),
2326 2326 KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE, zoneid)) != NULL) {
2327 2327 cp->cpu_info_kstat->ks_data_size += 2 * CPU_IDSTRLEN;
2328 2328 #if defined(__sparcv9)
2329 2329 cp->cpu_info_kstat->ks_data_size +=
2330 2330 strlen(cpu_fru_fmri(cp)) + 1;
2331 2331 #endif
2332 2332 #if defined(__x86)
2333 2333 cp->cpu_info_kstat->ks_data_size += X86_VENDOR_STRLEN;
2334 2334 #endif
2335 2335 if (cp->cpu_supp_freqs != NULL)
2336 2336 cp->cpu_info_kstat->ks_data_size +=
2337 2337 strlen(cp->cpu_supp_freqs) + 1;
2338 2338 cp->cpu_info_kstat->ks_lock = &cpu_info_template_lock;
2339 2339 cp->cpu_info_kstat->ks_data = &cpu_info_template;
2340 2340 cp->cpu_info_kstat->ks_private = cp;
2341 2341 cp->cpu_info_kstat->ks_update = cpu_info_kstat_update;
2342 2342 kstat_install(cp->cpu_info_kstat);
2343 2343 }
2344 2344 }
2345 2345
2346 2346 static void
2347 2347 cpu_info_kstat_destroy(cpu_t *cp)
2348 2348 {
2349 2349 ASSERT(MUTEX_HELD(&cpu_lock));
2350 2350
2351 2351 kstat_delete(cp->cpu_info_kstat);
2352 2352 cp->cpu_info_kstat = NULL;
2353 2353 }
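
From userland, this kstat is consumed through libkstat(3LIB); a hedged sketch for CPU 0 (the field names match cpu_info_template above; compile with -lkstat):

    #include <kstat.h>
    #include <stdio.h>

    int
    main(void)
    {
            kstat_ctl_t *kc;
            kstat_t *ksp;
            kstat_named_t *kn;

            if ((kc = kstat_open()) == NULL)
                    return (1);
            /* module "cpu_info", instance 0; a NULL name matches any */
            if ((ksp = kstat_lookup(kc, "cpu_info", 0, NULL)) == NULL ||
                kstat_read(kc, ksp, NULL) == -1) {
                    (void) kstat_close(kc);
                    return (1);
            }
            if ((kn = kstat_data_lookup(ksp, "state")) != NULL)
                    (void) printf("state: %s\n", kn->value.c);
            if ((kn = kstat_data_lookup(ksp, "clock_MHz")) != NULL)
                    (void) printf("clock: %ld MHz\n", kn->value.l);
            (void) kstat_close(kc);
            return (0);
    }
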
2354 2354
2355 2355 /*
2356 2356 * Create and install kstats for the boot CPU.
2357 2357 */
2358 2358 void
2359 2359 cpu_kstat_init(cpu_t *cp)
2360 2360 {
2361 2361 mutex_enter(&cpu_lock);
2362 2362 cpu_info_kstat_create(cp);
2363 2363 cpu_stats_kstat_create(cp);
2364 2364 cpu_create_intrstat(cp);
2365 2365 cpu_set_state(cp);
2366 2366 mutex_exit(&cpu_lock);
2367 2367 }
2368 2368
2369 2369 /*
2370 2370 * Make visible to the zone that subset of the cpu information that would be
2371 2371 * initialized when a cpu is configured (but still offline).
2372 2372 */
2373 2373 void
2374 2374 cpu_visibility_configure(cpu_t *cp, zone_t *zone)
2375 2375 {
2376 2376 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2377 2377
2378 2378 ASSERT(MUTEX_HELD(&cpu_lock));
2379 2379 ASSERT(pool_pset_enabled());
2380 2380 ASSERT(cp != NULL);
2381 2381
2382 2382 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2383 2383 zone->zone_ncpus++;
2384 2384 ASSERT(zone->zone_ncpus <= ncpus);
2385 2385 }
2386 2386 if (cp->cpu_info_kstat != NULL)
2387 2387 kstat_zone_add(cp->cpu_info_kstat, zoneid);
2388 2388 }
2389 2389
2390 2390 /*
2391 2391 * Make visible to the zone that subset of the cpu information that would be
2392 2392 * initialized when a previously configured cpu is onlined.
2393 2393 */
2394 2394 void
2395 2395 cpu_visibility_online(cpu_t *cp, zone_t *zone)
2396 2396 {
2397 2397 kstat_t *ksp;
2398 2398 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */
2399 2399 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2400 2400 processorid_t cpun;
2401 2401
2402 2402 ASSERT(MUTEX_HELD(&cpu_lock));
2403 2403 ASSERT(pool_pset_enabled());
2404 2404 ASSERT(cp != NULL);
2405 2405 ASSERT(cpu_is_active(cp));
2406 2406
2407 2407 cpun = cp->cpu_id;
2408 2408 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2409 2409 zone->zone_ncpus_online++;
2410 2410 ASSERT(zone->zone_ncpus_online <= ncpus_online);
2411 2411 }
2412 2412 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
2413 2413 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
2414 2414 != NULL) {
2415 2415 kstat_zone_add(ksp, zoneid);
2416 2416 kstat_rele(ksp);
2417 2417 }
2418 2418 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
2419 2419 kstat_zone_add(ksp, zoneid);
2420 2420 kstat_rele(ksp);
2421 2421 }
2422 2422 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
2423 2423 kstat_zone_add(ksp, zoneid);
2424 2424 kstat_rele(ksp);
2425 2425 }
2426 2426 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
2427 2427 NULL) {
2428 2428 kstat_zone_add(ksp, zoneid);
2429 2429 kstat_rele(ksp);
2430 2430 }
2431 2431 }
2432 2432
2433 2433 /*
2434 2434 * Update relevant kstats such that cpu is now visible to processes
2435 2435 * executing in specified zone.
2436 2436 */
2437 2437 void
2438 2438 cpu_visibility_add(cpu_t *cp, zone_t *zone)
2439 2439 {
2440 2440 cpu_visibility_configure(cp, zone);
2441 2441 if (cpu_is_active(cp))
2442 2442 cpu_visibility_online(cp, zone);
2443 2443 }
2444 2444
2445 2445 /*
2446 2446 * Make invisible to the zone that subset of the cpu information that would be
2447 2447 * torn down when a previously offlined cpu is unconfigured.
2448 2448 */
2449 2449 void
2450 2450 cpu_visibility_unconfigure(cpu_t *cp, zone_t *zone)
2451 2451 {
2452 2452 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2453 2453
2454 2454 ASSERT(MUTEX_HELD(&cpu_lock));
2455 2455 ASSERT(pool_pset_enabled());
2456 2456 ASSERT(cp != NULL);
2457 2457
2458 2458 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2459 2459 ASSERT(zone->zone_ncpus != 0);
2460 2460 zone->zone_ncpus--;
2461 2461 }
2462 2462 if (cp->cpu_info_kstat)
2463 2463 kstat_zone_remove(cp->cpu_info_kstat, zoneid);
2464 2464 }
2465 2465
2466 2466 /*
2467 2467 * Make invisible to the zone that subset of the cpu information that would be
2468 2468 * torn down when a cpu is offlined (but still configured).
2469 2469 */
2470 2470 void
2471 2471 cpu_visibility_offline(cpu_t *cp, zone_t *zone)
2472 2472 {
2473 2473 kstat_t *ksp;
2474 2474 char name[sizeof ("cpu_stat") + 10]; /* enough for 32-bit cpuids */
2475 2475 zoneid_t zoneid = zone ? zone->zone_id : ALL_ZONES;
2476 2476 processorid_t cpun;
2477 2477
2478 2478 ASSERT(MUTEX_HELD(&cpu_lock));
2479 2479 ASSERT(pool_pset_enabled());
2480 2480 ASSERT(cp != NULL);
2481 2481 ASSERT(cpu_is_active(cp));
2482 2482
2483 2483 cpun = cp->cpu_id;
2484 2484 if (zoneid != ALL_ZONES && zoneid != GLOBAL_ZONEID) {
2485 2485 ASSERT(zone->zone_ncpus_online != 0);
2486 2486 zone->zone_ncpus_online--;
2487 2487 }
2488 2488
2489 2489 if ((ksp = kstat_hold_byname("cpu", cpun, "intrstat", ALL_ZONES)) !=
2490 2490 NULL) {
2491 2491 kstat_zone_remove(ksp, zoneid);
2492 2492 kstat_rele(ksp);
2493 2493 }
2494 2494 if ((ksp = kstat_hold_byname("cpu", cpun, "vm", ALL_ZONES)) != NULL) {
2495 2495 kstat_zone_remove(ksp, zoneid);
2496 2496 kstat_rele(ksp);
2497 2497 }
2498 2498 if ((ksp = kstat_hold_byname("cpu", cpun, "sys", ALL_ZONES)) != NULL) {
2499 2499 kstat_zone_remove(ksp, zoneid);
2500 2500 kstat_rele(ksp);
2501 2501 }
2502 2502 (void) snprintf(name, sizeof (name), "cpu_stat%d", cpun);
2503 2503 if ((ksp = kstat_hold_byname("cpu_stat", cpun, name, ALL_ZONES))
2504 2504 != NULL) {
2505 2505 kstat_zone_remove(ksp, zoneid);
2506 2506 kstat_rele(ksp);
2507 2507 }
2508 2508 }
2509 2509
2510 2510 /*
2511 2511 * Update relevant kstats such that cpu is no longer visible to processes
2512 2512 * executing in specified zone.
2513 2513 */
2514 2514 void
2515 2515 cpu_visibility_remove(cpu_t *cp, zone_t *zone)
2516 2516 {
2517 2517 if (cpu_is_active(cp))
2518 2518 cpu_visibility_offline(cp, zone);
2519 2519 cpu_visibility_unconfigure(cp, zone);
2520 2520 }
2521 2521
2522 2522 /*
2523 2523 * Bind a thread to a CPU as requested.
2524 2524 */
2525 2525 int
2526 2526 cpu_bind_thread(kthread_id_t tp, processorid_t bind, processorid_t *obind,
2527 2527 int *error)
2528 2528 {
2529 2529 processorid_t binding;
2530 2530 cpu_t *cp = NULL;
2531 2531
2532 2532 ASSERT(MUTEX_HELD(&cpu_lock));
2533 2533 ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));
2534 2534
2535 2535 thread_lock(tp);
2536 2536
2537 2537 /*
2538 2538 * Record old binding, but change the obind, which was initialized
2539 2539 * to PBIND_NONE, only if this thread has a binding. This avoids
2540 2540 * reporting PBIND_NONE for a process when some LWPs are bound.
2541 2541 */
2542 2542 binding = tp->t_bind_cpu;
2543 2543 if (binding != PBIND_NONE)
2544 2544 *obind = binding; /* record old binding */
2545 2545
2546 2546 switch (bind) {
2547 2547 case PBIND_QUERY:
2548 2548 /* Just return the old binding */
2549 2549 thread_unlock(tp);
2550 2550 return (0);
2551 2551
2552 2552 case PBIND_QUERY_TYPE:
2553 2553 /* Return the binding type */
2554 2554 *obind = TB_CPU_IS_SOFT(tp) ? PBIND_SOFT : PBIND_HARD;
2555 2555 thread_unlock(tp);
2556 2556 return (0);
2557 2557
2558 2558 case PBIND_SOFT:
2559 2559 /*
2560 2560 * Set soft binding for this thread and return the actual
2561 2561 * binding
2562 2562 */
2563 2563 TB_CPU_SOFT_SET(tp);
2564 2564 thread_unlock(tp);
2565 2565 return (0);
2566 2566
2567 2567 case PBIND_HARD:
2568 2568 /*
2569 2569 * Set hard binding for this thread and return the actual
2570 2570 * binding
2571 2571 */
2572 2572 TB_CPU_HARD_SET(tp);
2573 2573 thread_unlock(tp);
2574 2574 return (0);
2575 2575
2576 2576 default:
2577 2577 break;
2578 2578 }
2579 2579
2580 2580 /*
2581 2581 * If this thread/LWP cannot be bound because of permission
2582 2582 * problems, just note that and return success so that the
2583 2583 * other threads/LWPs will be bound. This is the way
2584 2584 * processor_bind() is defined to work.
2585 2585 *
2586 2586 * Binding will get EPERM if the thread is of system class
2587 2587 * or hasprocperm() fails.
2588 2588 */
2589 2589 if (tp->t_cid == 0 || !hasprocperm(tp->t_cred, CRED())) {
2590 2590 *error = EPERM;
2591 2591 thread_unlock(tp);
2592 2592 return (0);
2593 2593 }
2594 2594
2595 2595 binding = bind;
2596 2596 if (binding != PBIND_NONE) {
2597 2597 cp = cpu_get((processorid_t)binding);
2598 2598 /*
2599 2599 * Make sure binding is valid and is in right partition.
2600 2600 */
2601 2601 if (cp == NULL || tp->t_cpupart != cp->cpu_part) {
2602 2602 *error = EINVAL;
2603 2603 thread_unlock(tp);
2604 2604 return (0);
2605 2605 }
2606 2606 }
2607 2607 tp->t_bind_cpu = binding; /* set new binding */
2608 2608
2609 2609 /*
2610 2610 * If there is no system-set reason for affinity, set
2611 2611 * the t_bound_cpu field to reflect the binding.
2612 2612 */
2613 2613 if (tp->t_affinitycnt == 0) {
2614 2614 if (binding == PBIND_NONE) {
2615 2615 /*
2616 2616 * We may need to adjust disp_max_unbound_pri
2617 2617 * since we're becoming unbound.
2618 2618 */
2619 2619 disp_adjust_unbound_pri(tp);
2620 2620
2621 2621 tp->t_bound_cpu = NULL; /* set new binding */
2622 2622
2623 2623 /*
2624 2624 * Move thread to lgroup with strongest affinity
2625 2625 * after unbinding
2626 2626 */
2627 2627 if (tp->t_lgrp_affinity)
2628 2628 lgrp_move_thread(tp,
2629 2629 lgrp_choose(tp, tp->t_cpupart), 1);
2630 2630
2631 2631 if (tp->t_state == TS_ONPROC &&
2632 2632 tp->t_cpu->cpu_part != tp->t_cpupart)
2633 2633 cpu_surrender(tp);
2634 2634 } else {
2635 2635 lpl_t *lpl;
2636 2636
2637 2637 tp->t_bound_cpu = cp;
2638 2638 ASSERT(cp->cpu_lpl != NULL);
2639 2639
2640 2640 /*
2641 2641 	 * Set home to the lgroup with the most affinity that
2642 2642 	 * contains the CPU the thread is being bound to, or to
2643 2643 	 * the minimum bounding lgroup if no affinities are set
2644 2644 */
2645 2645 if (tp->t_lgrp_affinity)
2646 2646 lpl = lgrp_affinity_best(tp, tp->t_cpupart,
2647 2647 LGRP_NONE, B_FALSE);
2648 2648 else
2649 2649 lpl = cp->cpu_lpl;
2650 2650
2651 2651 if (tp->t_lpl != lpl) {
2652 2652 /* can't grab cpu_lock */
2653 2653 lgrp_move_thread(tp, lpl, 1);
2654 2654 }
2655 2655
2656 2656 /*
2657 2657 * Make the thread switch to the bound CPU.
2658 2658 * If the thread is runnable, we need to
2659 2659 * requeue it even if t_cpu is already set
2660 2660 * to the right CPU, since it may be on a
2661 2661 * kpreempt queue and need to move to a local
2662 2662 * queue. We could check t_disp_queue to
2663 2663 * avoid unnecessary overhead if it's already
2664 2664 * on the right queue, but since this isn't
2665 2665 * a performance-critical operation it doesn't
2666 2666 * seem worth the extra code and complexity.
2667 2667 *
2668 2668 * If the thread is weakbound to the cpu then it will
2669 2669 * resist the new binding request until the weak
2670 2670 * binding drops. The cpu_surrender or requeueing
2671 2671 * below could be skipped in such cases (since it
2672 2672 * will have no effect), but that would require
2673 2673 * thread_allowmigrate to acquire thread_lock so
2674 2674 * we'll take the very occasional hit here instead.
2675 2675 */
2676 2676 if (tp->t_state == TS_ONPROC) {
2677 2677 cpu_surrender(tp);
2678 2678 } else if (tp->t_state == TS_RUN) {
2679 2679 cpu_t *ocp = tp->t_cpu;
2680 2680
2681 2681 (void) dispdeq(tp);
2682 2682 setbackdq(tp);
2683 2683 /*
2684 - * Either on the bound CPU's disp queue now,
2685 - * or swapped out or on the swap queue.
2684 + * On the bound CPU's disp queue now.
2686 2685 */
2687 2686 ASSERT(tp->t_disp_queue == cp->cpu_disp ||
2688 - tp->t_weakbound_cpu == ocp ||
2689 - (tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ))
2690 - != TS_LOAD);
2687 + tp->t_weakbound_cpu == ocp);
2691 2688 }
2692 2689 }
2693 2690 }
2694 2691
2695 2692 /*
2696 2693 * Our binding has changed; set TP_CHANGEBIND.
2697 2694 */
2698 2695 tp->t_proc_flag |= TP_CHANGEBIND;
2699 2696 aston(tp);
2700 2697
2701 2698 thread_unlock(tp);
2702 2699
2703 2700 return (0);
2704 2701 }
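
The userland face of this routine is processor_bind(2); a hedged sketch (CPU 1 is assumed to exist and to be in the caller's partition, which is the EINVAL check above):

    #include <sys/types.h>
    #include <sys/processor.h>
    #include <sys/procset.h>
    #include <stdio.h>

    int
    main(void)
    {
            processorid_t obind = PBIND_NONE;

            /* Hard-bind every LWP of this process to CPU 1. */
            if (processor_bind(P_PID, P_MYID, 1, &obind) != 0) {
                    perror("processor_bind");
                    return (1);
            }
            (void) printf("previous binding: %d\n", (int)obind);

            /* PBIND_QUERY reports the binding without changing it. */
            (void) processor_bind(P_LWPID, P_MYID, PBIND_QUERY, &obind);
            (void) printf("current binding: %d\n", (int)obind);
            return (0);
    }
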
2705 2702
2706 2703 #if CPUSET_WORDS > 1
2707 2704
2708 2705 /*
2709 2706 * Functions for implementing cpuset operations when a cpuset is more
2710 2707 * than one word. On platforms where a cpuset is a single word these
2711 2708 * are implemented as macros in cpuvar.h.
2712 2709 */
2713 2710
2714 2711 void
2715 2712 cpuset_all(cpuset_t *s)
2716 2713 {
2717 2714 int i;
2718 2715
2719 2716 for (i = 0; i < CPUSET_WORDS; i++)
2720 2717 s->cpub[i] = ~0UL;
2721 2718 }
2722 2719
2723 2720 void
2724 2721 cpuset_all_but(cpuset_t *s, uint_t cpu)
2725 2722 {
2726 2723 cpuset_all(s);
2727 2724 CPUSET_DEL(*s, cpu);
2728 2725 }
2729 2726
2730 2727 void
2731 2728 cpuset_only(cpuset_t *s, uint_t cpu)
2732 2729 {
2733 2730 CPUSET_ZERO(*s);
2734 2731 CPUSET_ADD(*s, cpu);
2735 2732 }
2736 2733
2737 2734 int
2738 2735 cpuset_isnull(cpuset_t *s)
2739 2736 {
2740 2737 int i;
2741 2738
2742 2739 for (i = 0; i < CPUSET_WORDS; i++)
2743 2740 if (s->cpub[i] != 0)
2744 2741 return (0);
2745 2742 return (1);
2746 2743 }
2747 2744
2748 2745 int
2749 2746 cpuset_cmp(cpuset_t *s1, cpuset_t *s2)
2750 2747 {
2751 2748 int i;
2752 2749
2753 2750 for (i = 0; i < CPUSET_WORDS; i++)
2754 2751 if (s1->cpub[i] != s2->cpub[i])
2755 2752 return (0);
2756 2753 return (1);
2757 2754 }
2758 2755
2759 2756 uint_t
2760 2757 cpuset_find(cpuset_t *s)
2761 2758 {
2762 2759
2763 2760 uint_t i;
2764 2761 uint_t cpu = (uint_t)-1;
2765 2762
2766 2763 /*
2767 2764 * Find a cpu in the cpuset
2768 2765 */
2769 2766 for (i = 0; i < CPUSET_WORDS; i++) {
2770 2767 cpu = (uint_t)(lowbit(s->cpub[i]) - 1);
2771 2768 if (cpu != (uint_t)-1) {
2772 2769 cpu += i * BT_NBIPUL;
2773 2770 break;
2774 2771 }
2775 2772 }
2776 2773 return (cpu);
2777 2774 }
2778 2775
2779 2776 void
2780 2777 cpuset_bounds(cpuset_t *s, uint_t *smallestid, uint_t *largestid)
2781 2778 {
2782 2779 int i, j;
2783 2780 uint_t bit;
2784 2781
2785 2782 /*
2786 2783 * First, find the smallest cpu id in the set.
2787 2784 */
2788 2785 for (i = 0; i < CPUSET_WORDS; i++) {
2789 2786 if (s->cpub[i] != 0) {
2790 2787 bit = (uint_t)(lowbit(s->cpub[i]) - 1);
2791 2788 ASSERT(bit != (uint_t)-1);
2792 2789 *smallestid = bit + (i * BT_NBIPUL);
2793 2790
2794 2791 /*
2795 2792 * Now find the largest cpu id in
2796 2793 * the set and return immediately.
2797 2794 * Done in an inner loop to avoid
2798 2795 * having to break out of the first
2799 2796 * loop.
2800 2797 */
2801 2798 for (j = CPUSET_WORDS - 1; j >= i; j--) {
2802 2799 if (s->cpub[j] != 0) {
2803 2800 bit = (uint_t)(highbit(s->cpub[j]) - 1);
2804 2801 ASSERT(bit != (uint_t)-1);
2805 2802 *largestid = bit + (j * BT_NBIPUL);
2806 2803 ASSERT(*largestid >= *smallestid);
2807 2804 return;
2808 2805 }
2809 2806 }
2810 2807
2811 2808 /*
2812 2809 * If this code is reached, a
2813 2810 * smallestid was found, but not a
2814 2811 * largestid. The cpuset must have
2815 2812 * been changed during the course
2816 2813 * of this function call.
2817 2814 */
2818 2815 ASSERT(0);
2819 2816 }
2820 2817 }
2821 2818 *smallestid = *largestid = CPUSET_NOTINSET;
2822 2819 }
2823 2820
2824 2821 #endif /* CPUSET_WORDS */
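
A hedged illustration of these helpers via their CPUSET_* macro forms from cpuvar.h (cpuset_demo() is hypothetical; the macros compile down to the functions above when CPUSET_WORDS > 1 and to single-word bit operations otherwise):

    /* Hypothetical in-kernel snippet exercising the cpuset helpers. */
    static void
    cpuset_demo(void)
    {
            cpuset_t set;
            uint_t lo, hi, first;

            CPUSET_ZERO(set);
            CPUSET_ADD(set, 3);
            CPUSET_ADD(set, 42);

            ASSERT(!CPUSET_ISNULL(set));
            CPUSET_BOUNDS(set, lo, hi);
            ASSERT(lo == 3 && hi == 42);

            CPUSET_DEL(set, 3);
            CPUSET_FIND(set, first);
            ASSERT(first == 42);
    }
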
2825 2822
2826 2823 /*
2827 2824 * Unbind threads bound to specified CPU.
2828 2825 *
2829 2826 * If `unbind_all_threads' is true, unbind all user threads bound to a given
2830 2827 * CPU. Otherwise unbind all soft-bound user threads.
2831 2828 */
2832 2829 int
2833 2830 cpu_unbind(processorid_t cpu, boolean_t unbind_all_threads)
2834 2831 {
2835 2832 processorid_t obind;
2836 2833 kthread_t *tp;
2837 2834 int ret = 0;
2838 2835 proc_t *pp;
2839 2836 int err, berr = 0;
2840 2837
2841 2838 ASSERT(MUTEX_HELD(&cpu_lock));
2842 2839
2843 2840 mutex_enter(&pidlock);
2844 2841 for (pp = practive; pp != NULL; pp = pp->p_next) {
2845 2842 mutex_enter(&pp->p_lock);
2846 2843 tp = pp->p_tlist;
2847 2844 /*
2848 2845 * Skip zombies, kernel processes, and processes in
2849 2846 * other zones, if called from a non-global zone.
2850 2847 */
2851 2848 if (tp == NULL || (pp->p_flag & SSYS) ||
2852 2849 !HASZONEACCESS(curproc, pp->p_zone->zone_id)) {
2853 2850 mutex_exit(&pp->p_lock);
2854 2851 continue;
2855 2852 }
2856 2853 do {
2857 2854 if (tp->t_bind_cpu != cpu)
2858 2855 continue;
2859 2856 /*
2860 2857 * Skip threads with hard binding when
2861 2858 * `unbind_all_threads' is not specified.
2862 2859 */
2863 2860 if (!unbind_all_threads && TB_CPU_IS_HARD(tp))
2864 2861 continue;
2865 2862 err = cpu_bind_thread(tp, PBIND_NONE, &obind, &berr);
2866 2863 if (ret == 0)
2867 2864 ret = err;
2868 2865 } while ((tp = tp->t_forw) != pp->p_tlist);
2869 2866 mutex_exit(&pp->p_lock);
2870 2867 }
2871 2868 mutex_exit(&pidlock);
2872 2869 if (ret == 0)
2873 2870 ret = berr;
2874 2871 return (ret);
2875 2872 }
2876 2873
2877 2874
2878 2875 /*
2879 2876 * Destroy all remaining bound threads on a cpu.
2880 2877 */
2881 2878 void
2882 2879 cpu_destroy_bound_threads(cpu_t *cp)
2883 2880 {
2884 2881 extern id_t syscid;
2885 2882 register kthread_id_t t, tlist, tnext;
2886 2883
2887 2884 /*
2888 2885 * Destroy all remaining bound threads on the cpu. This
2889 2886 * should include both the interrupt threads and the idle thread.
2890 2887 * This requires some care, since we need to traverse the
2891 2888 * thread list with the pidlock mutex locked, but thread_free
2892 2889 * also locks the pidlock mutex. So, we collect the threads
2893 2890 * we're going to reap in a list headed by "tlist", then we
2894 2891 	 * unlock the pidlock mutex and traverse tlist,
2895 2892 	 * doing a thread_free() on each thread. Simple, n'est-ce pas?
2896 2893 * Also, this depends on thread_free not mucking with the
2897 2894 * t_next and t_prev links of the thread.
2898 2895 */
2899 2896
2900 2897 if ((t = curthread) != NULL) {
2901 2898
2902 2899 tlist = NULL;
2903 2900 mutex_enter(&pidlock);
2904 2901 do {
2905 2902 tnext = t->t_next;
2906 2903 if (t->t_bound_cpu == cp) {
2907 2904
2908 2905 /*
2909 2906 	 * We've found a bound thread; carefully unlink
2910 2907 	 * it from the thread list, and add it to
2911 2908 * our "tlist". We "know" we don't have to
2912 2909 * worry about unlinking curthread (the thread
2913 2910 * that is executing this code).
2914 2911 */
2915 2912 t->t_next->t_prev = t->t_prev;
2916 2913 t->t_prev->t_next = t->t_next;
2917 2914 t->t_next = tlist;
2918 2915 tlist = t;
2919 2916 ASSERT(t->t_cid == syscid);
2920 2917 /* wake up anyone blocked in thread_join */
2921 2918 cv_broadcast(&t->t_joincv);
2922 2919 /*
2923 2920 * t_lwp set by interrupt threads and not
2924 2921 * cleared.
2925 2922 */
2926 2923 t->t_lwp = NULL;
2927 2924 /*
2928 2925 * Pause and idle threads always have
2929 2926 * t_state set to TS_ONPROC.
2930 2927 */
2931 2928 t->t_state = TS_FREE;
2932 2929 t->t_prev = NULL; /* Just in case */
2933 2930 }
2934 2931
2935 2932 } while ((t = tnext) != curthread);
2936 2933
2937 2934 mutex_exit(&pidlock);
2938 2935
2939 2936 mutex_sync();
2940 2937 for (t = tlist; t != NULL; t = tnext) {
2941 2938 tnext = t->t_next;
2942 2939 thread_free(t);
2943 2940 }
2944 2941 }
2945 2942 }
2946 2943
2947 2944 /*
2948 2945 * Update the cpu_supp_freqs of this cpu. This information is returned
2949 2946 * as part of cpu_info kstats. If the cpu_info_kstat exists already, then
2950 2947 * maintain the kstat data size.
2951 2948 */
2952 2949 void
2953 2950 cpu_set_supp_freqs(cpu_t *cp, const char *freqs)
2954 2951 {
2955 2952 char clkstr[sizeof ("18446744073709551615") + 1]; /* ui64 MAX */
2956 2953 const char *lfreqs = clkstr;
2957 2954 boolean_t kstat_exists = B_FALSE;
2958 2955 kstat_t *ksp;
2959 2956 size_t len;
2960 2957
2961 2958 /*
2962 2959 * A NULL pointer means we only support one speed.
2963 2960 */
2964 2961 if (freqs == NULL)
2965 2962 (void) snprintf(clkstr, sizeof (clkstr), "%"PRIu64,
2966 2963 cp->cpu_curr_clock);
2967 2964 else
2968 2965 lfreqs = freqs;
2969 2966
2970 2967 /*
2971 2968 * Make sure the frequency doesn't change while a snapshot is
2972 2969 * going on. Of course, we only need to worry about this if
2973 2970 * the kstat exists.
2974 2971 */
2975 2972 if ((ksp = cp->cpu_info_kstat) != NULL) {
2976 2973 mutex_enter(ksp->ks_lock);
2977 2974 kstat_exists = B_TRUE;
2978 2975 }
2979 2976
2980 2977 /*
2981 2978 	 * Free any previously allocated string and, if the kstat
2982 2979 	 * already exists, update its data size.
2983 2980 */
2984 2981 if (cp->cpu_supp_freqs != NULL) {
2985 2982 len = strlen(cp->cpu_supp_freqs) + 1;
2986 2983 kmem_free(cp->cpu_supp_freqs, len);
2987 2984 if (kstat_exists)
2988 2985 ksp->ks_data_size -= len;
2989 2986 }
2990 2987
2991 2988 /*
2992 2989 * Allocate the new string and set the pointer.
2993 2990 */
2994 2991 len = strlen(lfreqs) + 1;
2995 2992 cp->cpu_supp_freqs = kmem_alloc(len, KM_SLEEP);
2996 2993 (void) strcpy(cp->cpu_supp_freqs, lfreqs);
2997 2994
2998 2995 /*
2999 2996 * If the kstat already exists then update the data size and
3000 2997 	 * release the lock.
3001 2998 */
3002 2999 if (kstat_exists) {
3003 3000 ksp->ks_data_size += len;
3004 3001 mutex_exit(ksp->ks_lock);
3005 3002 }
3006 3003 }
3007 3004
3008 3005 /*
3009 3006 	 * Indicate the current CPU's clock frequency (in Hz).
3010 3007 * The calling context must be such that CPU references are safe.
3011 3008 */
3012 3009 void
3013 3010 cpu_set_curr_clock(uint64_t new_clk)
3014 3011 {
3015 3012 uint64_t old_clk;
3016 3013
3017 3014 old_clk = CPU->cpu_curr_clock;
3018 3015 CPU->cpu_curr_clock = new_clk;
3019 3016
3020 3017 /*
3021 3018 * The cpu-change-speed DTrace probe exports the frequency in Hz
3022 3019 */
3023 3020 DTRACE_PROBE3(cpu__change__speed, processorid_t, CPU->cpu_id,
3024 3021 uint64_t, old_clk, uint64_t, new_clk);
3025 3022 }
3026 3023
3027 3024 /*
3028 3025 * processor_info(2) and p_online(2) status support functions
3029 3026 	 * The constants returned by cpu_get_state() and cpu_get_state_str() are
3030 3027 	 * for use in communicating processor state information to userland. Kernel
3031 3028 	 * subsystems should only use the cpu_flags value directly. Subsystems
3032 3029 	 * modifying cpu_flags should record the state change via a call to
3033 3030 	 * cpu_set_state().
3034 3031 */
3035 3032
3036 3033 /*
3037 3034 * Update the pi_state of this CPU. This function provides the CPU status for
3038 3035 * the information returned by processor_info(2).
3039 3036 */
3040 3037 void
3041 3038 cpu_set_state(cpu_t *cpu)
3042 3039 {
3043 3040 ASSERT(MUTEX_HELD(&cpu_lock));
3044 3041 cpu->cpu_type_info.pi_state = cpu_get_state(cpu);
3045 3042 cpu->cpu_state_begin = gethrestime_sec();
3046 3043 pool_cpu_mod = gethrtime();
3047 3044 }
3048 3045
3049 3046 /*
3050 3047 * Return offline/online/other status for the indicated CPU. Use only for
3051 3048 * communication with user applications; cpu_flags provides the in-kernel
3052 3049 * interface.
3053 3050 */
3054 3051 int
3055 3052 cpu_get_state(cpu_t *cpu)
3056 3053 {
3057 3054 ASSERT(MUTEX_HELD(&cpu_lock));
3058 3055 if (cpu->cpu_flags & CPU_POWEROFF)
3059 3056 return (P_POWEROFF);
3060 3057 else if (cpu->cpu_flags & CPU_FAULTED)
3061 3058 return (P_FAULTED);
3062 3059 else if (cpu->cpu_flags & CPU_SPARE)
3063 3060 return (P_SPARE);
3064 3061 else if ((cpu->cpu_flags & (CPU_READY | CPU_OFFLINE)) != CPU_READY)
3065 3062 return (P_OFFLINE);
3066 3063 else if (cpu->cpu_flags & CPU_ENABLE)
3067 3064 return (P_ONLINE);
3068 3065 else
3069 3066 return (P_NOINTR);
3070 3067 }
3071 3068
3072 3069 /*
3073 3070 * Return processor_info(2) state as a string.
3074 3071 */
3075 3072 const char *
3076 3073 cpu_get_state_str(cpu_t *cpu)
3077 3074 {
3078 3075 const char *string;
3079 3076
3080 3077 switch (cpu_get_state(cpu)) {
3081 3078 case P_ONLINE:
3082 3079 string = PS_ONLINE;
3083 3080 break;
3084 3081 case P_POWEROFF:
3085 3082 string = PS_POWEROFF;
3086 3083 break;
3087 3084 case P_NOINTR:
3088 3085 string = PS_NOINTR;
3089 3086 break;
3090 3087 case P_SPARE:
3091 3088 string = PS_SPARE;
3092 3089 break;
3093 3090 case P_FAULTED:
3094 3091 string = PS_FAULTED;
3095 3092 break;
3096 3093 case P_OFFLINE:
3097 3094 string = PS_OFFLINE;
3098 3095 break;
3099 3096 default:
3100 3097 string = "unknown";
3101 3098 break;
3102 3099 }
3103 3100 return (string);
3104 3101 }
3105 3102
3106 3103 /*
3107 3104 * Export this CPU's statistics (cpu_stat_t and cpu_stats_t) as raw and named
3108 3105 * kstats, respectively. This is done when a CPU is initialized or placed
3109 3106 * online via p_online(2).
3110 3107 */
3111 3108 static void
3112 3109 cpu_stats_kstat_create(cpu_t *cp)
3113 3110 {
3114 3111 int instance = cp->cpu_id;
3115 3112 char *module = "cpu";
3116 3113 char *class = "misc";
3117 3114 kstat_t *ksp;
3118 3115 zoneid_t zoneid;
3119 3116
3120 3117 ASSERT(MUTEX_HELD(&cpu_lock));
3121 3118
3122 3119 if (pool_pset_enabled())
3123 3120 zoneid = GLOBAL_ZONEID;
3124 3121 else
3125 3122 zoneid = ALL_ZONES;
3126 3123 /*
3127 3124 * Create named kstats
3128 3125 */
3129 3126 #define CPU_STATS_KS_CREATE(name, tsize, update_func) \
3130 3127 ksp = kstat_create_zone(module, instance, (name), class, \
3131 3128 KSTAT_TYPE_NAMED, (tsize) / sizeof (kstat_named_t), 0, \
3132 3129 zoneid); \
3133 3130 if (ksp != NULL) { \
3134 3131 ksp->ks_private = cp; \
3135 3132 ksp->ks_update = (update_func); \
3136 3133 kstat_install(ksp); \
3137 3134 } else \
3138 3135 cmn_err(CE_WARN, "cpu: unable to create %s:%d:%s kstat", \
3139 3136 module, instance, (name));
3140 3137
3141 3138 CPU_STATS_KS_CREATE("sys", sizeof (cpu_sys_stats_ks_data_template),
3142 3139 cpu_sys_stats_ks_update);
3143 3140 CPU_STATS_KS_CREATE("vm", sizeof (cpu_vm_stats_ks_data_template),
3144 3141 cpu_vm_stats_ks_update);
3145 3142
3146 3143 /*
3147 3144 * Export the familiar cpu_stat_t KSTAT_TYPE_RAW kstat.
3148 3145 */
3149 3146 ksp = kstat_create_zone("cpu_stat", cp->cpu_id, NULL,
3150 3147 "misc", KSTAT_TYPE_RAW, sizeof (cpu_stat_t), 0, zoneid);
3151 3148 if (ksp != NULL) {
3152 3149 ksp->ks_update = cpu_stat_ks_update;
3153 3150 ksp->ks_private = cp;
3154 3151 kstat_install(ksp);
3155 3152 }
3156 3153 }
3157 3154
3158 3155 static void
3159 3156 cpu_stats_kstat_destroy(cpu_t *cp)
3160 3157 {
3161 3158 char ks_name[KSTAT_STRLEN];
3162 3159
3163 3160 (void) sprintf(ks_name, "cpu_stat%d", cp->cpu_id);
3164 3161 kstat_delete_byname("cpu_stat", cp->cpu_id, ks_name);
3165 3162
3166 3163 kstat_delete_byname("cpu", cp->cpu_id, "sys");
3167 3164 kstat_delete_byname("cpu", cp->cpu_id, "vm");
3168 3165 }
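
The named kstats created above surface as cpu:<id>:sys and cpu:<id>:vm; for example, a hedged libkstat helper to fetch one counter (cpu_pswitch() is an illustrative name, and "pswitch" is one of the named statistics installed above):

    #include <sys/types.h>
    #include <kstat.h>

    /* Fetch the context-switch count from the cpu:<cpuid>:sys kstat. */
    static uint64_t
    cpu_pswitch(kstat_ctl_t *kc, int cpuid)
    {
            kstat_t *ksp;
            kstat_named_t *kn;

            if ((ksp = kstat_lookup(kc, "cpu", cpuid, "sys")) == NULL ||
                kstat_read(kc, ksp, NULL) == -1)
                    return (0);
            if ((kn = kstat_data_lookup(ksp, "pswitch")) == NULL)
                    return (0);
            return (kn->value.ui64);
    }
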
3169 3166
3170 3167 static int
3171 3168 cpu_sys_stats_ks_update(kstat_t *ksp, int rw)
3172 3169 {
3173 3170 cpu_t *cp = (cpu_t *)ksp->ks_private;
3174 3171 struct cpu_sys_stats_ks_data *csskd;
3175 3172 cpu_sys_stats_t *css;
3176 3173 hrtime_t msnsecs[NCMSTATES];
3177 3174 int i;
3178 3175
3179 3176 if (rw == KSTAT_WRITE)
3180 3177 return (EACCES);
3181 3178
3182 3179 csskd = ksp->ks_data;
3183 3180 css = &cp->cpu_stats.sys;
3184 3181
3185 3182 /*
3186 3183 * Read CPU mstate, but compare with the last values we
3187 3184 * received to make sure that the returned kstats never
3188 3185 * decrease.
3189 3186 */
3190 3187
3191 3188 get_cpu_mstate(cp, msnsecs);
3192 3189 if (csskd->cpu_nsec_idle.value.ui64 > msnsecs[CMS_IDLE])
3193 3190 msnsecs[CMS_IDLE] = csskd->cpu_nsec_idle.value.ui64;
3194 3191 if (csskd->cpu_nsec_user.value.ui64 > msnsecs[CMS_USER])
3195 3192 msnsecs[CMS_USER] = csskd->cpu_nsec_user.value.ui64;
3196 3193 if (csskd->cpu_nsec_kernel.value.ui64 > msnsecs[CMS_SYSTEM])
3197 3194 msnsecs[CMS_SYSTEM] = csskd->cpu_nsec_kernel.value.ui64;
3198 3195
3199 3196 bcopy(&cpu_sys_stats_ks_data_template, ksp->ks_data,
3200 3197 sizeof (cpu_sys_stats_ks_data_template));
3201 3198
3202 3199 csskd->cpu_ticks_wait.value.ui64 = 0;
3203 3200 csskd->wait_ticks_io.value.ui64 = 0;
3204 3201
3205 3202 csskd->cpu_nsec_idle.value.ui64 = msnsecs[CMS_IDLE];
3206 3203 csskd->cpu_nsec_user.value.ui64 = msnsecs[CMS_USER];
3207 3204 csskd->cpu_nsec_kernel.value.ui64 = msnsecs[CMS_SYSTEM];
3208 3205 csskd->cpu_ticks_idle.value.ui64 =
3209 3206 NSEC_TO_TICK(csskd->cpu_nsec_idle.value.ui64);
3210 3207 csskd->cpu_ticks_user.value.ui64 =
3211 3208 NSEC_TO_TICK(csskd->cpu_nsec_user.value.ui64);
3212 3209 csskd->cpu_ticks_kernel.value.ui64 =
3213 3210 NSEC_TO_TICK(csskd->cpu_nsec_kernel.value.ui64);
3214 3211 csskd->cpu_nsec_dtrace.value.ui64 = cp->cpu_dtrace_nsec;
3215 3212 csskd->dtrace_probes.value.ui64 = cp->cpu_dtrace_probes;
3216 3213 csskd->cpu_nsec_intr.value.ui64 = cp->cpu_intrlast;
3217 3214 csskd->cpu_load_intr.value.ui64 = cp->cpu_intrload;
3218 3215 csskd->bread.value.ui64 = css->bread;
3219 3216 csskd->bwrite.value.ui64 = css->bwrite;
3220 3217 csskd->lread.value.ui64 = css->lread;
3221 3218 csskd->lwrite.value.ui64 = css->lwrite;
3222 3219 csskd->phread.value.ui64 = css->phread;
3223 3220 csskd->phwrite.value.ui64 = css->phwrite;
3224 3221 csskd->pswitch.value.ui64 = css->pswitch;
3225 3222 csskd->trap.value.ui64 = css->trap;
3226 3223 csskd->intr.value.ui64 = 0;
3227 3224 for (i = 0; i < PIL_MAX; i++)
3228 3225 csskd->intr.value.ui64 += css->intr[i];
3229 3226 csskd->syscall.value.ui64 = css->syscall;
3230 3227 csskd->sysread.value.ui64 = css->sysread;
3231 3228 csskd->syswrite.value.ui64 = css->syswrite;
3232 3229 csskd->sysfork.value.ui64 = css->sysfork;
3233 3230 csskd->sysvfork.value.ui64 = css->sysvfork;
3234 3231 csskd->sysexec.value.ui64 = css->sysexec;
3235 3232 csskd->readch.value.ui64 = css->readch;
3236 3233 csskd->writech.value.ui64 = css->writech;
3237 3234 csskd->rcvint.value.ui64 = css->rcvint;
3238 3235 csskd->xmtint.value.ui64 = css->xmtint;
3239 3236 csskd->mdmint.value.ui64 = css->mdmint;
3240 3237 csskd->rawch.value.ui64 = css->rawch;
3241 3238 csskd->canch.value.ui64 = css->canch;
3242 3239 csskd->outch.value.ui64 = css->outch;
3243 3240 csskd->msg.value.ui64 = css->msg;
3244 3241 csskd->sema.value.ui64 = css->sema;
3245 3242 csskd->namei.value.ui64 = css->namei;
3246 3243 csskd->ufsiget.value.ui64 = css->ufsiget;
3247 3244 csskd->ufsdirblk.value.ui64 = css->ufsdirblk;
3248 3245 csskd->ufsipage.value.ui64 = css->ufsipage;
3249 3246 csskd->ufsinopage.value.ui64 = css->ufsinopage;
3250 3247 csskd->procovf.value.ui64 = css->procovf;
3251 3248 csskd->intrthread.value.ui64 = 0;
3252 3249 for (i = 0; i < LOCK_LEVEL - 1; i++)
3253 3250 csskd->intrthread.value.ui64 += css->intr[i];
3254 3251 csskd->intrblk.value.ui64 = css->intrblk;
3255 3252 csskd->intrunpin.value.ui64 = css->intrunpin;
3256 3253 csskd->idlethread.value.ui64 = css->idlethread;
3257 3254 csskd->inv_swtch.value.ui64 = css->inv_swtch;
3258 3255 csskd->nthreads.value.ui64 = css->nthreads;
3259 3256 csskd->cpumigrate.value.ui64 = css->cpumigrate;
3260 3257 csskd->xcalls.value.ui64 = css->xcalls;
3261 3258 csskd->mutex_adenters.value.ui64 = css->mutex_adenters;
3262 3259 csskd->rw_rdfails.value.ui64 = css->rw_rdfails;
3263 3260 csskd->rw_wrfails.value.ui64 = css->rw_wrfails;
3264 3261 csskd->modload.value.ui64 = css->modload;
3265 3262 csskd->modunload.value.ui64 = css->modunload;
3266 3263 csskd->bawrite.value.ui64 = css->bawrite;
3267 3264 csskd->iowait.value.ui64 = css->iowait;
3268 3265
3269 3266 return (0);
3270 3267 }
3271 3268
3272 3269 static int
3273 3270 cpu_vm_stats_ks_update(kstat_t *ksp, int rw)
3274 3271 {
3275 3272 cpu_t *cp = (cpu_t *)ksp->ks_private;
3276 3273 struct cpu_vm_stats_ks_data *cvskd;
3277 3274 cpu_vm_stats_t *cvs;
3278 3275
3279 3276 if (rw == KSTAT_WRITE)
3280 3277 return (EACCES);
3281 3278
3282 3279 cvs = &cp->cpu_stats.vm;
3283 3280 cvskd = ksp->ks_data;
3284 3281
3285 3282 bcopy(&cpu_vm_stats_ks_data_template, ksp->ks_data,
3286 3283 sizeof (cpu_vm_stats_ks_data_template));
3287 3284 cvskd->pgrec.value.ui64 = cvs->pgrec;
3288 3285 cvskd->pgfrec.value.ui64 = cvs->pgfrec;
3289 3286 cvskd->pgin.value.ui64 = cvs->pgin;
3290 3287 cvskd->pgpgin.value.ui64 = cvs->pgpgin;
3291 3288 cvskd->pgout.value.ui64 = cvs->pgout;
3292 3289 cvskd->pgpgout.value.ui64 = cvs->pgpgout;
3293 3290 cvskd->swapin.value.ui64 = cvs->swapin;
3294 3291 cvskd->pgswapin.value.ui64 = cvs->pgswapin;
3295 3292 cvskd->swapout.value.ui64 = cvs->swapout;
3296 3293 cvskd->pgswapout.value.ui64 = cvs->pgswapout;
3297 3294 cvskd->zfod.value.ui64 = cvs->zfod;
3298 3295 cvskd->dfree.value.ui64 = cvs->dfree;
3299 3296 cvskd->scan.value.ui64 = cvs->scan;
3300 3297 cvskd->rev.value.ui64 = cvs->rev;
3301 3298 cvskd->hat_fault.value.ui64 = cvs->hat_fault;
3302 3299 cvskd->as_fault.value.ui64 = cvs->as_fault;
3303 3300 cvskd->maj_fault.value.ui64 = cvs->maj_fault;
3304 3301 cvskd->cow_fault.value.ui64 = cvs->cow_fault;
3305 3302 cvskd->prot_fault.value.ui64 = cvs->prot_fault;
3306 3303 cvskd->softlock.value.ui64 = cvs->softlock;
3307 3304 cvskd->kernel_asflt.value.ui64 = cvs->kernel_asflt;
3308 3305 cvskd->pgrrun.value.ui64 = cvs->pgrrun;
3309 3306 cvskd->execpgin.value.ui64 = cvs->execpgin;
3310 3307 cvskd->execpgout.value.ui64 = cvs->execpgout;
3311 3308 cvskd->execfree.value.ui64 = cvs->execfree;
3312 3309 cvskd->anonpgin.value.ui64 = cvs->anonpgin;
3313 3310 cvskd->anonpgout.value.ui64 = cvs->anonpgout;
3314 3311 cvskd->anonfree.value.ui64 = cvs->anonfree;
3315 3312 cvskd->fspgin.value.ui64 = cvs->fspgin;
3316 3313 cvskd->fspgout.value.ui64 = cvs->fspgout;
3317 3314 cvskd->fsfree.value.ui64 = cvs->fsfree;
3318 3315
3319 3316 return (0);
3320 3317 }
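
From userland, the two named kstats refreshed above appear as cpu:<instance>:sys and cpu:<instance>:vm. A minimal libkstat(3LIB) consumer, assuming CPU 0 is present and using "xcalls" (one of the ui64 counters filled in by cpu_sys_stats_ks_update()) as a representative statistic; build with -lkstat:

#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;

	if ((kc = kstat_open()) == NULL)
		return (1);
	/* module "cpu", instance 0, name "sys": the named kstat above */
	if ((ksp = kstat_lookup(kc, "cpu", 0, "sys")) == NULL ||
	    kstat_read(kc, ksp, NULL) == -1)
		return (1);
	if ((kn = kstat_data_lookup(ksp, "xcalls")) != NULL)
		(void) printf("cpu0 xcalls: %llu\n",
		    (u_longlong_t)kn->value.ui64);
	(void) kstat_close(kc);
	return (0);
}

kstat(1M) offers the same view from the shell, e.g. "kstat -p cpu:0:sys:xcalls".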
3321 3318
3322 3319 static int
3323 3320 cpu_stat_ks_update(kstat_t *ksp, int rw)
3324 3321 {
3325 3322 cpu_stat_t *cso;
3326 3323 cpu_t *cp;
3327 3324 int i;
3328 3325 hrtime_t msnsecs[NCMSTATES];
3329 3326
3330 3327 cso = (cpu_stat_t *)ksp->ks_data;
3331 3328 cp = (cpu_t *)ksp->ks_private;
3332 3329
3333 3330 if (rw == KSTAT_WRITE)
3334 3331 return (EACCES);
3335 3332
3336 3333 /*
3337 3334 * Read CPU mstate, but compare with the last values we
3338 3335 * received to make sure that the returned kstats never
3339 3336 * decrease.
3340 3337 */
3341 3338
3342 3339 get_cpu_mstate(cp, msnsecs);
3343 3340 msnsecs[CMS_IDLE] = NSEC_TO_TICK(msnsecs[CMS_IDLE]);
3344 3341 msnsecs[CMS_USER] = NSEC_TO_TICK(msnsecs[CMS_USER]);
3345 3342 msnsecs[CMS_SYSTEM] = NSEC_TO_TICK(msnsecs[CMS_SYSTEM]);
3346 3343 if (cso->cpu_sysinfo.cpu[CPU_IDLE] < msnsecs[CMS_IDLE])
3347 3344 cso->cpu_sysinfo.cpu[CPU_IDLE] = msnsecs[CMS_IDLE];
3348 3345 if (cso->cpu_sysinfo.cpu[CPU_USER] < msnsecs[CMS_USER])
3349 3346 cso->cpu_sysinfo.cpu[CPU_USER] = msnsecs[CMS_USER];
3350 3347 if (cso->cpu_sysinfo.cpu[CPU_KERNEL] < msnsecs[CMS_SYSTEM])
3351 3348 cso->cpu_sysinfo.cpu[CPU_KERNEL] = msnsecs[CMS_SYSTEM];
3352 3349 cso->cpu_sysinfo.cpu[CPU_WAIT] = 0;
3353 3350 cso->cpu_sysinfo.wait[W_IO] = 0;
3354 3351 cso->cpu_sysinfo.wait[W_SWAP] = 0;
3355 3352 cso->cpu_sysinfo.wait[W_PIO] = 0;
3356 3353 cso->cpu_sysinfo.bread = CPU_STATS(cp, sys.bread);
3357 3354 cso->cpu_sysinfo.bwrite = CPU_STATS(cp, sys.bwrite);
3358 3355 cso->cpu_sysinfo.lread = CPU_STATS(cp, sys.lread);
3359 3356 cso->cpu_sysinfo.lwrite = CPU_STATS(cp, sys.lwrite);
3360 3357 cso->cpu_sysinfo.phread = CPU_STATS(cp, sys.phread);
3361 3358 cso->cpu_sysinfo.phwrite = CPU_STATS(cp, sys.phwrite);
3362 3359 cso->cpu_sysinfo.pswitch = CPU_STATS(cp, sys.pswitch);
3363 3360 cso->cpu_sysinfo.trap = CPU_STATS(cp, sys.trap);
3364 3361 cso->cpu_sysinfo.intr = 0;
3365 3362 for (i = 0; i < PIL_MAX; i++)
3366 3363 cso->cpu_sysinfo.intr += CPU_STATS(cp, sys.intr[i]);
3367 3364 cso->cpu_sysinfo.syscall = CPU_STATS(cp, sys.syscall);
3368 3365 cso->cpu_sysinfo.sysread = CPU_STATS(cp, sys.sysread);
3369 3366 cso->cpu_sysinfo.syswrite = CPU_STATS(cp, sys.syswrite);
3370 3367 cso->cpu_sysinfo.sysfork = CPU_STATS(cp, sys.sysfork);
3371 3368 cso->cpu_sysinfo.sysvfork = CPU_STATS(cp, sys.sysvfork);
3372 3369 cso->cpu_sysinfo.sysexec = CPU_STATS(cp, sys.sysexec);
3373 3370 cso->cpu_sysinfo.readch = CPU_STATS(cp, sys.readch);
3374 3371 cso->cpu_sysinfo.writech = CPU_STATS(cp, sys.writech);
3375 3372 cso->cpu_sysinfo.rcvint = CPU_STATS(cp, sys.rcvint);
3376 3373 cso->cpu_sysinfo.xmtint = CPU_STATS(cp, sys.xmtint);
3377 3374 cso->cpu_sysinfo.mdmint = CPU_STATS(cp, sys.mdmint);
3378 3375 cso->cpu_sysinfo.rawch = CPU_STATS(cp, sys.rawch);
3379 3376 cso->cpu_sysinfo.canch = CPU_STATS(cp, sys.canch);
3380 3377 cso->cpu_sysinfo.outch = CPU_STATS(cp, sys.outch);
3381 3378 cso->cpu_sysinfo.msg = CPU_STATS(cp, sys.msg);
3382 3379 cso->cpu_sysinfo.sema = CPU_STATS(cp, sys.sema);
3383 3380 cso->cpu_sysinfo.namei = CPU_STATS(cp, sys.namei);
3384 3381 cso->cpu_sysinfo.ufsiget = CPU_STATS(cp, sys.ufsiget);
3385 3382 cso->cpu_sysinfo.ufsdirblk = CPU_STATS(cp, sys.ufsdirblk);
3386 3383 cso->cpu_sysinfo.ufsipage = CPU_STATS(cp, sys.ufsipage);
3387 3384 cso->cpu_sysinfo.ufsinopage = CPU_STATS(cp, sys.ufsinopage);
3388 3385 cso->cpu_sysinfo.inodeovf = 0;
3389 3386 cso->cpu_sysinfo.fileovf = 0;
3390 3387 cso->cpu_sysinfo.procovf = CPU_STATS(cp, sys.procovf);
3391 3388 cso->cpu_sysinfo.intrthread = 0;
3392 3389 for (i = 0; i < LOCK_LEVEL - 1; i++)
3393 3390 cso->cpu_sysinfo.intrthread += CPU_STATS(cp, sys.intr[i]);
3394 3391 cso->cpu_sysinfo.intrblk = CPU_STATS(cp, sys.intrblk);
3395 3392 cso->cpu_sysinfo.idlethread = CPU_STATS(cp, sys.idlethread);
3396 3393 cso->cpu_sysinfo.inv_swtch = CPU_STATS(cp, sys.inv_swtch);
3397 3394 cso->cpu_sysinfo.nthreads = CPU_STATS(cp, sys.nthreads);
3398 3395 cso->cpu_sysinfo.cpumigrate = CPU_STATS(cp, sys.cpumigrate);
3399 3396 cso->cpu_sysinfo.xcalls = CPU_STATS(cp, sys.xcalls);
3400 3397 cso->cpu_sysinfo.mutex_adenters = CPU_STATS(cp, sys.mutex_adenters);
3401 3398 cso->cpu_sysinfo.rw_rdfails = CPU_STATS(cp, sys.rw_rdfails);
3402 3399 cso->cpu_sysinfo.rw_wrfails = CPU_STATS(cp, sys.rw_wrfails);
3403 3400 cso->cpu_sysinfo.modload = CPU_STATS(cp, sys.modload);
3404 3401 cso->cpu_sysinfo.modunload = CPU_STATS(cp, sys.modunload);
3405 3402 cso->cpu_sysinfo.bawrite = CPU_STATS(cp, sys.bawrite);
3406 3403 cso->cpu_sysinfo.rw_enters = 0;
3407 3404 cso->cpu_sysinfo.win_uo_cnt = 0;
3408 3405 cso->cpu_sysinfo.win_uu_cnt = 0;
3409 3406 cso->cpu_sysinfo.win_so_cnt = 0;
3410 3407 cso->cpu_sysinfo.win_su_cnt = 0;
3411 3408 cso->cpu_sysinfo.win_suo_cnt = 0;
3412 3409
3413 3410 cso->cpu_syswait.iowait = CPU_STATS(cp, sys.iowait);
3414 3411 cso->cpu_syswait.swap = 0;
3415 3412 cso->cpu_syswait.physio = 0;
3416 3413
3417 3414 cso->cpu_vminfo.pgrec = CPU_STATS(cp, vm.pgrec);
3418 3415 cso->cpu_vminfo.pgfrec = CPU_STATS(cp, vm.pgfrec);
3419 3416 cso->cpu_vminfo.pgin = CPU_STATS(cp, vm.pgin);
3420 3417 cso->cpu_vminfo.pgpgin = CPU_STATS(cp, vm.pgpgin);
3421 3418 cso->cpu_vminfo.pgout = CPU_STATS(cp, vm.pgout);
3422 3419 cso->cpu_vminfo.pgpgout = CPU_STATS(cp, vm.pgpgout);
3423 3420 cso->cpu_vminfo.swapin = CPU_STATS(cp, vm.swapin);
3424 3421 cso->cpu_vminfo.pgswapin = CPU_STATS(cp, vm.pgswapin);
3425 3422 cso->cpu_vminfo.swapout = CPU_STATS(cp, vm.swapout);
3426 3423 cso->cpu_vminfo.pgswapout = CPU_STATS(cp, vm.pgswapout);
3427 3424 cso->cpu_vminfo.zfod = CPU_STATS(cp, vm.zfod);
3428 3425 cso->cpu_vminfo.dfree = CPU_STATS(cp, vm.dfree);
3429 3426 cso->cpu_vminfo.scan = CPU_STATS(cp, vm.scan);
3430 3427 cso->cpu_vminfo.rev = CPU_STATS(cp, vm.rev);
3431 3428 cso->cpu_vminfo.hat_fault = CPU_STATS(cp, vm.hat_fault);
3432 3429 cso->cpu_vminfo.as_fault = CPU_STATS(cp, vm.as_fault);
3433 3430 cso->cpu_vminfo.maj_fault = CPU_STATS(cp, vm.maj_fault);
3434 3431 cso->cpu_vminfo.cow_fault = CPU_STATS(cp, vm.cow_fault);
3435 3432 cso->cpu_vminfo.prot_fault = CPU_STATS(cp, vm.prot_fault);
3436 3433 cso->cpu_vminfo.softlock = CPU_STATS(cp, vm.softlock);
3437 3434 cso->cpu_vminfo.kernel_asflt = CPU_STATS(cp, vm.kernel_asflt);
3438 3435 cso->cpu_vminfo.pgrrun = CPU_STATS(cp, vm.pgrrun);
3439 3436 cso->cpu_vminfo.execpgin = CPU_STATS(cp, vm.execpgin);
3440 3437 cso->cpu_vminfo.execpgout = CPU_STATS(cp, vm.execpgout);
3441 3438 cso->cpu_vminfo.execfree = CPU_STATS(cp, vm.execfree);
3442 3439 cso->cpu_vminfo.anonpgin = CPU_STATS(cp, vm.anonpgin);
3443 3440 cso->cpu_vminfo.anonpgout = CPU_STATS(cp, vm.anonpgout);
3444 3441 cso->cpu_vminfo.anonfree = CPU_STATS(cp, vm.anonfree);
3445 3442 cso->cpu_vminfo.fspgin = CPU_STATS(cp, vm.fspgin);
3446 3443 cso->cpu_vminfo.fspgout = CPU_STATS(cp, vm.fspgout);
3447 3444 cso->cpu_vminfo.fsfree = CPU_STATS(cp, vm.fsfree);
3448 3445
3449 3446 return (0);
3450 3447 }
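
cpu_stat_ks_update() above serves the legacy KSTAT_TYPE_RAW kstat whose payload is an entire cpu_stat_t (module "cpu_stat", one instance per CPU). Because the raw layout is a fixed binary interface, retired statistics such as the wait[] and *ovf fields are pinned to zero rather than removed. A consumer copies the whole structure out in one kstat_read(3KSTAT) call; a minimal sketch, again assuming CPU 0 and built with -lkstat:

#include <kstat.h>
#include <sys/sysinfo.h>	/* cpu_stat_t, CPU_IDLE */
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	cpu_stat_t cs;

	if ((kc = kstat_open()) == NULL)
		return (1);
	/* Raw kstat: any name under module "cpu_stat", instance 0. */
	if ((ksp = kstat_lookup(kc, "cpu_stat", 0, NULL)) == NULL ||
	    kstat_read(kc, ksp, &cs) == -1)
		return (1);
	(void) printf("cpu0 idle ticks: %u\n",
	    cs.cpu_sysinfo.cpu[CPU_IDLE]);
	(void) kstat_close(kc);
	return (0);
}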
750 lines elided