Print this page
5285 pass in cpu_pause_func via pause_cpus
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/sun4/os/prom_subr.c
+++ new/usr/src/uts/sun4/os/prom_subr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/param.h>
28 28 #include <sys/cmn_err.h>
29 29 #include <sys/mutex.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/sysmacros.h>
32 32 #include <sys/machsystm.h>
33 33 #include <sys/archsystm.h>
34 34 #include <sys/x_call.h>
35 35 #include <sys/promif.h>
36 36 #include <sys/prom_isa.h>
37 37 #include <sys/privregs.h>
38 38 #include <sys/vmem.h>
39 39 #include <sys/atomic.h>
40 40 #include <sys/panic.h>
41 41 #include <sys/rwlock.h>
42 42 #include <sys/reboot.h>
43 43 #include <sys/kdi.h>
44 44 #include <sys/kdi_machimpl.h>
45 45
46 46 /*
47 47 * We are called with a pointer to a cell-sized argument array.
48 48 * The service name (the first element of the argument array) is
49 49 * the name of the callback being invoked. When called, we are
 * running on the firmware's trap table as a trusted subroutine
51 51 * of the firmware.
52 52 *
53 53 * We define entry points to allow callback handlers to be dynamically
54 54 * added and removed, to support obpsym, which is a separate module
55 55 * and can be dynamically loaded and unloaded and registers its
56 56 * callback handlers dynamically.
57 57 *
58 58 * Note: The actual callback handler we register, is the assembly lang.
59 59 * glue, callback_handler, which takes care of switching from a 64
60 60 * bit stack and environment to a 32 bit stack and environment, and
61 61 * back again, if the callback handler returns. callback_handler calls
62 62 * vx_handler to process the callback.
63 63 */
64 64
static kmutex_t vx_cmd_lock; /* protect vx_cmd table */

#define	VX_CMD_MAX	10	/* number of usable callback table slots */
#define	ENDADDR(a) &a[sizeof (a) / sizeof (a[0])]
#define	vx_cmd_end	((struct vx_cmd *)(ENDADDR(vx_cmd)))

/*
 * Table of registered OBP callback handlers, searched linearly by
 * vx_handler().  Empty slots have service == NULL.
 */
static struct vx_cmd {
	char	*service;	/* Service name */
	int	take_tba;	/* If Non-zero we take over the tba */
	void	(*func)(cell_t *argument_array); /* handler to invoke */
} vx_cmd[VX_CMD_MAX+1];	/* NOTE(review): +1 appears to be slack; confirm */
76 76
77 77 void
78 78 init_vx_handler(void)
79 79 {
80 80 extern int callback_handler(cell_t *arg_array);
81 81
82 82 /*
83 83 * initialize the lock protecting additions and deletions from
84 84 * the vx_cmd table. At callback time we don't need to grab
85 85 * this lock. Callback handlers do not need to modify the
86 86 * callback handler table.
87 87 */
88 88 mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);
89 89
90 90 /*
91 91 * Tell OBP about our callback handler.
92 92 */
93 93 (void) prom_set_callback((void *)callback_handler);
94 94 }
95 95
96 96 /*
97 97 * Add a kernel callback handler to the kernel's list.
98 98 * The table is static, so if you add a callback handler, increase
99 99 * the value of VX_CMD_MAX. Find the first empty slot and use it.
100 100 */
101 101 void
102 102 add_vx_handler(char *name, int flag, void (*func)(cell_t *))
103 103 {
104 104 struct vx_cmd *vp;
105 105
106 106 mutex_enter(&vx_cmd_lock);
107 107 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
108 108 if (vp->service == NULL) {
109 109 vp->service = name;
110 110 vp->take_tba = flag;
111 111 vp->func = func;
112 112 mutex_exit(&vx_cmd_lock);
113 113 return;
114 114 }
115 115 }
116 116 mutex_exit(&vx_cmd_lock);
117 117
118 118 #ifdef DEBUG
119 119
120 120 /*
121 121 * There must be enough entries to handle all callback entries.
122 122 * Increase VX_CMD_MAX if this happens. This shouldn't happen.
123 123 */
124 124 cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
125 125 /* NOTREACHED */
126 126
127 127 #else /* DEBUG */
128 128
129 129 cmn_err(CE_WARN, "add_vx_handler: Can't add callback hander <%s>",
130 130 name);
131 131
132 132 #endif /* DEBUG */
133 133
134 134 }
135 135
136 136 /*
137 137 * Remove a vx_handler function -- find the name string in the table,
138 138 * and clear it.
139 139 */
140 140 void
141 141 remove_vx_handler(char *name)
142 142 {
143 143 struct vx_cmd *vp;
144 144
145 145 mutex_enter(&vx_cmd_lock);
146 146 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
147 147 if (vp->service == NULL)
148 148 continue;
149 149 if (strcmp(vp->service, name) != 0)
150 150 continue;
151 151 vp->service = 0;
152 152 vp->take_tba = 0;
153 153 vp->func = 0;
154 154 mutex_exit(&vx_cmd_lock);
155 155 return;
156 156 }
157 157 mutex_exit(&vx_cmd_lock);
158 158 cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
159 159 }
160 160
161 161 int
162 162 vx_handler(cell_t *argument_array)
163 163 {
164 164 char *name;
165 165 struct vx_cmd *vp;
166 166 void *old_tba;
167 167
168 168 name = p1275_cell2ptr(*argument_array);
169 169
170 170 for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
171 171 if (vp->service == (char *)0)
172 172 continue;
173 173 if (strcmp(vp->service, name) != 0)
174 174 continue;
175 175 if (vp->take_tba != 0) {
176 176 reestablish_curthread();
177 177 if (tba_taken_over != 0)
178 178 old_tba = set_tba((void *)&trap_table);
179 179 }
180 180 vp->func(argument_array);
181 181 if ((vp->take_tba != 0) && (tba_taken_over != 0))
182 182 (void) set_tba(old_tba);
183 183 return (0); /* Service name was known */
184 184 }
185 185
186 186 return (-1); /* Service name unknown */
187 187 }
188 188
189 189 /*
190 190 * PROM Locking Primitives
191 191 *
192 192 * These routines are called immediately before and immediately after calling
193 193 * into the firmware. The firmware is single-threaded and assumes that the
194 194 * kernel will implement locking to prevent simultaneous service calls. In
195 195 * addition, some service calls (particularly character rendering) can be
196 196 * slow, so we would like to sleep if we cannot acquire the lock to allow the
197 197 * caller's CPU to continue to perform useful work in the interim. Service
198 198 * routines may also be called early in boot as part of slave CPU startup
199 199 * when mutexes and cvs are not yet available (i.e. they are still running on
200 200 * the prom's TLB handlers and cannot touch curthread). Therefore, these
201 201 * routines must reduce to a simple compare-and-swap spin lock when necessary.
202 202 * Finally, kernel code may wish to acquire the firmware lock before executing
203 203 * a block of code that includes service calls, so we also allow the firmware
204 204 * lock to be acquired recursively by the owning CPU after disabling preemption.
205 205 *
206 206 * To meet these constraints, the lock itself is implemented as a compare-and-
207 207 * swap spin lock on the global prom_cpu pointer. We implement recursion by
208 208 * atomically incrementing the integer prom_holdcnt after acquiring the lock.
209 209 * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
210 210 * we disable preemption before acquiring the lock and leave it disabled once
211 211 * the lock is held. The kern_postprom() routine then enables preemption if
212 212 * we drop the lock and prom_holdcnt returns to zero. If the current CPU is
213 213 * an adult and the lock is held by another adult CPU, we can safely sleep
214 214 * until the lock is released. To do so, we acquire the adaptive prom_mutex
215 215 * and then sleep on prom_cv. Therefore, service routines must not be called
216 216 * from above LOCK_LEVEL on any adult CPU. Finally, if recursive entry is
217 217 * attempted on an adult CPU, we must also verify that curthread matches the
218 218 * saved prom_thread (the original owner) to ensure that low-level interrupt
219 219 * threads do not step on other threads running on the same CPU.
220 220 */
221 221
static cpu_t *volatile prom_cpu;	/* CPU holding the prom lock, or NULL */
static kthread_t *volatile prom_thread;	/* thread that acquired the lock */
static uint32_t prom_holdcnt;		/* recursion depth of the prom lock */
static kmutex_t prom_mutex;		/* serializes sleepers on prom_cv */
static kcondvar_t prom_cv;		/* signalled when prom_cpu is cleared */

/*
 * The debugger uses PROM services, and is thus unable to run if any of the
 * CPUs on the system are executing in the PROM at the time of debugger entry.
 * If a CPU is determined to be in the PROM when the debugger is entered,
 * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
 * entry when the given CPU returns from the PROM.  That CPU is then released
 * by the debugger, and is allowed to complete PROM-related work.
 */
int prom_exit_enter_debugger;
237 237
/*
 * Acquire the global PROM lock for the calling CPU, sleeping or spinning
 * as appropriate; see the block comment above for the full protocol.
 * On return the lock is held, prom_holdcnt has been incremented, and
 * prom_thread records the owning thread.
 */
void
kern_preprom(void)
{
	for (;;) {
		/*
		 * Load the current CPU pointer and examine the mutex_ready bit.
		 * It doesn't matter if we are preempted here because we are
		 * only trying to determine if we are in the *set* of mutex
		 * ready CPUs.  We cannot disable preemption until we confirm
		 * that we are running on a CPU in this set, since a call to
		 * kpreempt_disable() requires access to curthread.
		 */
		processorid_t cpuid = getprocessorid();
		cpu_t *cp = cpu[cpuid];
		cpu_t *prcp;

		if (panicstr)
			return; /* just return if we are currently panicking */

		if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
			/*
			 * Disable preemption, and reload the current CPU.  We
			 * can't move from a mutex_ready cpu to a non-ready cpu
			 * so we don't need to re-check cp->cpu_m.mutex_ready.
			 */
			kpreempt_disable();
			cp = CPU;
			ASSERT(cp->cpu_m.mutex_ready);

			/*
			 * Try the lock.  If we don't get the lock, re-enable
			 * preemption and see if we should sleep.  If we are
			 * already the lock holder, remove the effect of the
			 * previous kpreempt_disable() before returning since
			 * preemption was disabled by an earlier kern_preprom.
			 */
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL ||
			    (prcp == cp && prom_thread == curthread)) {
				if (prcp == cp)
					kpreempt_enable();
				break;
			}

			kpreempt_enable();

			/*
			 * We have to be very careful here since both prom_cpu
			 * and prcp->cpu_m.mutex_ready can be changed at any
			 * time by a non mutex_ready cpu holding the lock.
			 * If the owner is mutex_ready, holding prom_mutex
			 * prevents kern_postprom() from completing.  If the
			 * owner isn't mutex_ready, we only know it will clear
			 * prom_cpu before changing cpu_m.mutex_ready, so we
			 * issue a membar after checking mutex_ready and then
			 * re-verify that prom_cpu is still held by the same
			 * cpu before actually proceeding to cv_wait().
			 */
			mutex_enter(&prom_mutex);
			prcp = prom_cpu;
			if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
				membar_consumer();
				if (prcp == prom_cpu)
					cv_wait(&prom_cv, &prom_mutex);
			}
			mutex_exit(&prom_mutex);

		} else {
			/*
			 * If we are not yet mutex_ready, just attempt to grab
			 * the lock.  If we get it or already hold it, break.
			 */
			ASSERT(getpil() == PIL_MAX);
			prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
			if (prcp == NULL || prcp == cp)
				break;
		}
	}

	/*
	 * We now hold the prom_cpu lock.  Increment the hold count by one
	 * and assert our current state before returning to the caller.
	 */
	atomic_inc_32(&prom_holdcnt);
	ASSERT(prom_holdcnt >= 1);
	prom_thread = curthread;
}
325 325
/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.  Panics if the caller
 * is not the lock owner or the hold count is already zero.
 */
void
kern_postprom(void)
{
	processorid_t cpuid = getprocessorid();
	cpu_t *cp = cpu[cpuid];

	if (panicstr)
		return; /* do not modify lock further if we have panicked */

	if (prom_cpu != cp)
		panic("kern_postprom: not owner, cp=%p owner=%p",
		    (void *)cp, (void *)prom_cpu);

	if (prom_holdcnt == 0)
		panic("kern_postprom: prom_holdcnt == 0, owner=%p",
		    (void *)prom_cpu);

	if (atomic_dec_32_nv(&prom_holdcnt) != 0)
		return; /* prom lock is held recursively by this CPU */

	/* Programmed debugger entry requested while we were in the PROM. */
	if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
		kmdb_enter();

	/*
	 * Release order: clear prom_thread, then prom_cpu, each store
	 * followed by a producer barrier so other CPUs observe the
	 * unlock in a consistent order.
	 */
	prom_thread = NULL;
	membar_producer();

	prom_cpu = NULL;
	membar_producer();

	if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
		mutex_enter(&prom_mutex);
		cv_signal(&prom_cv);
		mutex_exit(&prom_mutex);
		/* Undo the kpreempt_disable() done in kern_preprom(). */
		kpreempt_enable();
	}
}
367 367
368 368 /*
369 369 * If the frame buffer device is busy, briefly capture the other CPUs so that
370 370 * another CPU executing code to manipulate the device does not execute at the
371 371 * same time we are rendering characters. Refer to the comments and code in
372 372 * common/os/console.c for more information on these callbacks.
373 373 *
374 374 * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
375 375 * to idling other CPUs. The idling mechanism will cross-trap the other CPUs
376 376 * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
377 377 * them are holding the PROM lock before we idle them and then call into the
378 378 * PROM routines that render characters to the frame buffer.
379 379 */
380 380 int
381 381 console_enter(int busy)
382 382 {
383 383 int s = 0;
384 384
385 385 if (busy && panicstr == NULL) {
386 386 kern_preprom();
387 387 s = splhi();
388 388 idle_other_cpus();
389 389 }
390 390
391 391 return (s);
392 392 }
393 393
394 394 void
395 395 console_exit(int busy, int spl)
396 396 {
397 397 if (busy && panicstr == NULL) {
398 398 resume_other_cpus();
399 399 splx(spl);
400 400 kern_postprom();
↓ open down ↓ |
400 lines elided |
↑ open up ↑ |
401 401 }
402 402 }
403 403
404 404 /*
405 405 * This routine is a special form of pause_cpus(). It ensures that
406 406 * prom functions are callable while the cpus are paused.
407 407 */
408 408 void
409 409 promsafe_pause_cpus(void)
410 410 {
411 - pause_cpus(NULL);
411 + pause_cpus(NULL, NULL);
412 412
413 413 /* If some other cpu is entering or is in the prom, spin */
414 414 while (prom_cpu || mutex_owner(&prom_mutex)) {
415 415
416 416 start_cpus();
417 417 mutex_enter(&prom_mutex);
418 418
419 419 /* Wait for other cpu to exit prom */
420 420 while (prom_cpu)
421 421 cv_wait(&prom_cv, &prom_mutex);
422 422
423 423 mutex_exit(&prom_mutex);
424 - pause_cpus(NULL);
424 + pause_cpus(NULL, NULL);
425 425 }
426 426
427 427 /* At this point all cpus are paused and none are in the prom */
428 428 }
429 429
430 430 /*
431 431 * This routine is a special form of xc_attention(). It ensures that
432 432 * prom functions are callable while the cpus are at attention.
433 433 */
434 434 void
435 435 promsafe_xc_attention(cpuset_t cpuset)
436 436 {
437 437 xc_attention(cpuset);
438 438
439 439 /* If some other cpu is entering or is in the prom, spin */
440 440 while (prom_cpu || mutex_owner(&prom_mutex)) {
441 441
442 442 xc_dismissed(cpuset);
443 443 mutex_enter(&prom_mutex);
444 444
445 445 /* Wait for other cpu to exit prom */
446 446 while (prom_cpu)
447 447 cv_wait(&prom_cv, &prom_mutex);
448 448
449 449 mutex_exit(&prom_mutex);
450 450 xc_attention(cpuset);
451 451 }
452 452
453 453 /* At this point all cpus are paused and none are in the prom */
454 454 }
455 455
456 456
457 457 #if defined(PROM_32BIT_ADDRS)
458 458
459 459 #include <sys/promimpl.h>
460 460 #include <vm/seg_kmem.h>
461 461 #include <sys/kmem.h>
462 462 #include <sys/bootconf.h>
463 463
464 464 /*
465 465 * These routines are only used to workaround "poor feature interaction"
466 466 * in OBP. See bug 4115680 for details.
467 467 *
468 468 * Many of the promif routines need to allocate temporary buffers
469 469 * with 32-bit addresses to pass in/out of the CIF. The lifetime
470 470 * of the buffers is extremely short, they are allocated and freed
471 471 * around the CIF call. We use vmem_alloc() to cache 32-bit memory.
472 472 *
473 473 * Note the code in promplat_free() to prevent exhausting the 32 bit
474 474 * heap during boot.
475 475 */
476 476 static void *promplat_last_free = NULL;
477 477 static size_t promplat_last_size;
478 478 static vmem_t *promplat_arena;
479 479 static kmutex_t promplat_lock; /* protect arena, last_free, and last_size */
480 480
481 481 void *
482 482 promplat_alloc(size_t size)
483 483 {
484 484
485 485 mutex_enter(&promplat_lock);
486 486 if (promplat_arena == NULL) {
487 487 promplat_arena = vmem_create("promplat", NULL, 0, 8,
488 488 segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
489 489 }
490 490 mutex_exit(&promplat_lock);
491 491
492 492 return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
493 493 }
494 494
495 495 /*
496 496 * Delaying the free() of small allocations gets more mileage
497 497 * from pages during boot, otherwise a cycle of allocate/free
498 498 * calls could burn through available heap32 space too quickly.
499 499 */
500 500 void
501 501 promplat_free(void *p, size_t size)
502 502 {
503 503 void *p2 = NULL;
504 504 size_t s2;
505 505
506 506 /*
507 507 * If VM is initialized, clean up any delayed free().
508 508 */
509 509 if (kvseg.s_base != 0 && promplat_last_free != NULL) {
510 510 mutex_enter(&promplat_lock);
511 511 p2 = promplat_last_free;
512 512 s2 = promplat_last_size;
513 513 promplat_last_free = NULL;
514 514 promplat_last_size = 0;
515 515 mutex_exit(&promplat_lock);
516 516 if (p2 != NULL) {
517 517 vmem_free(promplat_arena, p2, s2);
518 518 p2 = NULL;
519 519 }
520 520 }
521 521
522 522 /*
523 523 * Do the free if VM is initialized or it's a large allocation.
524 524 */
525 525 if (kvseg.s_base != 0 || size >= PAGESIZE) {
526 526 vmem_free(promplat_arena, p, size);
527 527 return;
528 528 }
529 529
530 530 /*
531 531 * Otherwise, do the last free request and delay this one.
532 532 */
533 533 mutex_enter(&promplat_lock);
534 534 if (promplat_last_free != NULL) {
535 535 p2 = promplat_last_free;
536 536 s2 = promplat_last_size;
537 537 }
538 538 promplat_last_free = p;
539 539 promplat_last_size = size;
540 540 mutex_exit(&promplat_lock);
541 541
542 542 if (p2 != NULL)
543 543 vmem_free(promplat_arena, p2, s2);
544 544 }
545 545
546 546 void
547 547 promplat_bcopy(const void *src, void *dst, size_t count)
548 548 {
549 549 bcopy(src, dst, count);
550 550 }
551 551
552 552 #endif /* PROM_32BIT_ADDRS */
553 553
554 554 static prom_generation_cookie_t prom_tree_gen;
555 555 static krwlock_t prom_tree_lock;
556 556
557 557 int
558 558 prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
559 559 prom_generation_cookie_t *ckp)
560 560 {
561 561 int chg, rv;
562 562
563 563 rw_enter(&prom_tree_lock, RW_READER);
564 564 /*
565 565 * If the tree has changed since the caller last accessed it
566 566 * pass 1 as the second argument to the callback function,
567 567 * otherwise 0.
568 568 */
569 569 if (ckp != NULL && *ckp != prom_tree_gen) {
570 570 *ckp = prom_tree_gen;
571 571 chg = 1;
572 572 } else
573 573 chg = 0;
574 574 rv = callback(arg, chg);
575 575 rw_exit(&prom_tree_lock);
576 576 return (rv);
577 577 }
578 578
579 579 int
580 580 prom_tree_update(int (*callback)(void *arg), void *arg)
581 581 {
582 582 int rv;
583 583
584 584 rw_enter(&prom_tree_lock, RW_WRITER);
585 585 prom_tree_gen++;
586 586 rv = callback(arg);
587 587 rw_exit(&prom_tree_lock);
588 588 return (rv);
589 589 }
↓ open down ↓ |
155 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX