/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident   "%Z%%M% %I%     %E% SMI"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/cmn_err.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/machsystm.h>
#include <sys/archsystm.h>
#include <sys/x_call.h>
#include <sys/promif.h>
#include <sys/prom_isa.h>
#include <sys/privregs.h>
#include <sys/vmem.h>
#include <sys/atomic.h>
#include <sys/panic.h>
#include <sys/rwlock.h>
#include <sys/reboot.h>
#include <sys/kdi.h>
#include <sys/kdi_machimpl.h>

/*
 * We are called with a pointer to a cell-sized argument array.
 * The service name (the first element of the argument array) is
 * the name of the callback being invoked.  When called, we are
 * running on the firmware's trap table as a trusted subroutine
 * of the firmware.
 *
 * We define entry points so that callback handlers can be added
 * and removed dynamically.  This supports obpsym, a separate module
 * that can be loaded and unloaded at any time and registers its
 * callback handlers on the fly.
 *
 * Note: the handler we actually register with the firmware is the
 * assembly-language glue routine, callback_handler, which takes care
 * of switching from a 64-bit stack and environment to a 32-bit stack
 * and environment, and back again if the callback handler returns.
 * callback_handler calls vx_handler to process the callback.
 */

static kmutex_t vx_cmd_lock;    /* protect vx_cmd table */

#define VX_CMD_MAX      10
#define ENDADDR(a)      &a[sizeof (a) / sizeof (a[0])]
#define vx_cmd_end      ((struct vx_cmd *)(ENDADDR(vx_cmd)))

static struct vx_cmd {
        char    *service;       /* Service name */
        int     take_tba;       /* If non-zero, we take over the tba */
        void    (*func)(cell_t *argument_array);
} vx_cmd[VX_CMD_MAX+1];

void
init_vx_handler(void)
{
        extern int callback_handler(cell_t *arg_array);

        /*
         * Initialize the lock protecting additions and deletions from
         * the vx_cmd table.  At callback time we don't need to grab
         * this lock.  Callback handlers do not need to modify the
         * callback handler table.
         */
        mutex_init(&vx_cmd_lock, NULL, MUTEX_DEFAULT, NULL);

        /*
         * Tell OBP about our callback handler.
         */
        (void) prom_set_callback((void *)callback_handler);
}

/*
 * Add a kernel callback handler to the kernel's list.
 * The table is static, so if you add a callback handler, increase
 * the value of VX_CMD_MAX.  Find the first empty slot and use it.
 * (See the usage sketch following remove_vx_handler() below.)
 */
void
add_vx_handler(char *name, int flag, void (*func)(cell_t *))
{
        struct vx_cmd *vp;

        mutex_enter(&vx_cmd_lock);
        for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
                if (vp->service == NULL) {
                        vp->service = name;
                        vp->take_tba = flag;
                        vp->func = func;
                        mutex_exit(&vx_cmd_lock);
                        return;
                }
        }
        mutex_exit(&vx_cmd_lock);

#ifdef  DEBUG

        /*
         * The table is full; there must be enough entries to hold all
         * registered callback handlers.  This shouldn't happen; increase
         * VX_CMD_MAX if it does.
         */
        cmn_err(CE_PANIC, "add_vx_handler <%s>", name);
        /* NOTREACHED */

#else   /* DEBUG */

        cmn_err(CE_WARN, "add_vx_handler: Can't add callback handler <%s>",
            name);

#endif  /* DEBUG */

}

/*
 * Remove a vx_handler function -- find the name string in the table,
 * and clear it.
 */
void
remove_vx_handler(char *name)
{
        struct vx_cmd *vp;

        mutex_enter(&vx_cmd_lock);
        for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
                if (vp->service == NULL)
                        continue;
                if (strcmp(vp->service, name) != 0)
                        continue;
                vp->service = 0;
                vp->take_tba = 0;
                vp->func = 0;
                mutex_exit(&vx_cmd_lock);
                return;
        }
        mutex_exit(&vx_cmd_lock);
        cmn_err(CE_WARN, "remove_vx_handler: <%s> not found", name);
}
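
/*
 * Usage sketch (illustrative only, not part of the original interface
 * contract): a dynamically loaded module such as obpsym registers a
 * handler in its init path and removes it when it unloads.  The service
 * name "sym-to-value" and the handler below are hypothetical examples.
 *
 *      static void
 *      my_sym_handler(cell_t *argument_array)
 *      {
 *              // argument_array[0] is the service-name cell; decode the
 *              // remaining cells and write results back for the firmware.
 *      }
 *
 *      void
 *      my_module_init(void)
 *      {
 *              // flag 0: do not take over the trap table for this callback
 *              add_vx_handler("sym-to-value", 0, my_sym_handler);
 *      }
 *
 *      void
 *      my_module_fini(void)
 *      {
 *              remove_vx_handler("sym-to-value");
 *      }
 */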

int
vx_handler(cell_t *argument_array)
{
        char *name;
        struct vx_cmd *vp;
        void *old_tba;

        name = p1275_cell2ptr(*argument_array);

        for (vp = vx_cmd; vp < vx_cmd_end; vp++) {
                if (vp->service == (char *)0)
                        continue;
                if (strcmp(vp->service, name) != 0)
                        continue;
                if (vp->take_tba != 0) {
                        reestablish_curthread();
                        if (tba_taken_over != 0)
                                old_tba = set_tba((void *)&trap_table);
                }
                vp->func(argument_array);
                if ((vp->take_tba != 0) && (tba_taken_over != 0))
                        (void) set_tba(old_tba);
                return (0);     /* Service name was known */
        }

        return (-1);            /* Service name unknown */
}

/*
 * PROM Locking Primitives
 *
 * These routines are called immediately before and immediately after calling
 * into the firmware.  The firmware is single-threaded and assumes that the
 * kernel will implement locking to prevent simultaneous service calls.  In
 * addition, some service calls (particularly character rendering) can be
 * slow, so we would like to sleep if we cannot acquire the lock to allow the
 * caller's CPU to continue to perform useful work in the interim.  Service
 * routines may also be called early in boot as part of slave CPU startup
 * when mutexes and cvs are not yet available (i.e. they are still running on
 * the prom's TLB handlers and cannot touch curthread).  Therefore, these
 * routines must reduce to a simple compare-and-swap spin lock when necessary.
 * Finally, kernel code may wish to acquire the firmware lock before executing
 * a block of code that includes service calls, so we also allow the firmware
 * lock to be acquired recursively by the owning CPU after disabling preemption.
 *
 * To meet these constraints, the lock itself is implemented as a compare-and-
 * swap spin lock on the global prom_cpu pointer.  We implement recursion by
 * atomically incrementing the integer prom_holdcnt after acquiring the lock.
 * If the current CPU is an "adult" (determined by testing cpu_m.mutex_ready),
 * we disable preemption before acquiring the lock and leave it disabled once
 * the lock is held.  The kern_postprom() routine then enables preemption if
 * we drop the lock and prom_holdcnt returns to zero.  If the current CPU is
 * an adult and the lock is held by another adult CPU, we can safely sleep
 * until the lock is released.  To do so, we acquire the adaptive prom_mutex
 * and then sleep on prom_cv.  Therefore, service routines must not be called
 * from above LOCK_LEVEL on any adult CPU.  Finally, if recursive entry is
 * attempted on an adult CPU, we must also verify that curthread matches the
 * saved prom_thread (the original owner) to ensure that low-level interrupt
 * threads do not step on other threads running on the same CPU.
 */

static cpu_t *volatile prom_cpu;
static kthread_t *volatile prom_thread;
static uint32_t prom_holdcnt;
static kmutex_t prom_mutex;
static kcondvar_t prom_cv;

/*
 * The debugger uses PROM services, and is thus unable to run if any of the
 * CPUs on the system are executing in the PROM at the time of debugger entry.
 * If a CPU is determined to be in the PROM when the debugger is entered,
 * prom_exit_enter_debugger will be set, thus triggering a programmed debugger
 * entry when the given CPU returns from the PROM.  That CPU is then released
 * by the debugger, and is allowed to complete PROM-related work.
 */
int prom_exit_enter_debugger;

void
kern_preprom(void)
{
        for (;;) {
                /*
                 * Load the current CPU pointer and examine the mutex_ready bit.
                 * It doesn't matter if we are preempted here because we are
                 * only trying to determine if we are in the *set* of mutex
                 * ready CPUs.  We cannot disable preemption until we confirm
                 * that we are running on a CPU in this set, since a call to
                 * kpreempt_disable() requires access to curthread.
                 */
                processorid_t cpuid = getprocessorid();
                cpu_t *cp = cpu[cpuid];
                cpu_t *prcp;

                if (panicstr)
                        return; /* just return if we are currently panicking */

                if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
                        /*
                         * Disable preemption, and reload the current CPU.  We
                         * can't move from a mutex_ready cpu to a non-ready cpu
                         * so we don't need to re-check cp->cpu_m.mutex_ready.
                         */
                        kpreempt_disable();
                        cp = CPU;
                        ASSERT(cp->cpu_m.mutex_ready);

                        /*
                         * Try the lock.  If we don't get the lock, re-enable
                         * preemption and see if we should sleep.  If we are
                         * already the lock holder, remove the effect of the
                         * previous kpreempt_disable() before returning since
                         * preemption was disabled by an earlier kern_preprom.
                         */
                        prcp = casptr((void *)&prom_cpu, NULL, cp);
                        if (prcp == NULL ||
                            (prcp == cp && prom_thread == curthread)) {
                                if (prcp == cp)
                                        kpreempt_enable();
                                break;
                        }

                        kpreempt_enable();

                        /*
                         * We have to be very careful here since both prom_cpu
                         * and prcp->cpu_m.mutex_ready can be changed at any
                         * time by a non mutex_ready cpu holding the lock.
                         * If the owner is mutex_ready, holding prom_mutex
                         * prevents kern_postprom() from completing.  If the
                         * owner isn't mutex_ready, we only know it will clear
                         * prom_cpu before changing cpu_m.mutex_ready, so we
                         * issue a membar after checking mutex_ready and then
                         * re-verify that prom_cpu is still held by the same
                         * cpu before actually proceeding to cv_wait().
                         */
                        mutex_enter(&prom_mutex);
                        prcp = prom_cpu;
                        if (prcp != NULL && prcp->cpu_m.mutex_ready != 0) {
                                membar_consumer();
                                if (prcp == prom_cpu)
                                        cv_wait(&prom_cv, &prom_mutex);
                        }
                        mutex_exit(&prom_mutex);

                } else {
                        /*
                         * If we are not yet mutex_ready, just attempt to grab
                         * the lock.  If we get it or already hold it, break.
                         */
                        ASSERT(getpil() == PIL_MAX);
                        prcp = casptr((void *)&prom_cpu, NULL, cp);
                        if (prcp == NULL || prcp == cp)
                                break;
                }
        }

        /*
         * We now hold the prom_cpu lock.  Increment the hold count by one
         * and assert our current state before returning to the caller.
         */
        atomic_add_32(&prom_holdcnt, 1);
        ASSERT(prom_holdcnt >= 1);
        prom_thread = curthread;
}

/*
 * Drop the prom lock if it is held by the current CPU.  If the lock is held
 * recursively, return without clearing prom_cpu.  If the hold count is now
 * zero, clear prom_cpu and cv_signal any waiting CPU.
 */
void
kern_postprom(void)
{
        processorid_t cpuid = getprocessorid();
        cpu_t *cp = cpu[cpuid];

        if (panicstr)
                return; /* do not modify lock further if we have panicked */

        if (prom_cpu != cp)
                panic("kern_postprom: not owner, cp=%p owner=%p",
                    (void *)cp, (void *)prom_cpu);

        if (prom_holdcnt == 0)
                panic("kern_postprom: prom_holdcnt == 0, owner=%p",
                    (void *)prom_cpu);

        if (atomic_add_32_nv(&prom_holdcnt, -1) != 0)
                return; /* prom lock is held recursively by this CPU */

        if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
                kmdb_enter();

        prom_thread = NULL;
        membar_producer();

        prom_cpu = NULL;
        membar_producer();

        if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
                mutex_enter(&prom_mutex);
                cv_signal(&prom_cv);
                mutex_exit(&prom_mutex);
                kpreempt_enable();
        }
}
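
/*
 * Usage sketch (illustrative only): kernel code brackets firmware service
 * calls with kern_preprom()/kern_postprom().  The promif layer typically
 * installs these as its pre/post hooks (via prom_set_preprom() and
 * prom_set_postprom()) and performs the bracketing itself; prom_getversion()
 * below is just a placeholder for any PROM service call.
 *
 *      kern_preprom();                 // acquire the firmware lock
 *      (void) prom_getversion();       // one or more PROM service calls
 *      kern_postprom();                // drop the lock, re-enable preemption
 *
 * Because the lock is recursive for the owning CPU, a caller may also take
 * it once around a block of code that makes several service calls.
 */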

/*
 * If the frame buffer device is busy, briefly capture the other CPUs so that
 * another CPU executing code to manipulate the device does not execute at the
 * same time we are rendering characters.  Refer to the comments and code in
 * common/os/console.c for more information on these callbacks.
 *
 * Notice that we explicitly acquire the PROM lock using kern_preprom() prior
 * to idling other CPUs.  The idling mechanism will cross-trap the other CPUs
 * and have them spin at MAX(%pil, XCALL_PIL), so we must be sure that none of
 * them are holding the PROM lock before we idle them and then call into the
 * PROM routines that render characters to the frame buffer.
 */
int
console_enter(int busy)
{
        int s = 0;

        if (busy && panicstr == NULL) {
                kern_preprom();
                s = splhi();
                idle_other_cpus();
        }

        return (s);
}

void
console_exit(int busy, int spl)
{
        if (busy && panicstr == NULL) {
                resume_other_cpus();
                splx(spl);
                kern_postprom();
        }
}
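
/*
 * Usage sketch (illustrative only): the console layer brackets PROM
 * rendering with these callbacks, saving the value returned by
 * console_enter() and handing it back to console_exit().  Here fb_busy
 * is a placeholder for the device-busy flag and prom_putchar() stands in
 * for whatever PROM routine does the rendering.
 *
 *      int s = console_enter(fb_busy);
 *      prom_putchar(c);                // render via the firmware
 *      console_exit(fb_busy, s);
 */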

/*
 * This routine is a special form of pause_cpus().  It ensures that
 * prom functions are callable while the cpus are paused.
 */
void
promsafe_pause_cpus(void)
{
        pause_cpus(NULL, NULL);

        /* If some other cpu is entering or is in the prom, spin */
        while (prom_cpu || mutex_owner(&prom_mutex)) {

                start_cpus();
                mutex_enter(&prom_mutex);

                /* Wait for other cpu to exit prom */
                while (prom_cpu)
                        cv_wait(&prom_cv, &prom_mutex);

                mutex_exit(&prom_mutex);
                pause_cpus(NULL, NULL);
        }

        /* At this point all cpus are paused and none are in the prom */
}

/*
 * This routine is a special form of xc_attention().  It ensures that
 * prom functions are callable while the cpus are at attention.
 */
void
promsafe_xc_attention(cpuset_t cpuset)
{
        xc_attention(cpuset);

        /* If some other cpu is entering or is in the prom, spin */
        while (prom_cpu || mutex_owner(&prom_mutex)) {

                xc_dismissed(cpuset);
                mutex_enter(&prom_mutex);

                /* Wait for other cpu to exit prom */
                while (prom_cpu)
                        cv_wait(&prom_cv, &prom_mutex);

                mutex_exit(&prom_mutex);
                xc_attention(cpuset);
        }

        /* At this point all cpus are at attention and none are in the prom */
}
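
/*
 * Usage sketch (illustrative only): a caller that must quiesce the other
 * CPUs while still being able to make PROM calls pairs promsafe_pause_cpus()
 * with start_cpus(), just as it would with pause_cpus().  The work in the
 * middle is a placeholder.
 *
 *      mutex_enter(&cpu_lock);
 *      promsafe_pause_cpus();
 *      // ... inspect or update state, possibly calling PROM services ...
 *      start_cpus();
 *      mutex_exit(&cpu_lock);
 */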


#if defined(PROM_32BIT_ADDRS)

#include <sys/promimpl.h>
#include <vm/seg_kmem.h>
#include <sys/kmem.h>
#include <sys/bootconf.h>

/*
 * These routines are only used to work around "poor feature interaction"
 * in OBP.  See bug 4115680 for details.
 *
 * Many of the promif routines need to allocate temporary buffers
 * with 32-bit addresses to pass in/out of the CIF.  The lifetime
 * of the buffers is extremely short: they are allocated and freed
 * around the CIF call.  We use vmem_alloc() to cache 32-bit memory.
 *
 * Note the code in promplat_free() to prevent exhausting the 32-bit
 * heap during boot.
 */
static void *promplat_last_free = NULL;
static size_t promplat_last_size;
static vmem_t *promplat_arena;
static kmutex_t promplat_lock;  /* protect arena, last_free, and last_size */

void *
promplat_alloc(size_t size)
{
        mutex_enter(&promplat_lock);
        if (promplat_arena == NULL) {
                promplat_arena = vmem_create("promplat", NULL, 0, 8,
                    segkmem_alloc, segkmem_free, heap32_arena, 0, VM_SLEEP);
        }
        mutex_exit(&promplat_lock);

        return (vmem_alloc(promplat_arena, size, VM_NOSLEEP));
}

/*
 * Delaying the free() of small allocations gets more mileage
 * from pages during boot; otherwise a cycle of allocate/free
 * calls could burn through available heap32 space too quickly.
 */
void
promplat_free(void *p, size_t size)
{
        void *p2 = NULL;
        size_t s2;

        /*
         * If VM is initialized, clean up any delayed free().
         */
        if (kvseg.s_base != 0 && promplat_last_free != NULL) {
                mutex_enter(&promplat_lock);
                p2 = promplat_last_free;
                s2 = promplat_last_size;
                promplat_last_free = NULL;
                promplat_last_size = 0;
                mutex_exit(&promplat_lock);
                if (p2 != NULL) {
                        vmem_free(promplat_arena, p2, s2);
                        p2 = NULL;
                }
        }

        /*
         * Do the free if VM is initialized or it's a large allocation.
         */
        if (kvseg.s_base != 0 || size >= PAGESIZE) {
                vmem_free(promplat_arena, p, size);
                return;
        }

        /*
         * Otherwise, process any previously delayed free request
         * and delay this one.
         */
        mutex_enter(&promplat_lock);
        if (promplat_last_free != NULL) {
                p2 = promplat_last_free;
                s2 = promplat_last_size;
        }
        promplat_last_free = p;
        promplat_last_size = size;
        mutex_exit(&promplat_lock);

        if (p2 != NULL)
                vmem_free(promplat_arena, p2, s2);
}

void
promplat_bcopy(const void *src, void *dst, size_t count)
{
        bcopy(src, dst, count);
}
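
/*
 * Usage sketch (illustrative only): a promif routine copies a caller's
 * buffer into 32-bit addressable memory around a CIF call.  The names
 * kbuf and len are placeholders.
 *
 *      char *p32 = promplat_alloc(len);
 *      if (p32 != NULL) {
 *              promplat_bcopy(kbuf, p32, len);
 *              // ... pass p32 to the CIF service ...
 *              promplat_bcopy(p32, kbuf, len);
 *              promplat_free(p32, len);
 *      }
 */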

#endif /* PROM_32BIT_ADDRS */

static prom_generation_cookie_t prom_tree_gen;
static krwlock_t prom_tree_lock;

int
prom_tree_access(int (*callback)(void *arg, int has_changed), void *arg,
    prom_generation_cookie_t *ckp)
{
        int chg, rv;

        rw_enter(&prom_tree_lock, RW_READER);
        /*
         * If the tree has changed since the caller last accessed it,
         * pass 1 as the second argument to the callback function;
         * otherwise pass 0.
         */
        if (ckp != NULL && *ckp != prom_tree_gen) {
                *ckp = prom_tree_gen;
                chg = 1;
        } else
                chg = 0;
        rv = callback(arg, chg);
        rw_exit(&prom_tree_lock);
        return (rv);
}

int
prom_tree_update(int (*callback)(void *arg), void *arg)
{
        int rv;

        rw_enter(&prom_tree_lock, RW_WRITER);
        prom_tree_gen++;
        rv = callback(arg);
        rw_exit(&prom_tree_lock);
        return (rv);
}
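
/*
 * Usage sketch (illustrative only): a reader keeps a generation cookie so
 * it can tell whether the device tree changed since its last visit; a
 * writer bumps the generation under the write lock.  The callback name
 * and cookie variable below are hypothetical.
 *
 *      static prom_generation_cookie_t my_gen;
 *
 *      static int
 *      my_walk(void *arg, int has_changed)
 *      {
 *              // if (has_changed) rebuild any cached view of the tree,
 *              // then walk the tree under the read lock
 *              return (0);
 *      }
 *
 *      (void) prom_tree_access(my_walk, NULL, &my_gen);
 */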