5045 use atomic_{inc,dec}_* instead of atomic_add_*
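For reference, two webrev frames follow: first the pre-change code using atomic_add_32()/atomic_add_32_nv(), then the same routines converted to atomic_inc_32()/atomic_dec_32_nv(). The substitution in isolation looks like the minimal userland sketch below, assuming an illumos/Solaris build where <atomic.h> supplies these primitives; the counter and the wrapper functions are illustrative names, not from the source:

        #include <atomic.h>
        #include <stdint.h>

        static volatile uint32_t holds;

        /* Old style: +1/-1 expressed as general adds. */
        void
        hold_old(void)
        {
                atomic_add_32(&holds, 1);
        }

        uint32_t
        rele_old(void)
        {
                return (atomic_add_32_nv(&holds, -1));  /* returns new value */
        }

        /* New style: dedicated increment/decrement entry points. */
        void
        hold_new(void)
        {
                atomic_inc_32(&holds);
        }

        uint32_t
        rele_new(void)
        {
                return (atomic_dec_32_nv(&holds));      /* returns new value */
        }

The two forms are semantically identical for a delta of one; the inc/dec names state the intent directly and avoid passing a signed -1 against an unsigned counter.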


 301                                         cv_wait(&prom_cv, &prom_mutex);
 302                         }
 303                         mutex_exit(&prom_mutex);
 304 
 305                 } else {
 306                         /*
 307                          * If we are not yet mutex_ready, just attempt to grab
 308                          * the lock.  If we get it or already hold it, break.
 309                          */
 310                         ASSERT(getpil() == PIL_MAX);
 311                         prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
 312                         if (prcp == NULL || prcp == cp)
 313                                 break;
 314                 }
 315         }
 316 
 317         /*
 318          * We now hold the prom_cpu lock.  Increment the hold count by one
 319          * and assert our current state before returning to the caller.
 320          */
 321         atomic_add_32(&prom_holdcnt, 1);
 322         ASSERT(prom_holdcnt >= 1);
 323         prom_thread = curthread;
 324 }
 325 
 326 /*
 327  * Drop the prom lock if it is held by the current CPU.  If the lock is held
 328  * recursively, return without clearing prom_cpu.  If the hold count is now
 329  * zero, clear prom_cpu and cv_signal any waiting CPU.
 330  */
 331 void
 332 kern_postprom(void)
 333 {
 334         processorid_t cpuid = getprocessorid();
 335         cpu_t *cp = cpu[cpuid];
 336 
 337         if (panicstr)
 338                 return; /* do not modify lock further if we have panicked */
 339 
 340         if (prom_cpu != cp)
 341                 panic("kern_postprom: not owner, cp=%p owner=%p",
 342                     (void *)cp, (void *)prom_cpu);
 343 
 344         if (prom_holdcnt == 0)
 345                 panic("kern_postprom: prom_holdcnt == 0, owner=%p",
 346                     (void *)prom_cpu);
 347 
 348         if (atomic_add_32_nv(&prom_holdcnt, -1) != 0)
 349                 return; /* prom lock is held recursively by this CPU */
 350 
 351         if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
 352                 kmdb_enter();
 353 
 354         prom_thread = NULL;
 355         membar_producer();
 356 
 357         prom_cpu = NULL;
 358         membar_producer();
 359 
 360         if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
 361                 mutex_enter(&prom_mutex);
 362                 cv_signal(&prom_cv);
 363                 mutex_exit(&prom_mutex);
 364                 kpreempt_enable();
 365         }
 366 }
 367 
 368 /*
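The frame above is the pre-change code; the frame below shows the same routines after the conversion, differing only in the atomic calls at lines 321 and 348. Both implement the same protocol: a CPU that is not yet mutex_ready spins on atomic_cas_ptr() to install itself in prom_cpu, and every successful entry bumps prom_holdcnt so that recursive entries unwind correctly in kern_postprom(). A stripped-down userland sketch of that acquire/release protocol, again assuming illumos <atomic.h>; lock_grab(), lock_rele(), owner, and holdcnt are hypothetical stand-ins, and the cv_wait() path used by mutex-ready CPUs is omitted:

        #include <atomic.h>
        #include <assert.h>
        #include <stddef.h>
        #include <stdint.h>

        static void *volatile owner;            /* like prom_cpu: owner or NULL */
        static volatile uint32_t holdcnt;       /* like prom_holdcnt: depth */

        void
        lock_grab(void *self)
        {
                void *prcp;

                for (;;) {
                        /* Install ourselves if free; re-entry also succeeds. */
                        prcp = atomic_cas_ptr((void *)&owner, NULL, self);
                        if (prcp == NULL || prcp == self)
                                break;
                }
                atomic_inc_32(&holdcnt);        /* count this entry */
        }

        void
        lock_rele(void *self)
        {
                assert(owner == self);
                if (atomic_dec_32_nv(&holdcnt) != 0)
                        return;         /* held recursively; keep ownership */
                owner = NULL;           /* outermost release: free the lock */
        }

Only the outermost release clears the owner, which is exactly what the atomic_dec_32_nv() test in kern_postprom() checks.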

 301                                         cv_wait(&prom_cv, &prom_mutex);
 302                         }
 303                         mutex_exit(&prom_mutex);
 304 
 305                 } else {
 306                         /*
 307                          * If we are not yet mutex_ready, just attempt to grab
 308                          * the lock.  If we get it or already hold it, break.
 309                          */
 310                         ASSERT(getpil() == PIL_MAX);
 311                         prcp = atomic_cas_ptr((void *)&prom_cpu, NULL, cp);
 312                         if (prcp == NULL || prcp == cp)
 313                                 break;
 314                 }
 315         }
 316 
 317         /*
 318          * We now hold the prom_cpu lock.  Increment the hold count by one
 319          * and assert our current state before returning to the caller.
 320          */
 321         atomic_inc_32(&prom_holdcnt);
 322         ASSERT(prom_holdcnt >= 1);
 323         prom_thread = curthread;
 324 }
 325 
 326 /*
 327  * Drop the prom lock if it is held by the current CPU.  If the lock is held
 328  * recursively, return without clearing prom_cpu.  If the hold count is now
 329  * zero, clear prom_cpu and cv_signal any waiting CPU.
 330  */
 331 void
 332 kern_postprom(void)
 333 {
 334         processorid_t cpuid = getprocessorid();
 335         cpu_t *cp = cpu[cpuid];
 336 
 337         if (panicstr)
 338                 return; /* do not modify lock further if we have panicked */
 339 
 340         if (prom_cpu != cp)
 341                 panic("kern_postprom: not owner, cp=%p owner=%p",
 342                     (void *)cp, (void *)prom_cpu);
 343 
 344         if (prom_holdcnt == 0)
 345                 panic("kern_postprom: prom_holdcnt == 0, owner=%p",
 346                     (void *)prom_cpu);
 347 
 348         if (atomic_dec_32_nv(&prom_holdcnt) != 0)
 349                 return; /* prom lock is held recursively by this CPU */
 350 
 351         if ((boothowto & RB_DEBUG) && prom_exit_enter_debugger)
 352                 kmdb_enter();
 353 
 354         prom_thread = NULL;
 355         membar_producer();
 356 
 357         prom_cpu = NULL;
 358         membar_producer();
 359 
 360         if (CPU_IN_SET(cpu_ready_set, cpuid) && cp->cpu_m.mutex_ready) {
 361                 mutex_enter(&prom_mutex);
 362                 cv_signal(&prom_cv);
 363                 mutex_exit(&prom_mutex);
 364                 kpreempt_enable();
 365         }
 366 }
 367 
 368 /*
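One ordering detail in kern_postprom() worth calling out: prom_thread is cleared before prom_cpu, with membar_producer() after each store, so a CPU that later observes prom_cpu == NULL is guaranteed to also see prom_thread already NULL. A minimal sketch of how such producer-side barriers pair with membar_consumer() on the reading side, assuming illumos <atomic.h>; publish_release(), saw_release(), and the two variables are hypothetical:

        #include <atomic.h>
        #include <stddef.h>

        static void *volatile owner_thread;     /* cleared first by releaser */
        static void *volatile owner_cpu;        /* cleared last; publishes it */

        void
        publish_release(void)
        {
                owner_thread = NULL;
                membar_producer();      /* thread store visible before... */
                owner_cpu = NULL;       /* ...the store that publishes */
        }

        int
        saw_release(void)
        {
                if (owner_cpu != NULL)
                        return (0);
                membar_consumer();      /* order loads to match the stores */
                return (owner_thread == NULL);  /* holds once cpu was NULL */
        }

Without the consumer-side barrier, a weakly ordered CPU could reorder the reader's loads and observe a stale owner_thread even after owner_cpu reads as NULL.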