5045 use atomic_{inc,dec}_* instead of atomic_add_*
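
The substitution is mechanical: every atomic_add_32(&x, 1) / atomic_add_32(&x, -1) pair becomes atomic_inc_32(&x) / atomic_dec_32(&x), and the value-returning atomic_add_32_nv() calls become atomic_inc_32_nv() / atomic_dec_32_nv(). A minimal userland sketch of the before/after idiom, assuming an illumos or Solaris system where <atomic.h> (atomic_ops(3C)) is available:

#include <atomic.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
	volatile uint32_t util = 0;

	/* Old idiom: pass an explicit delta of 1 or -1. */
	atomic_add_32(&util, 1);
	atomic_add_32(&util, -1);

	/* New idiom: dedicated entry points state the intent. */
	atomic_inc_32(&util);
	atomic_dec_32(&util);

	(void) printf("util = %" PRIu32 "\n", util);	/* prints "util = 0" */
	return (0);
}

Besides reading more clearly, the inc/dec forms avoid passing a signed constant to an unsigned add, which is the cleanup this bug tracks across the tree.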

          --- old/usr/src/uts/common/disp/cmt.c
          +++ new/usr/src/uts/common/disp/cmt.c
[... 1350 lines elided ...]
1351 1351  }
1352 1352  
1353 1353  /* ARGSUSED */
1354 1354  static void
1355 1355  cmt_ev_thread_swtch(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
1356 1356                      kthread_t *new)
1357 1357  {
1358 1358          pg_cmt_t        *cmt_pg = (pg_cmt_t *)pg;
1359 1359  
1360 1360          if (old == cp->cpu_idle_thread) {
1361      -                atomic_add_32(&cmt_pg->cmt_utilization, 1);
     1361 +                atomic_inc_32(&cmt_pg->cmt_utilization);
1362 1362          } else if (new == cp->cpu_idle_thread) {
1363      -                atomic_add_32(&cmt_pg->cmt_utilization, -1);
     1363 +                atomic_dec_32(&cmt_pg->cmt_utilization);
1364 1364          }
1365 1365  }
1366 1366  
1367 1367  /*
1368 1368   * Macro to test whether a thread is currently runnable on a CPU in a PG.
1369 1369   */
1370 1370  #define THREAD_RUNNABLE_IN_PG(t, pg)                                    \
1371 1371          ((t)->t_state == TS_RUN &&                                      \
1372 1372              (t)->t_disp_queue->disp_cpu &&                              \
1373 1373              bitset_in_set(&(pg)->cmt_cpus_actv_set,                     \
[... 2 lines elided ...]
1376 1376  static void
1377 1377  cmt_ev_thread_swtch_pwr(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
1378 1378      kthread_t *new)
1379 1379  {
1380 1380          pg_cmt_t        *cmt = (pg_cmt_t *)pg;
1381 1381          cpupm_domain_t  *dom;
1382 1382          uint32_t        u;
1383 1383  
1384 1384          if (old == cp->cpu_idle_thread) {
1385 1385                  ASSERT(new != cp->cpu_idle_thread);
1386      -                u = atomic_add_32_nv(&cmt->cmt_utilization, 1);
     1386 +                u = atomic_inc_32_nv(&cmt->cmt_utilization);
1387 1387                  if (u == 1) {
1388 1388                          /*
1389 1389                           * Notify the CPU power manager that the domain
1390 1390                           * is non-idle.
1391 1391                           */
1392 1392                          dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
1393 1393                          cpupm_utilization_event(cp, now, dom,
1394 1394                              CPUPM_DOM_BUSY_FROM_IDLE);
1395 1395                  }
1396 1396          } else if (new == cp->cpu_idle_thread) {
1397 1397                  ASSERT(old != cp->cpu_idle_thread);
1398      -                u = atomic_add_32_nv(&cmt->cmt_utilization, -1);
     1398 +                u = atomic_dec_32_nv(&cmt->cmt_utilization);
1399 1399                  if (u == 0) {
1400 1400                          /*
1401 1401                           * The domain is idle, notify the CPU power
1402 1402                           * manager.
1403 1403                           *
1404 1404                           * Avoid notifying if the thread is simply migrating
1405 1405                           * between CPUs in the domain.
1406 1406                           */
1407 1407                          if (!THREAD_RUNNABLE_IN_PG(old, cmt)) {
1408 1408                                  dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
[... 557 lines elided ...]
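
The second hunk depends on the value-returning forms: atomic_inc_32_nv() and atomic_dec_32_nv() return the updated value, which lets cmt_ev_thread_swtch_pwr() detect exactly the 0->1 and 1->0 utilization edges that mark a power domain going busy or idle. A hedged sketch of that idiom follows; the function and variable names below are illustrative, not taken from cmt.c:

#include <atomic.h>
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t domain_utilization;

static void
thread_became_runnable(void)
{
	/* atomic_inc_32_nv() returns the incremented value. */
	if (atomic_inc_32_nv(&domain_utilization) == 1)
		(void) printf("domain busy from idle\n");
}

static void
thread_went_idle(void)
{
	/* atomic_dec_32_nv() returns the decremented value. */
	if (atomic_dec_32_nv(&domain_utilization) == 0)
		(void) printf("domain idle\n");
}

int
main(void)
{
	thread_became_runnable();	/* 0 -> 1: reports busy */
	thread_went_idle();		/* 1 -> 0: reports idle */
	return (0);
}

Because the increment and the read of the new value are a single atomic operation, exactly one racing thread observes each edge, which is what makes the u == 1 and u == 0 checks in the patched code safe without a lock.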