5045 use atomic_{inc,dec}_* instead of atomic_add_*
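
This change replaces calls of the form atomic_add_32(&x, 1) and
atomic_add_32(&x, -1) with the dedicated atomic_inc_32() and
atomic_dec_32() entry points, and likewise atomic_add_32_nv() with
atomic_inc_32_nv()/atomic_dec_32_nv(). The two idioms are equivalent;
the inc/dec forms state the intent directly and drop the hard-coded
+1/-1 deltas. A minimal userland sketch of the equivalence, assuming
illumos's <atomic.h> (the kernel's <sys/atomic.h> offers the same
interfaces):

    #include <sys/types.h>
    #include <atomic.h>
    #include <assert.h>

    int
    main(void)
    {
            volatile uint32_t n = 0;

            atomic_add_32(&n, 1);           /* old idiom */
            atomic_inc_32(&n);              /* new idiom, same effect */
            assert(n == 2);

            /* The *_nv variants return the updated ("new") value. */
            assert(atomic_add_32_nv(&n, -1) == 1);  /* old idiom */
            assert(atomic_dec_32_nv(&n) == 0);      /* new idiom */

            return (0);
    }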

Old version, using atomic_add_32() with explicit +1/-1 deltas:

1341                 if (pg->cmt_children != NULL &&
1342                     GROUP_SIZE(pg->cmt_children) == 1) {
1343                         child = GROUP_ACCESS(pg->cmt_children, 0);
1344                         if ((child->cmt_policy & CMT_BALANCE) == 0) {
1345                                 cmt_hier_promote(child, NULL);
1346                         }
1347                 }
1348                 pg->cmt_policy = CMT_BALANCE;
1349         }
1350         return (0);
1351 }
1352 
1353 /* ARGSUSED */
1354 static void
1355 cmt_ev_thread_swtch(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
1356                     kthread_t *new)
1357 {
1358         pg_cmt_t        *cmt_pg = (pg_cmt_t *)pg;
1359 
1360         if (old == cp->cpu_idle_thread) {
1361                 atomic_add_32(&cmt_pg->cmt_utilization, 1);
1362         } else if (new == cp->cpu_idle_thread) {
1363                 atomic_add_32(&cmt_pg->cmt_utilization, -1);
1364         }
1365 }
1366 
1367 /*
1368  * Macro to test whether a thread is currently runnable on a CPU in a PG.
1369  */
1370 #define THREAD_RUNNABLE_IN_PG(t, pg)                               \
1371         ((t)->t_state == TS_RUN &&                                 \
1372             (t)->t_disp_queue->disp_cpu &&                         \
1373             bitset_in_set(&(pg)->cmt_cpus_actv_set,                \
1374             (t)->t_disp_queue->disp_cpu->cpu_seqid))
1375 
1376 static void
1377 cmt_ev_thread_swtch_pwr(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
1378     kthread_t *new)
1379 {
1380         pg_cmt_t        *cmt = (pg_cmt_t *)pg;
1381         cpupm_domain_t  *dom;
1382         uint32_t        u;
1383 
1384         if (old == cp->cpu_idle_thread) {
1385                 ASSERT(new != cp->cpu_idle_thread);
1386                 u = atomic_add_32_nv(&cmt->cmt_utilization, 1);
1387                 if (u == 1) {
1388                         /*
1389                          * Notify the CPU power manager that the domain
1390                          * is non-idle.
1391                          */
1392                         dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
1393                         cpupm_utilization_event(cp, now, dom,
1394                             CPUPM_DOM_BUSY_FROM_IDLE);
1395                 }
1396         } else if (new == cp->cpu_idle_thread) {
1397                 ASSERT(old != cp->cpu_idle_thread);
1398                 u = atomic_add_32_nv(&cmt->cmt_utilization, -1);
1399                 if (u == 0) {
1400                         /*
1401                          * The domain is idle, notify the CPU power
1402                          * manager.
1403                          *
1404                          * Avoid notifying if the thread is simply migrating
1405                          * between CPUs in the domain.
1406                          */
1407                         if (!THREAD_RUNNABLE_IN_PG(old, cmt)) {
1408                                 dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
1409                                 cpupm_utilization_event(cp, now, dom,
1410                                     CPUPM_DOM_IDLE_FROM_BUSY);
1411                         }
1412                 }
1413         }
1414 }
1415 
1416 /* ARGSUSED */
1417 static void
1418 cmt_ev_thread_remain_pwr(pg_t *pg, cpu_t *cp, kthread_t *t)

New version, using the dedicated atomic_inc_32()/atomic_dec_32() and
their *_nv counterparts:

1341                 if (pg->cmt_children != NULL &&
1342                     GROUP_SIZE(pg->cmt_children) == 1) {
1343                         child = GROUP_ACCESS(pg->cmt_children, 0);
1344                         if ((child->cmt_policy & CMT_BALANCE) == 0) {
1345                                 cmt_hier_promote(child, NULL);
1346                         }
1347                 }
1348                 pg->cmt_policy = CMT_BALANCE;
1349         }
1350         return (0);
1351 }
1352 
1353 /* ARGSUSED */
1354 static void
1355 cmt_ev_thread_swtch(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
1356                     kthread_t *new)
1357 {
1358         pg_cmt_t        *cmt_pg = (pg_cmt_t *)pg;
1359 
1360         if (old == cp->cpu_idle_thread) {
1361                 atomic_inc_32(&cmt_pg->cmt_utilization);
1362         } else if (new == cp->cpu_idle_thread) {
1363                 atomic_dec_32(&cmt_pg->cmt_utilization);
1364         }
1365 }
1366 
1367 /*
1368  * Macro to test whether a thread is currently runnable on a CPU in a PG.
1369  */
1370 #define THREAD_RUNNABLE_IN_PG(t, pg)                               \
1371         ((t)->t_state == TS_RUN &&                                 \
1372             (t)->t_disp_queue->disp_cpu &&                         \
1373             bitset_in_set(&(pg)->cmt_cpus_actv_set,                \
1374             (t)->t_disp_queue->disp_cpu->cpu_seqid))
1375 
1376 static void
1377 cmt_ev_thread_swtch_pwr(pg_t *pg, cpu_t *cp, hrtime_t now, kthread_t *old,
1378     kthread_t *new)
1379 {
1380         pg_cmt_t        *cmt = (pg_cmt_t *)pg;
1381         cpupm_domain_t  *dom;
1382         uint32_t        u;
1383 
1384         if (old == cp->cpu_idle_thread) {
1385                 ASSERT(new != cp->cpu_idle_thread);
1386                 u = atomic_inc_32_nv(&cmt->cmt_utilization);
1387                 if (u == 1) {
1388                         /*
1389                          * Notify the CPU power manager that the domain
1390                          * is non-idle.
1391                          */
1392                         dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
1393                         cpupm_utilization_event(cp, now, dom,
1394                             CPUPM_DOM_BUSY_FROM_IDLE);
1395                 }
1396         } else if (new == cp->cpu_idle_thread) {
1397                 ASSERT(old != cp->cpu_idle_thread);
1398                 u = atomic_dec_32_nv(&cmt->cmt_utilization);
1399                 if (u == 0) {
1400                         /*
1401                          * The domain is idle, notify the CPU power
1402                          * manager.
1403                          *
1404                          * Avoid notifying if the thread is simply migrating
1405                          * between CPUs in the domain.
1406                          */
1407                         if (!THREAD_RUNNABLE_IN_PG(old, cmt)) {
1408                                 dom = (cpupm_domain_t *)cmt->cmt_pg.pghw_handle;
1409                                 cpupm_utilization_event(cp, now, dom,
1410                                     CPUPM_DOM_IDLE_FROM_BUSY);
1411                         }
1412                 }
1413         }
1414 }
1415 
1416 /* ARGSUSED */
1417 static void
1418 cmt_ev_thread_remain_pwr(pg_t *pg, cpu_t *cp, kthread_t *t)
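
For reference, the *_nv ("new value") variants are kept in
cmt_ev_thread_swtch_pwr() because the returned count is what detects
the 0 -> 1 (group goes busy) and 1 -> 0 (group goes idle) transitions
without taking a lock. Below is a standalone sketch of that
edge-detection pattern, again assuming illumos's userland <atomic.h>;
domain_busy() and domain_idle() are hypothetical stand-ins for the
cpupm_utilization_event() notifications:

    #include <sys/types.h>
    #include <atomic.h>
    #include <stdio.h>

    static volatile uint32_t utilization;

    static void
    domain_busy(void)
    {
            (void) puts("CPUPM_DOM_BUSY_FROM_IDLE");
    }

    static void
    domain_idle(void)
    {
            (void) puts("CPUPM_DOM_IDLE_FROM_BUSY");
    }

    /* A CPU in the group takes on its first non-idle thread. */
    static void
    thread_leaves_idle(void)
    {
            if (atomic_inc_32_nv(&utilization) == 1)
                    domain_busy();          /* 0 -> 1: group went busy */
    }

    /* A CPU in the group switches back to its idle thread. */
    static void
    thread_enters_idle(void)
    {
            if (atomic_dec_32_nv(&utilization) == 0)
                    domain_idle();          /* 1 -> 0: group went idle */
    }

    int
    main(void)
    {
            thread_leaves_idle();   /* prints the busy event */
            thread_leaves_idle();   /* no event: already busy */
            thread_enters_idle();   /* no event: one runner left */
            thread_enters_idle();   /* prints the idle event */
            return (0);
    }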