Print this page
5045 use atomic_{inc,dec}_* instead of atomic_add_*


 921 {
 922         kcpc_ctx_t      *ctx;
 923         kthread_t       *t = curthread;
 924         int             i;
 925 
 926         /*
 927          * On both x86 and UltraSPARC, we may deliver the high-level
 928          * interrupt in kernel mode, just after we've started to run an
 929          * interrupt thread.  (That's because the hardware helpfully
 930          * delivers the overflow interrupt some random number of cycles
 931          * after the instruction that caused the overflow by which time
 932          * we're in some part of the kernel, not necessarily running on
 933          * the right thread).
 934          *
 935          * Check for this case here -- find the pinned thread
 936          * that was running when the interrupt went off.
 937          */
 938         if (t->t_flag & T_INTR_THREAD) {
 939                 klwp_t *lwp;
 940 
 941                 atomic_add_32(&kcpc_intrctx_count, 1);
 942 
 943                 /*
 944                  * Note that t_lwp is always set to point at the underlying
 945                  * thread, thus this will work in the presence of nested
 946                  * interrupts.
 947                  */
 948                 ctx = NULL;
 949                 if ((lwp = t->t_lwp) != NULL) {
 950                         t = lwptot(lwp);
 951                         ctx = t->t_cpc_ctx;
 952                 }
 953         } else
 954                 ctx = t->t_cpc_ctx;
 955 
 956         if (ctx == NULL) {
 957                 /*
 958                  * This can easily happen if we're using the counters in
 959                  * "shared" mode, for example, and an overflow interrupt
 960                  * occurs while we are running cpustat.  In that case, the
 961                  * bound thread that has the context that belongs to this


 991                  * provider issues a xcall to the remote CPU before it tears
 992                  * down that CPUs context. As high priority xcalls, on an x86
 993                  * architecture, execute at a higher PIL than this handler, it
 994                  * is possible (though extremely unlikely) that the xcall could
 995                  * interrupt the overflow handler before the handler has
 996                  * checked the 'dtrace_cpc_in_use' variable, stop the counters,
 997                  * return to the cpc provider which could then rip down
 998                  * contexts and unset 'dtrace_cpc_in_use' *before* the CPUs
 999                  * overflow handler has had a chance to check the variable. In
1000                  * that case, the handler would direct the overflow into this
1001                  * code and no valid context will be found. The default behavior
1002                  * when no valid context is found is now to shout a warning to
1003                  * the console and bump the 'kcpc_nullctx_count' variable.
1004                  */
1005                 if (kcpc_nullctx_panic)
1006                         panic("null cpc context, thread %p", (void *)t);
1007 #ifdef DEBUG
1008                 cmn_err(CE_NOTE,
1009                     "null cpc context found in overflow handler!\n");
1010 #endif
1011                 atomic_add_32(&kcpc_nullctx_count, 1);
1012         } else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
1013                 /*
1014                  * Schedule an ast to sample the counters, which will
1015                  * propagate any overflow into the virtualized performance
1016                  * counter(s), and may deliver a signal.
1017                  */
1018                 ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
1019                 /*
1020                  * If a counter has overflowed which was counting on behalf of
1021                  * a request which specified CPC_OVF_NOTIFY_EMT, send the
1022                  * process a signal.
1023                  */
1024                 for (i = 0; i < cpc_ncounters; i++) {
1025                         if (ctx->kc_pics[i].kp_req != NULL &&
1026                             bitmap & (1 << i) &&
1027                             ctx->kc_pics[i].kp_req->kr_flags &
1028                             CPC_OVF_NOTIFY_EMT) {
1029                                 /*
1030                                  * A signal has been requested for this PIC, so
 1031                  * freeze the context. The interrupt handler




 921 {
 922         kcpc_ctx_t      *ctx;
 923         kthread_t       *t = curthread;
 924         int             i;
 925 
 926         /*
 927          * On both x86 and UltraSPARC, we may deliver the high-level
 928          * interrupt in kernel mode, just after we've started to run an
 929          * interrupt thread.  (That's because the hardware helpfully
 930          * delivers the overflow interrupt some random number of cycles
 931          * after the instruction that caused the overflow by which time
 932          * we're in some part of the kernel, not necessarily running on
 933          * the right thread).
 934          *
 935          * Check for this case here -- find the pinned thread
 936          * that was running when the interrupt went off.
 937          */
 938         if (t->t_flag & T_INTR_THREAD) {
 939                 klwp_t *lwp;
 940 
 941                 atomic_inc_32(&kcpc_intrctx_count);
 942 
 943                 /*
 944                  * Note that t_lwp is always set to point at the underlying
 945                  * thread, thus this will work in the presence of nested
 946                  * interrupts.
 947                  */
 948                 ctx = NULL;
 949                 if ((lwp = t->t_lwp) != NULL) {
 950                         t = lwptot(lwp);
 951                         ctx = t->t_cpc_ctx;
 952                 }
 953         } else
 954                 ctx = t->t_cpc_ctx;
 955 
 956         if (ctx == NULL) {
 957                 /*
 958                  * This can easily happen if we're using the counters in
 959                  * "shared" mode, for example, and an overflow interrupt
 960                  * occurs while we are running cpustat.  In that case, the
 961                  * bound thread that has the context that belongs to this


 991                  * provider issues a xcall to the remote CPU before it tears
 992                  * down that CPUs context. As high priority xcalls, on an x86
 993                  * architecture, execute at a higher PIL than this handler, it
 994                  * is possible (though extremely unlikely) that the xcall could
 995                  * interrupt the overflow handler before the handler has
 996                  * checked the 'dtrace_cpc_in_use' variable, stop the counters,
 997                  * return to the cpc provider which could then rip down
 998                  * contexts and unset 'dtrace_cpc_in_use' *before* the CPUs
 999                  * overflow handler has had a chance to check the variable. In
1000                  * that case, the handler would direct the overflow into this
1001                  * code and no valid context will be found. The default behavior
1002                  * when no valid context is found is now to shout a warning to
1003                  * the console and bump the 'kcpc_nullctx_count' variable.
1004                  */
1005                 if (kcpc_nullctx_panic)
1006                         panic("null cpc context, thread %p", (void *)t);
1007 #ifdef DEBUG
1008                 cmn_err(CE_NOTE,
1009                     "null cpc context found in overflow handler!\n");
1010 #endif
1011                 atomic_inc_32(&kcpc_nullctx_count);
1012         } else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
1013                 /*
1014                  * Schedule an ast to sample the counters, which will
1015                  * propagate any overflow into the virtualized performance
1016                  * counter(s), and may deliver a signal.
1017                  */
1018                 ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
1019                 /*
1020                  * If a counter has overflowed which was counting on behalf of
1021                  * a request which specified CPC_OVF_NOTIFY_EMT, send the
1022                  * process a signal.
1023                  */
1024                 for (i = 0; i < cpc_ncounters; i++) {
1025                         if (ctx->kc_pics[i].kp_req != NULL &&
1026                             bitmap & (1 << i) &&
1027                             ctx->kc_pics[i].kp_req->kr_flags &
1028                             CPC_OVF_NOTIFY_EMT) {
1029                                 /*
1030                                  * A signal has been requested for this PIC, so
 1031                  * freeze the context. The interrupt handler