5045 use atomic_{inc,dec}_* instead of atomic_add_*
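This change replaces open-coded reference-count arithmetic in the sysevent channel code: calls that pass an explicit delta of 1 or -1 to atomic_add_32()/atomic_add_32_nv() become the dedicated increment/decrement interfaces from <sys/atomic.h>. The two forms behave identically; the dedicated ones simply state the intent without a magic constant. A minimal sketch of the pattern (obj_t and its functions are hypothetical; the atomic_*() routines are the stock illumos ones):

#include <sys/atomic.h>

/* Hypothetical refcounted object, for illustration only. */
typedef struct obj {
        uint32_t        o_refcount;
} obj_t;

/* Take a hold on op. */
static void
obj_hold(obj_t *op)
{
        /* Old style: atomic_add_32(&op->o_refcount, 1); */
        atomic_inc_32(&op->o_refcount);
}

/* Release a hold; return the new count as a signed value. */
static int32_t
obj_rele(obj_t *op)
{
        /* Old style: (int32_t)atomic_add_32_nv(&op->o_refcount, -1); */
        return ((int32_t)atomic_dec_32_nv(&op->o_refcount));
}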


Before the change, the relevant excerpts read as follows; elided source is marked with [...]:

[...]
                kmem_free(chp->ch_name, chp->ch_namelen);
                kmem_free(chp, sizeof (evch_chan_t));
        }

        mutex_exit(&eg->evch_list_lock);
        /* all channels should now be gone */
        ASSERT(evch_dl_getnum(&eg->evch_list) == 0);
        kmem_free(eg, sizeof (*eg));
}

/*
 * Frees the evch_gevent_t structure, including the payload, if the reference
 * count drops to or below zero. It goes below zero when the event is freed
 * without ever having been queued.
 */
static void
evch_gevent_free(evch_gevent_t *evp)
{
        int32_t refcnt;

        refcnt = (int32_t)atomic_add_32_nv(&evp->ge_refcount, -1);
        if (refcnt <= 0) {
                if (evp->ge_destruct != NULL) {
                        evp->ge_destruct((void *)&(evp->ge_payload),
                            evp->ge_dstcookie);
                }
                kmem_free(evp, evp->ge_size);
        }
}

/*
 * Deliver is called for every subscription to the current event.
 * It calls the registered filter function and then the registered delivery
 * callback routine. Returns 0 on success. The callback routine returns
 * EVQ_AGAIN or EVQ_SLEEP if the event could not be delivered.
 */
static int
evch_deliver(evch_evqsub_t *sp, evch_gevent_t *ep)
{
        void            *uep = &ep->ge_payload;
        int             res = EVQ_DELIVER;

[...]

static int
evch_evq_pub(evch_eventq_t *eqp, void *ev, int flags)
{
        size_t size;
        evch_qelem_t    *qep;
        evch_gevent_t   *evp = GEVENT(ev);

        size = sizeof (evch_qelem_t);
        if (flags & EVCH_TRYHARD) {
                qep = kmem_alloc_tryhard(size, &size, KM_NOSLEEP);
        } else {
                qep = kmem_alloc(size, flags & EVCH_NOSLEEP ?
                    KM_NOSLEEP : KM_SLEEP);
        }
        if (qep == NULL) {
                return (-1);
        }
        qep->q_objref = (void *)evp;
        qep->q_objsize = size;
        atomic_add_32(&evp->ge_refcount, 1);
        mutex_enter(&eqp->eq_queuemx);
        evch_q_in(&eqp->eq_eventq, qep);

        /* Wake up the delivery thread */
        cv_signal(&eqp->eq_thrsleepcv);
        mutex_exit(&eqp->eq_queuemx);
        return (0);
}

/*
 * Enter hold mode of an event queue. The event delivery thread stops event
 * handling after delivering the current event (if any).
 */
static void
evch_evq_stop(evch_eventq_t *eqp)
{
        mutex_enter(&eqp->eq_queuemx);
        eqp->eq_holdmode = 1;
        if (evq_initcomplete) {
                cv_signal(&eqp->eq_thrsleepcv);
[...]



After the change, only the two reference-count call sites differ; the surrounding code is untouched. evch_gevent_free() now drops its hold with atomic_dec_32_nv():

/*
 * Frees the evch_gevent_t structure, including the payload, if the reference
 * count drops to or below zero. It goes below zero when the event is freed
 * without ever having been queued.
 */
static void
evch_gevent_free(evch_gevent_t *evp)
{
        int32_t refcnt;

        refcnt = (int32_t)atomic_dec_32_nv(&evp->ge_refcount);
        if (refcnt <= 0) {
                if (evp->ge_destruct != NULL) {
                        evp->ge_destruct((void *)&(evp->ge_payload),
                            evp->ge_dstcookie);
                }
                kmem_free(evp, evp->ge_size);
        }
}
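The (int32_t) cast around the atomic_dec_32_nv() return value is what makes the "drops to or below zero" comment work: an event that was never queued never had a hold taken, so the decrement wraps the unsigned count from 0 to UINT32_MAX, which the cast reinterprets as -1, and the refcnt <= 0 test still fires. A standalone demo of that reinterpretation (ordinary C arithmetic standing in for the atomic op):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
        uint32_t refcount = 0;          /* never queued: no hold was taken */
        uint32_t nv = --refcount;       /* models atomic_dec_32_nv() */

        /* Prints 4294967295 and -1; the <= 0 test in the caller fires. */
        printf("%u %d\n", nv, (int32_t)nv);
        return (0);
}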
evch_evq_pub() now takes its hold with atomic_inc_32():

static int
evch_evq_pub(evch_eventq_t *eqp, void *ev, int flags)
{
        size_t size;
        evch_qelem_t    *qep;
        evch_gevent_t   *evp = GEVENT(ev);

        size = sizeof (evch_qelem_t);
        if (flags & EVCH_TRYHARD) {
                qep = kmem_alloc_tryhard(size, &size, KM_NOSLEEP);
        } else {
                qep = kmem_alloc(size, flags & EVCH_NOSLEEP ?
                    KM_NOSLEEP : KM_SLEEP);
        }
        if (qep == NULL) {
                return (-1);
        }
        qep->q_objref = (void *)evp;
        qep->q_objsize = size;
        atomic_inc_32(&evp->ge_refcount);
        mutex_enter(&eqp->eq_queuemx);
        evch_q_in(&eqp->eq_eventq, qep);

        /* Wake up the delivery thread */
        cv_signal(&eqp->eq_thrsleepcv);
        mutex_exit(&eqp->eq_queuemx);
        return (0);
}
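The remainder of the excerpt (the channel teardown path, evch_deliver(), and evch_evq_stop()) is identical before and after. The change is purely mechanical: atomic_inc_32(p) and atomic_dec_32_nv(p) are documented to behave exactly like atomic_add_32(p, 1) and atomic_add_32_nv(p, -1), so behavior is unchanged and only readability improves.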