XXXX introduce drv_sectohz
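
This webrev replaces the one-second delay idiom delay(drv_usectohz(1000000))
with delay(drv_sectohz(1)) throughout the rdsv3 workqueue code. The definition
of drv_sectohz() itself is not shown in this excerpt; the sketch below is only
an illustration of the assumed semantics, expressed in terms of the existing
drv_usectohz(9F) interface and the MICROSEC constant from <sys/time.h>, and is
not the actual implementation being introduced.

#include <sys/types.h>
#include <sys/time.h>		/* MICROSEC: microseconds per second */
#include <sys/sunddi.h>		/* drv_usectohz(9F) */

/*
 * Hypothetical definition, for illustration only: assuming drv_sectohz()
 * converts a count of whole seconds into clock ticks, it can be written
 * on top of the existing microsecond-based interface.
 */
#define	drv_sectohz(sec)	(drv_usectohz((clock_t)(sec) * MICROSEC))

/*
 * Under that reading, the two idioms seen in the hunks below are
 * equivalent; delay(9F) takes clock ticks either way:
 *
 *	delay(drv_usectohz(1000000));	old form: one second in microseconds
 *	delay(drv_sectohz(1));		new form: one second, stated directly
 */

Whatever the real definition, the intent is that "delay for N seconds" reads
directly in seconds rather than as a hand-multiplied microsecond count, which
is exactly what each changed line below does.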

Old version (uses drv_usectohz):

 548 /* XXX */
 549 void
 550 rdsv3_flush_workqueue(rdsv3_workqueue_struct_t *wq)
 551 {
 552         RDSV3_DPRINTF4("rdsv3_flush_workqueue", "Enter(wq: %p)", wq);
 553 
 554         mutex_enter(&wq->wq_lock);
 555         switch (wq->wq_state) {
 556         case RDSV3_WQ_THREAD_IDLE:
 557                 /* nothing to do */
 558                 ASSERT(list_is_empty(&wq->wq_queue));
 559                 break;
 560 
 561         case RDSV3_WQ_THREAD_RUNNING:
 562                 wq->wq_state = RDSV3_WQ_THREAD_FLUSHING;
 563                 /* FALLTHRU */
 564         case RDSV3_WQ_THREAD_FLUSHING:
 565                 /* already flushing, wait until the flushing is complete */
 566                 do {
 567                         mutex_exit(&wq->wq_lock);
 568                         delay(drv_usectohz(1000000));
 569                         mutex_enter(&wq->wq_lock);
 570                 } while (wq->wq_state == RDSV3_WQ_THREAD_FLUSHING);
 571                 break;
 572         case RDSV3_WQ_THREAD_EXITING:
 573                 mutex_exit(&wq->wq_lock);
 574                 rdsv3_worker_thread(wq);
 575                 return;
 576         }
 577         mutex_exit(&wq->wq_lock);
 578 
 579         RDSV3_DPRINTF4("rdsv3_flush_workqueue", "Return(wq: %p)", wq);
 580 }
 581 
 582 void
 583 rdsv3_queue_work(rdsv3_workqueue_struct_t *wq, rdsv3_work_t *wp)
 584 {
 585         RDSV3_DPRINTF4("rdsv3_queue_work", "Enter(wq: %p, wp: %p)", wq, wp);
 586 
 587         mutex_enter(&wq->wq_lock);
 588 
 589         if (list_link_active(&wp->work_item)) {
 590                 /* This is already in the queue, ignore this call */
 591                 mutex_exit(&wq->wq_lock);
 592                 RDSV3_DPRINTF3("rdsv3_queue_work", "already queued: %p", wp);
 593                 return;
 594         }
 595 
 596         switch (wq->wq_state) {
 597         case RDSV3_WQ_THREAD_RUNNING:
 598                 list_insert_tail(&wq->wq_queue, wp);
 599                 mutex_exit(&wq->wq_lock);
 600                 break;
 601 
 602         case RDSV3_WQ_THREAD_FLUSHING:
 603                 do {
 604                         mutex_exit(&wq->wq_lock);
 605                         delay(drv_usectohz(1000000));
 606                         mutex_enter(&wq->wq_lock);
 607                 } while (wq->wq_state == RDSV3_WQ_THREAD_FLUSHING);
 608 
 609                 if (wq->wq_state == RDSV3_WQ_THREAD_RUNNING) {
 610                         list_insert_tail(&wq->wq_queue, wp);
 611                         mutex_exit(&wq->wq_lock);
 612                         break;
 613                 }
 614                 /* FALLTHRU */
 615 
 616         case RDSV3_WQ_THREAD_IDLE:
 617                 list_insert_tail(&wq->wq_queue, wp);
 618                 wq->wq_state = RDSV3_WQ_THREAD_RUNNING;
 619                 mutex_exit(&wq->wq_lock);
 620 
 621                 (void) ddi_taskq_dispatch(rdsv3_taskq, rdsv3_worker_thread, wq,
 622                     DDI_SLEEP);
 623                 break;
 624 
 625         case RDSV3_WQ_THREAD_EXITING:


 719         mutex_enter(&dwp->wq->wq_lock);
 720         dwp->wq->wq_pending--;
 721         mutex_exit(&dwp->wq->wq_lock);
 722 
 723         RDSV3_DPRINTF4("rdsv3_cancel_delayed_work",
 724             "Return(wq: %p, dwp: %p)", dwp->wq, dwp);
 725 }
 726 
 727 void
 728 rdsv3_destroy_task_workqueue(rdsv3_workqueue_struct_t *wq)
 729 {
 730         RDSV3_DPRINTF2("rdsv3_destroy_workqueue", "Enter");
 731 
 732         ASSERT(wq);
 733 
 734         mutex_enter(&wq->wq_lock);
 735         wq->wq_state = RDSV3_WQ_THREAD_EXITING;
 736 
 737         while (wq->wq_pending > 0) {
 738                 mutex_exit(&wq->wq_lock);
 739                 delay(drv_usectohz(1000000));
 740                 mutex_enter(&wq->wq_lock);
 741         };
 742         mutex_exit(&wq->wq_lock);
 743 
 744         rdsv3_flush_workqueue(wq);
 745 
 746         list_destroy(&wq->wq_queue);
 747         mutex_destroy(&wq->wq_lock);
 748         kmem_free(wq, sizeof (rdsv3_workqueue_struct_t));
 749 
 750         ASSERT(rdsv3_taskq);
 751         ddi_taskq_destroy(rdsv3_taskq);
 752 
 753         wq = NULL;
 754         rdsv3_taskq = NULL;
 755 
 756         RDSV3_DPRINTF2("rdsv3_destroy_workqueue", "Return");
 757 }
 758 
 759 /* ARGSUSED */


 775         rdsv3_taskq = ddi_taskq_create(rdsv3_dev_info, name,
 776             RDSV3_NUM_TASKQ_THREADS, TASKQ_DEFAULTPRI, 0);
 777         if (rdsv3_taskq == NULL) {
 778                 RDSV3_DPRINTF2(__FILE__,
 779                     "ddi_taskq_create failed for rdsv3_taskq");
 780                 return (NULL);
 781         }
 782 
 783         wq = kmem_zalloc(sizeof (rdsv3_workqueue_struct_t), KM_NOSLEEP);
 784         if (wq == NULL) {
 785                 RDSV3_DPRINTF2(__FILE__, "kmem_zalloc failed for wq");
 786                 ddi_taskq_destroy(rdsv3_taskq);
 787                 return (NULL);
 788         }
 789 
 790         list_create(&wq->wq_queue, sizeof (struct rdsv3_work_s),
 791             offsetof(struct rdsv3_work_s, work_item));
 792         mutex_init(&wq->wq_lock, NULL, MUTEX_DRIVER, NULL);
 793         wq->wq_state = RDSV3_WQ_THREAD_IDLE;
 794         wq->wq_pending = 0;
 795         rdsv3_one_sec_in_hz = drv_usectohz(1000000);
 796 
 797         RDSV3_DPRINTF2("create_singlethread_workqueue", "Return");
 798 
 799         return (wq);
 800 }
 801 
 802 /*
 803  * Implementation for struct sock
 804  */
 805 
 806 void
 807 rdsv3_sock_exit_data(struct rsock *sk)
 808 {
 809         struct rdsv3_sock *rs = sk->sk_protinfo;
 810 
 811         RDSV3_DPRINTF4("rdsv3_sock_exit_data", "rs: %p sk: %p", rs, sk);
 812 
 813         ASSERT(rs != NULL);
 814         ASSERT(rdsv3_sk_sock_flag(sk, SOCK_DEAD));
 815 


New version (uses drv_sectohz):

 548 /* XXX */
 549 void
 550 rdsv3_flush_workqueue(rdsv3_workqueue_struct_t *wq)
 551 {
 552         RDSV3_DPRINTF4("rdsv3_flush_workqueue", "Enter(wq: %p)", wq);
 553 
 554         mutex_enter(&wq->wq_lock);
 555         switch (wq->wq_state) {
 556         case RDSV3_WQ_THREAD_IDLE:
 557                 /* nothing to do */
 558                 ASSERT(list_is_empty(&wq->wq_queue));
 559                 break;
 560 
 561         case RDSV3_WQ_THREAD_RUNNING:
 562                 wq->wq_state = RDSV3_WQ_THREAD_FLUSHING;
 563                 /* FALLTHRU */
 564         case RDSV3_WQ_THREAD_FLUSHING:
 565                 /* already flushing, wait until the flushing is complete */
 566                 do {
 567                         mutex_exit(&wq->wq_lock);
 568                         delay(drv_sectohz(1));
 569                         mutex_enter(&wq->wq_lock);
 570                 } while (wq->wq_state == RDSV3_WQ_THREAD_FLUSHING);
 571                 break;
 572         case RDSV3_WQ_THREAD_EXITING:
 573                 mutex_exit(&wq->wq_lock);
 574                 rdsv3_worker_thread(wq);
 575                 return;
 576         }
 577         mutex_exit(&wq->wq_lock);
 578 
 579         RDSV3_DPRINTF4("rdsv3_flush_workqueue", "Return(wq: %p)", wq);
 580 }
 581 
 582 void
 583 rdsv3_queue_work(rdsv3_workqueue_struct_t *wq, rdsv3_work_t *wp)
 584 {
 585         RDSV3_DPRINTF4("rdsv3_queue_work", "Enter(wq: %p, wp: %p)", wq, wp);
 586 
 587         mutex_enter(&wq->wq_lock);
 588 
 589         if (list_link_active(&wp->work_item)) {
 590                 /* This is already in the queue, ignore this call */
 591                 mutex_exit(&wq->wq_lock);
 592                 RDSV3_DPRINTF3("rdsv3_queue_work", "already queued: %p", wp);
 593                 return;
 594         }
 595 
 596         switch (wq->wq_state) {
 597         case RDSV3_WQ_THREAD_RUNNING:
 598                 list_insert_tail(&wq->wq_queue, wp);
 599                 mutex_exit(&wq->wq_lock);
 600                 break;
 601 
 602         case RDSV3_WQ_THREAD_FLUSHING:
 603                 do {
 604                         mutex_exit(&wq->wq_lock);
 605                         delay(drv_sectohz(1));
 606                         mutex_enter(&wq->wq_lock);
 607                 } while (wq->wq_state == RDSV3_WQ_THREAD_FLUSHING);
 608 
 609                 if (wq->wq_state == RDSV3_WQ_THREAD_RUNNING) {
 610                         list_insert_tail(&wq->wq_queue, wp);
 611                         mutex_exit(&wq->wq_lock);
 612                         break;
 613                 }
 614                 /* FALLTHRU */
 615 
 616         case RDSV3_WQ_THREAD_IDLE:
 617                 list_insert_tail(&wq->wq_queue, wp);
 618                 wq->wq_state = RDSV3_WQ_THREAD_RUNNING;
 619                 mutex_exit(&wq->wq_lock);
 620 
 621                 (void) ddi_taskq_dispatch(rdsv3_taskq, rdsv3_worker_thread, wq,
 622                     DDI_SLEEP);
 623                 break;
 624 
 625         case RDSV3_WQ_THREAD_EXITING:


 719         mutex_enter(&dwp->wq->wq_lock);
 720         dwp->wq->wq_pending--;
 721         mutex_exit(&dwp->wq->wq_lock);
 722 
 723         RDSV3_DPRINTF4("rdsv3_cancel_delayed_work",
 724             "Return(wq: %p, dwp: %p)", dwp->wq, dwp);
 725 }
 726 
 727 void
 728 rdsv3_destroy_task_workqueue(rdsv3_workqueue_struct_t *wq)
 729 {
 730         RDSV3_DPRINTF2("rdsv3_destroy_workqueue", "Enter");
 731 
 732         ASSERT(wq);
 733 
 734         mutex_enter(&wq->wq_lock);
 735         wq->wq_state = RDSV3_WQ_THREAD_EXITING;
 736 
 737         while (wq->wq_pending > 0) {
 738                 mutex_exit(&wq->wq_lock);
 739                 delay(drv_sectohz(1));
 740                 mutex_enter(&wq->wq_lock);
 741         };
 742         mutex_exit(&wq->wq_lock);
 743 
 744         rdsv3_flush_workqueue(wq);
 745 
 746         list_destroy(&wq->wq_queue);
 747         mutex_destroy(&wq->wq_lock);
 748         kmem_free(wq, sizeof (rdsv3_workqueue_struct_t));
 749 
 750         ASSERT(rdsv3_taskq);
 751         ddi_taskq_destroy(rdsv3_taskq);
 752 
 753         wq = NULL;
 754         rdsv3_taskq = NULL;
 755 
 756         RDSV3_DPRINTF2("rdsv3_destroy_workqueue", "Return");
 757 }
 758 
 759 /* ARGSUSED */


 775         rdsv3_taskq = ddi_taskq_create(rdsv3_dev_info, name,
 776             RDSV3_NUM_TASKQ_THREADS, TASKQ_DEFAULTPRI, 0);
 777         if (rdsv3_taskq == NULL) {
 778                 RDSV3_DPRINTF2(__FILE__,
 779                     "ddi_taskq_create failed for rdsv3_taskq");
 780                 return (NULL);
 781         }
 782 
 783         wq = kmem_zalloc(sizeof (rdsv3_workqueue_struct_t), KM_NOSLEEP);
 784         if (wq == NULL) {
 785                 RDSV3_DPRINTF2(__FILE__, "kmem_zalloc failed for wq");
 786                 ddi_taskq_destroy(rdsv3_taskq);
 787                 return (NULL);
 788         }
 789 
 790         list_create(&wq->wq_queue, sizeof (struct rdsv3_work_s),
 791             offsetof(struct rdsv3_work_s, work_item));
 792         mutex_init(&wq->wq_lock, NULL, MUTEX_DRIVER, NULL);
 793         wq->wq_state = RDSV3_WQ_THREAD_IDLE;
 794         wq->wq_pending = 0;
 795         rdsv3_one_sec_in_hz = drv_sectohz(1);
 796 
 797         RDSV3_DPRINTF2("create_singlethread_workqueue", "Return");
 798 
 799         return (wq);
 800 }
 801 
 802 /*
 803  * Implementation for struct sock
 804  */
 805 
 806 void
 807 rdsv3_sock_exit_data(struct rsock *sk)
 808 {
 809         struct rdsv3_sock *rs = sk->sk_protinfo;
 810 
 811         RDSV3_DPRINTF4("rdsv3_sock_exit_data", "rs: %p sk: %p", rs, sk);
 812 
 813         ASSERT(rs != NULL);
 814         ASSERT(rdsv3_sk_sock_flag(sk, SOCK_DEAD));
 815