Print this page
Issue 6583: remove whole-process swapping


 176          * THREAD_SLEEP() moves curthread->t_lockp to point to the
 177          * lock sqh->sq_lock. This lock is later released by the caller
 178          * when it calls thread_unlock() on curthread.
 179          */
 180 }
 181 
 182 #define cv_block_sig(t, cvp)    \
 183         { (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }
 184 
/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.  The mutex is reacquired before
 * returning.  Returns immediately (without blocking) during panic,
 * so callers must re-test their wakeup condition in a loop.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	/* Never block once the system has panicked. */
	if (panicstr)
		return;
	ASSERT(!quiesce_active);

	/* Caller must already be marked unswappable before sleeping. */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);				/* drop caller's mutex while asleep */
	swtch();				/* give up the CPU until woken */
	mutex_enter(mp);			/* reacquire before returning */
}
 204 
/*
 * Make thread t runnable again.  NOTE(review): presumably installed as a
 * timeout/callout handler for timed CV waits — confirm against callers,
 * which are outside this view.
 */
static void
cv_wakeup(void *arg)
{
	kthread_t *t = arg;

	/*
	 * This mutex is acquired and released in order to make sure that
	 * the wakeup does not happen before the block itself happens.
	 */
	mutex_enter(&t->t_wait_mutex);
	mutex_exit(&t->t_wait_mutex);
	setrun(t);


	klwp_t *lwp = ttolwp(t);	/* NULL for pure kernel threads */
	int cancel_pending;
	int rval = 1;			/* 1: normal wakeup; 0: interrupted */
	int signalled = 0;

	/* Never block once the system has panicked; report normal wakeup. */
	if (panicstr)
		return (rval);
	ASSERT(!quiesce_active);

	/*
	 * Threads in system processes don't process signals.  This is
	 * true both for standard threads of system processes and for
	 * interrupt threads which have borrowed their pinned thread's LWP.
	 */
	if (lwp == NULL || (p->p_flag & SSYS)) {
		cv_wait(cvp, mp);
		return (rval);
	}
	ASSERT(t->t_intr == NULL);

	/* Must already be unswappable before blocking. */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/*
	 * If a signal or cancellation is already pending, make the thread
	 * runnable again so the swtch() below returns promptly instead of
	 * sleeping through the pending event.
	 */
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	/* Remember whether a cv_signal()/broadcast woke us, then clear state. */
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	/* Take delivery of any signal that arrived while we were asleep. */
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;	/* interrupted by signal */
		mutex_enter(mp);
	}


	/* Never block once the system has panicked; report normal wakeup. */
	if (panicstr)
		return (rval);

	/*
	 * Threads in system processes don't process signals.  This is
	 * true both for standard threads of system processes and for
	 * interrupt threads which have borrowed their pinned thread's LWP.
	 */
	if (lwp == NULL || (p->p_flag & SSYS)) {
		cv_wait(cvp, mp);
		return (rval);
	}
	ASSERT(t->t_intr == NULL);

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;   /* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/*
	 * If a signal or cancellation is already pending, make the thread
	 * runnable so the swtch() below returns promptly.
	 */
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	/* Remember whether a cv_signal()/broadcast woke us, then clear state. */
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	/* Take delivery of any signal that arrived while we were asleep. */
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;	/* interrupted by signal */
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	/* A pending schedctl cancellation also counts as an interruption. */
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */




 176          * THREAD_SLEEP() moves curthread->t_lockp to point to the
 177          * lock sqh->sq_lock. This lock is later released by the caller
 178          * when it calls thread_unlock() on curthread.
 179          */
 180 }
 181 
 182 #define cv_block_sig(t, cvp)    \
 183         { (t)->t_flag |= T_WAKEABLE; cv_block(cvp); }
 184 
/*
 * Block on the indicated condition variable and release the
 * associated kmutex while blocked.  The mutex is reacquired before
 * returning.  Returns immediately (without blocking) during panic,
 * so callers must re-test their wakeup condition in a loop.
 */
void
cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	/* Never block once the system has panicked. */
	if (panicstr)
		return;
	ASSERT(!quiesce_active);

	thread_lock(curthread);			/* lock the thread */
	cv_block((condvar_impl_t *)cvp);
	thread_unlock_nopreempt(curthread);	/* unlock the waiters field */
	mutex_exit(mp);				/* drop caller's mutex while asleep */
	swtch();				/* give up the CPU until woken */
	mutex_enter(mp);			/* reacquire before returning */
}
 203 
/*
 * Make thread t runnable again.  NOTE(review): presumably installed as a
 * timeout/callout handler for timed CV waits — confirm against callers,
 * which are outside this view.
 */
static void
cv_wakeup(void *arg)
{
	kthread_t *t = arg;

	/*
	 * This mutex is acquired and released in order to make sure that
	 * the wakeup does not happen before the block itself happens.
	 */
	mutex_enter(&t->t_wait_mutex);
	mutex_exit(&t->t_wait_mutex);
	setrun(t);


	klwp_t *lwp = ttolwp(t);	/* NULL for pure kernel threads */
	int cancel_pending;
	int rval = 1;			/* 1: normal wakeup; 0: interrupted */
	int signalled = 0;

	/* Never block once the system has panicked; report normal wakeup. */
	if (panicstr)
		return (rval);
	ASSERT(!quiesce_active);

	/*
	 * Threads in system processes don't process signals.  This is
	 * true both for standard threads of system processes and for
	 * interrupt threads which have borrowed their pinned thread's LWP.
	 */
	if (lwp == NULL || (p->p_flag & SSYS)) {
		cv_wait(cvp, mp);
		return (rval);
	}
	ASSERT(t->t_intr == NULL);

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/*
	 * If a signal or cancellation is already pending, make the thread
	 * runnable again so the swtch() below returns promptly instead of
	 * sleeping through the pending event.
	 */
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	/* Remember whether a cv_signal()/broadcast woke us, then clear state. */
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	/* Take delivery of any signal that arrived while we were asleep. */
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;	/* interrupted by signal */
		mutex_enter(mp);
	}


	/* Never block once the system has panicked; report normal wakeup. */
	if (panicstr)
		return (rval);

	/*
	 * Threads in system processes don't process signals.  This is
	 * true both for standard threads of system processes and for
	 * interrupt threads which have borrowed their pinned thread's LWP.
	 */
	if (lwp == NULL || (p->p_flag & SSYS)) {
		cv_wait(cvp, mp);
		return (rval);
	}
	ASSERT(t->t_intr == NULL);

	cancel_pending = schedctl_cancel_pending();
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;   /* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	/*
	 * If a signal or cancellation is already pending, make the thread
	 * runnable so the swtch() below returns promptly.
	 */
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || cancel_pending)
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	/* Remember whether a cv_signal()/broadcast woke us, then clear state. */
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	/* Take delivery of any signal that arrived while we were asleep. */
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;	/* interrupted by signal */
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	/* A pending schedctl cancellation also counts as an interruption. */
	if (rval != 0 && cancel_pending) {
		schedctl_cancel_eintr();
		rval = 0;
	}
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */