Print this page
patch delete-t_stime
patch delete-swapped_lock
patch remove-load-flag
patch remove-on-swapq-flag
patch remove-swapenq-flag
patch remove-dont-swap-flag


 138         uintptr_t t_lofault;    /* ret pc for failed page faults */
 139         label_t *t_onfault;     /* on_fault() setjmp buf */
 140         struct on_trap_data *t_ontrap;  /* on_trap() protection data */
 141         caddr_t t_swap;         /* the bottom of the stack, if from segkp */
 142         lock_t  t_lock;         /* used to resume() a thread */
 143         uint8_t t_lockstat;     /* set while thread is in lockstat code */
 144         uint8_t t_pil;          /* interrupt thread PIL */
 145         disp_lock_t     t_pi_lock;      /* lock protecting t_prioinv list */
 146         char    t_nomigrate;    /* do not migrate if set */
 147         struct cpu      *t_cpu; /* CPU that thread last ran on */
 148         struct cpu      *t_weakbound_cpu;       /* cpu weakly bound to */
 149         struct lgrp_ld  *t_lpl; /* load average for home lgroup */
 150         void            *t_lgrp_reserv[2];      /* reserved for future */
 151         struct _kthread *t_intr; /* interrupted (pinned) thread */
 152         uint64_t        t_intr_start;   /* timestamp when time slice began */
 153         kt_did_t        t_did;  /* thread id for kernel debuggers */
 154         caddr_t t_tnf_tpdp;     /* Trace facility data pointer */
 155         struct _kcpc_ctx *t_cpc_ctx;    /* performance counter context */
 156         struct _kcpc_set *t_cpc_set;    /* set this thread has bound */
 157 
 158         /*
 159          * non swappable part of the lwp state.
 160          */
 161         id_t            t_tid;          /* lwp's id */
 162         id_t            t_waitfor;      /* target lwp id in lwp_wait() */
 163         struct sigqueue *t_sigqueue;    /* queue of siginfo structs */
 164         k_sigset_t      t_sig;          /* signals pending to this process */
 165         k_sigset_t      t_extsig;       /* signals sent from another contract */
 166         k_sigset_t      t_hold;         /* hold signal bit mask */
 167         k_sigset_t      t_sigwait;      /* sigtimedwait() is accepting these */
 168         struct  _kthread *t_forw;       /* process's forward thread link */
 169         struct  _kthread *t_back;       /* process's backward thread link */
 170         struct  _kthread *t_thlink;     /* tid (lwpid) lookup hash link */
 171         klwp_t  *t_lwp;                 /* thread's lwp pointer */
 172         struct  proc    *t_procp;       /* proc pointer */
 173         struct  t_audit_data *t_audit_data;     /* per thread audit data */
 174         struct  _kthread *t_next;       /* doubly linked list of all threads */
 175         struct  _kthread *t_prev;
 176         ushort_t t_whystop;             /* reason for stopping */
 177         ushort_t t_whatstop;            /* more detailed reason */
 178         int     t_dslot;                /* index in proc's thread directory */
 179         struct  pollstate *t_pollstate; /* state used during poll(2) */
 180         struct  pollcache *t_pollcache; /* to pass a pcache ptr by /dev/poll */


 251         struct turnstile *t_prioinv;
 252 
 253         /*
 254          * Pointer to the turnstile attached to the synchronization
 255          * object where this thread is blocked.
 256          */
 257 
 258         struct turnstile *t_ts;
 259 
 260         /*
 261          * kernel thread specific data
 262          *      Borrowed from userland implementation of POSIX tsd
 263          */
 264         struct tsd_thread {
 265                 struct tsd_thread *ts_next;     /* threads with TSD */
 266                 struct tsd_thread *ts_prev;     /* threads with TSD */
 267                 uint_t            ts_nkeys;     /* entries in value array */
 268                 void              **ts_value;   /* array of value/key */
 269         } *t_tsd;
 270 
 271         clock_t         t_stime;        /* time stamp used by the swapper */
 272         struct door_data *t_door;       /* door invocation data */
 273         kmutex_t        *t_plockp;      /* pointer to process's p_lock */
 274 
 275         struct sc_shared *t_schedctl;   /* scheduler activations shared data */
 276         uintptr_t       t_sc_uaddr;     /* user-level address of shared data */
 277 
 278         struct cpupart  *t_cpupart;     /* partition containing thread */
 279         int             t_bind_pset;    /* processor set binding */
 280 
 281         struct copyops  *t_copyops;     /* copy in/out ops vector */
 282 
 283         caddr_t         t_stkbase;      /* base of the stack */
 284         struct page     *t_red_pp;      /* if non-NULL, redzone is mapped */
 285 
 286         afd_t           t_activefd;     /* active file descriptor table */
 287 
 288         struct _kthread *t_priforw;     /* sleepq per-priority sublist */
 289         struct _kthread *t_priback;
 290 
 291         struct sleepq   *t_sleepq;      /* sleep queue thread is waiting on */


 382 #define TP_DAEMON       0x0001  /* this is an LWP_DAEMON lwp */
 383 #define TP_HOLDLWP      0x0002  /* hold thread's lwp */
 384 #define TP_TWAIT        0x0004  /* wait to be freed by lwp_wait() */
 385 #define TP_LWPEXIT      0x0008  /* lwp has exited */
 386 #define TP_PRSTOP       0x0010  /* thread is being stopped via /proc */
 387 #define TP_CHKPT        0x0020  /* thread is being stopped via CPR checkpoint */
 388 #define TP_EXITLWP      0x0040  /* terminate this lwp */
 389 #define TP_PRVSTOP      0x0080  /* thread is virtually stopped via /proc */
 390 #define TP_MSACCT       0x0100  /* collect micro-state accounting information */
 391 #define TP_STOPPING     0x0200  /* thread is executing stop() */
 392 #define TP_WATCHPT      0x0400  /* process has watchpoints in effect */
 393 #define TP_PAUSE        0x0800  /* process is being stopped via pauselwps() */
 394 #define TP_CHANGEBIND   0x1000  /* thread has a new cpu/cpupart binding */
 395 #define TP_ZTHREAD      0x2000  /* this is a kernel thread for a zone */
 396 #define TP_WATCHSTOP    0x4000  /* thread is stopping via holdwatch() */
 397 
 398 /*
 399  * Thread scheduler flag (t_schedflag) definitions.
 400  *      The thread must be locked via thread_lock() or equiv. to change these.
 401  */
 402 #define TS_LOAD         0x0001  /* thread is in memory */
 403 #define TS_DONT_SWAP    0x0002  /* thread/lwp should not be swapped */
 404 #define TS_SWAPENQ      0x0004  /* swap thread when it reaches a safe point */
 405 #define TS_ON_SWAPQ     0x0008  /* thread is on the swap queue */
 406 #define TS_SIGNALLED    0x0010  /* thread was awakened by cv_signal() */
 407 #define TS_PROJWAITQ    0x0020  /* thread is on its project's waitq */
 408 #define TS_ZONEWAITQ    0x0040  /* thread is on its zone's waitq */
 409 #define TS_CSTART       0x0100  /* setrun() by continuelwps() */
 410 #define TS_UNPAUSE      0x0200  /* setrun() by unpauselwps() */
 411 #define TS_XSTART       0x0400  /* setrun() by SIGCONT */
 412 #define TS_PSTART       0x0800  /* setrun() by /proc */
 413 #define TS_RESUME       0x1000  /* setrun() by CPR resume process */
 414 #define TS_CREATE       0x2000  /* setrun() by syslwp_create() */
 415 #define TS_RUNQMATCH    0x4000  /* exact run queue balancing by setbackdq() */
 416 #define TS_ALLSTART     \
 417         (TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
 418 #define TS_ANYWAITQ     (TS_PROJWAITQ|TS_ZONEWAITQ)
 419 
 420 /*
 421  * Thread binding types
 422  */
 423 #define TB_ALLHARD      0
 424 #define TB_CPU_SOFT     0x01            /* soft binding to CPU */
 425 #define TB_PSET_SOFT    0x02            /* soft binding to pset */


 463 #define CPR_VSTOPPED(t)                         \
 464         ((t)->t_state == TS_SLEEP &&         \
 465         (t)->t_wchan0 != NULL &&             \
 466         ((t)->t_flag & T_WAKEABLE) &&            \
 467         ((t)->t_proc_flag & TP_CHKPT))
 468 
 469 /* True if thread has been stopped by hold*() or was created stopped */
 470 #define SUSPENDED(t) ((t)->t_state == TS_STOPPED && \
 471         ((t)->t_schedflag & (TS_CSTART|TS_UNPAUSE)) != (TS_CSTART|TS_UNPAUSE))
 472 
 473 /* True if thread possesses an inherited priority */
 474 #define INHERITED(t)    ((t)->t_epri != 0)
 475 
 476 /* The dispatch priority of a thread */
 477 #define DISP_PRIO(t) ((t)->t_epri > (t)->t_pri ? (t)->t_epri : (t)->t_pri)
 478 
 479 /* The assigned priority of a thread */
 480 #define ASSIGNED_PRIO(t)        ((t)->t_pri)
 481 
 482 /*
 483  * Macros to determine whether a thread can be swapped.
 484  * If t_lock is held, the thread is either on a processor or being swapped.
 485  */
 486 #define SWAP_OK(t)      (!LOCK_HELD(&(t)->t_lock))
 487 
 488 /*
 489  * proctot(x)
 490  *      convert a proc pointer to a thread pointer. this only works with
 491  *      procs that have only one lwp.
 492  *
 493  * proctolwp(x)
 494  *      convert a proc pointer to a lwp pointer. this only works with
 495  *      procs that have only one lwp.
 496  *
 497  * ttolwp(x)
 498  *      convert a thread pointer to its lwp pointer.
 499  *
 500  * ttoproc(x)
 501  *      convert a thread pointer to its proc pointer.
 502  *
 503  * ttoproj(x)
 504  *      convert a thread pointer to its project pointer.
 505  *
 506  * ttozone(x)
 507  *      convert a thread pointer to its zone pointer.
 508  *


 636 #define THREAD_TRANSITION(tp)   thread_transition(tp);
 637 /*
 638  * Set the thread's lock to be the transition lock, without dropping
 639  * previously held lock.
 640  */
 641 #define THREAD_TRANSITION_NOLOCK(tp)    ((tp)->t_lockp = &transition_lock)
 642 
 643 /*
 644  * Put thread in run state, and set the lock pointer to the dispatcher queue
 645  * lock pointer provided.  This lock should be held.
 646  */
 647 #define THREAD_RUN(tp, lp)      THREAD_SET_STATE(tp, TS_RUN, lp)
 648 
 649 /*
 650  * Put thread in wait state, and set the lock pointer to the wait queue
 651  * lock pointer provided.  This lock should be held.
 652  */
 653 #define THREAD_WAIT(tp, lp)     THREAD_SET_STATE(tp, TS_WAIT, lp)
 654 
 655 /*
 656  * Put thread in run state, and set the lock pointer to the dispatcher queue
 657  * lock pointer provided (i.e., the "swapped_lock").  This lock should be held.
 658  */
 659 #define THREAD_SWAP(tp, lp)     THREAD_SET_STATE(tp, TS_RUN, lp)
 660 
 661 /*
 662  * Put the thread in zombie state and set the lock pointer to NULL.
 663  * The NULL will catch anything that tries to lock a zombie.
 664  */
 665 #define THREAD_ZOMB(tp)         THREAD_SET_STATE(tp, TS_ZOMB, NULL)
 666 
 667 /*
 668  * Set the thread into ONPROC state, and point the lock at the CPUs
 669  * lock for the onproc thread(s).  This lock should be held, so the
 670  * thread does not become unlocked, since these stores can be reordered.
 671  */
 672 #define THREAD_ONPROC(tp, cpu)  \
 673                 THREAD_SET_STATE(tp, TS_ONPROC, &(cpu)->cpu_thread_lock)
 674 
 675 /*
 676  * Set the thread into the TS_SLEEP state, and set the lock pointer to
 677  * some sleep queue's lock.  The new lock should already be held.
 678  */
 679 #define THREAD_SLEEP(tp, lp)    {                               \
 680                         disp_lock_t     *tlp;                   \
 681                         tlp = (tp)->t_lockp;                 \




 138         uintptr_t t_lofault;    /* ret pc for failed page faults */
 139         label_t *t_onfault;     /* on_fault() setjmp buf */
 140         struct on_trap_data *t_ontrap;  /* on_trap() protection data */
 141         caddr_t t_swap;         /* the bottom of the stack, if from segkp */
 142         lock_t  t_lock;         /* used to resume() a thread */
 143         uint8_t t_lockstat;     /* set while thread is in lockstat code */
 144         uint8_t t_pil;          /* interrupt thread PIL */
 145         disp_lock_t     t_pi_lock;      /* lock protecting t_prioinv list */
 146         char    t_nomigrate;    /* do not migrate if set */
 147         struct cpu      *t_cpu; /* CPU that thread last ran on */
 148         struct cpu      *t_weakbound_cpu;       /* cpu weakly bound to */
 149         struct lgrp_ld  *t_lpl; /* load average for home lgroup */
 150         void            *t_lgrp_reserv[2];      /* reserved for future */
 151         struct _kthread *t_intr; /* interrupted (pinned) thread */
 152         uint64_t        t_intr_start;   /* timestamp when time slice began */
 153         kt_did_t        t_did;  /* thread id for kernel debuggers */
 154         caddr_t t_tnf_tpdp;     /* Trace facility data pointer */
 155         struct _kcpc_ctx *t_cpc_ctx;    /* performance counter context */
 156         struct _kcpc_set *t_cpc_set;    /* set this thread has bound */
 157 



 158         id_t            t_tid;          /* lwp's id */
 159         id_t            t_waitfor;      /* target lwp id in lwp_wait() */
 160         struct sigqueue *t_sigqueue;    /* queue of siginfo structs */
 161         k_sigset_t      t_sig;          /* signals pending to this process */
 162         k_sigset_t      t_extsig;       /* signals sent from another contract */
 163         k_sigset_t      t_hold;         /* hold signal bit mask */
 164         k_sigset_t      t_sigwait;      /* sigtimedwait() is accepting these */
 165         struct  _kthread *t_forw;       /* process's forward thread link */
 166         struct  _kthread *t_back;       /* process's backward thread link */
 167         struct  _kthread *t_thlink;     /* tid (lwpid) lookup hash link */
 168         klwp_t  *t_lwp;                 /* thread's lwp pointer */
 169         struct  proc    *t_procp;       /* proc pointer */
 170         struct  t_audit_data *t_audit_data;     /* per thread audit data */
 171         struct  _kthread *t_next;       /* doubly linked list of all threads */
 172         struct  _kthread *t_prev;
 173         ushort_t t_whystop;             /* reason for stopping */
 174         ushort_t t_whatstop;            /* more detailed reason */
 175         int     t_dslot;                /* index in proc's thread directory */
 176         struct  pollstate *t_pollstate; /* state used during poll(2) */
 177         struct  pollcache *t_pollcache; /* to pass a pcache ptr by /dev/poll */


 248         struct turnstile *t_prioinv;
 249 
 250         /*
 251          * Pointer to the turnstile attached to the synchronization
 252          * object where this thread is blocked.
 253          */
 254 
 255         struct turnstile *t_ts;
 256 
 257         /*
 258          * kernel thread specific data
 259          *      Borrowed from userland implementation of POSIX tsd
 260          */
 261         struct tsd_thread {
 262                 struct tsd_thread *ts_next;     /* threads with TSD */
 263                 struct tsd_thread *ts_prev;     /* threads with TSD */
 264                 uint_t            ts_nkeys;     /* entries in value array */
 265                 void              **ts_value;   /* array of value/key */
 266         } *t_tsd;
 267 

 268         struct door_data *t_door;       /* door invocation data */
 269         kmutex_t        *t_plockp;      /* pointer to process's p_lock */
 270 
 271         struct sc_shared *t_schedctl;   /* scheduler activations shared data */
 272         uintptr_t       t_sc_uaddr;     /* user-level address of shared data */
 273 
 274         struct cpupart  *t_cpupart;     /* partition containing thread */
 275         int             t_bind_pset;    /* processor set binding */
 276 
 277         struct copyops  *t_copyops;     /* copy in/out ops vector */
 278 
 279         caddr_t         t_stkbase;      /* base of the stack */
 280         struct page     *t_red_pp;      /* if non-NULL, redzone is mapped */
 281 
 282         afd_t           t_activefd;     /* active file descriptor table */
 283 
 284         struct _kthread *t_priforw;     /* sleepq per-priority sublist */
 285         struct _kthread *t_priback;
 286 
 287         struct sleepq   *t_sleepq;      /* sleep queue thread is waiting on */


 378 #define TP_DAEMON       0x0001  /* this is an LWP_DAEMON lwp */
 379 #define TP_HOLDLWP      0x0002  /* hold thread's lwp */
 380 #define TP_TWAIT        0x0004  /* wait to be freed by lwp_wait() */
 381 #define TP_LWPEXIT      0x0008  /* lwp has exited */
 382 #define TP_PRSTOP       0x0010  /* thread is being stopped via /proc */
 383 #define TP_CHKPT        0x0020  /* thread is being stopped via CPR checkpoint */
 384 #define TP_EXITLWP      0x0040  /* terminate this lwp */
 385 #define TP_PRVSTOP      0x0080  /* thread is virtually stopped via /proc */
 386 #define TP_MSACCT       0x0100  /* collect micro-state accounting information */
 387 #define TP_STOPPING     0x0200  /* thread is executing stop() */
 388 #define TP_WATCHPT      0x0400  /* process has watchpoints in effect */
 389 #define TP_PAUSE        0x0800  /* process is being stopped via pauselwps() */
 390 #define TP_CHANGEBIND   0x1000  /* thread has a new cpu/cpupart binding */
 391 #define TP_ZTHREAD      0x2000  /* this is a kernel thread for a zone */
 392 #define TP_WATCHSTOP    0x4000  /* thread is stopping via holdwatch() */
 393 
 394 /*
 395  * Thread scheduler flag (t_schedflag) definitions.
 396  *      The thread must be locked via thread_lock() or equiv. to change these.
 397  */




 398 #define TS_SIGNALLED    0x0010  /* thread was awakened by cv_signal() */
 399 #define TS_PROJWAITQ    0x0020  /* thread is on its project's waitq */
 400 #define TS_ZONEWAITQ    0x0040  /* thread is on its zone's waitq */
 401 #define TS_CSTART       0x0100  /* setrun() by continuelwps() */
 402 #define TS_UNPAUSE      0x0200  /* setrun() by unpauselwps() */
 403 #define TS_XSTART       0x0400  /* setrun() by SIGCONT */
 404 #define TS_PSTART       0x0800  /* setrun() by /proc */
 405 #define TS_RESUME       0x1000  /* setrun() by CPR resume process */
 406 #define TS_CREATE       0x2000  /* setrun() by syslwp_create() */
 407 #define TS_RUNQMATCH    0x4000  /* exact run queue balancing by setbackdq() */
 408 #define TS_ALLSTART     \
 409         (TS_CSTART|TS_UNPAUSE|TS_XSTART|TS_PSTART|TS_RESUME|TS_CREATE)
 410 #define TS_ANYWAITQ     (TS_PROJWAITQ|TS_ZONEWAITQ)
 411 
 412 /*
 413  * Thread binding types
 414  */
 415 #define TB_ALLHARD      0
 416 #define TB_CPU_SOFT     0x01            /* soft binding to CPU */
 417 #define TB_PSET_SOFT    0x02            /* soft binding to pset */


 455 #define CPR_VSTOPPED(t)                         \
 456         ((t)->t_state == TS_SLEEP &&         \
 457         (t)->t_wchan0 != NULL &&             \
 458         ((t)->t_flag & T_WAKEABLE) &&            \
 459         ((t)->t_proc_flag & TP_CHKPT))
 460 
 461 /* True if thread has been stopped by hold*() or was created stopped */
 462 #define SUSPENDED(t) ((t)->t_state == TS_STOPPED && \
 463         ((t)->t_schedflag & (TS_CSTART|TS_UNPAUSE)) != (TS_CSTART|TS_UNPAUSE))
 464 
 465 /* True if thread possesses an inherited priority */
 466 #define INHERITED(t)    ((t)->t_epri != 0)
 467 
 468 /* The dispatch priority of a thread */
 469 #define DISP_PRIO(t) ((t)->t_epri > (t)->t_pri ? (t)->t_epri : (t)->t_pri)
 470 
 471 /* The assigned priority of a thread */
 472 #define ASSIGNED_PRIO(t)        ((t)->t_pri)
 473 
 474 /*






 475  * proctot(x)
 476  *      convert a proc pointer to a thread pointer. this only works with
 477  *      procs that have only one lwp.
 478  *
 479  * proctolwp(x)
 480  *      convert a proc pointer to a lwp pointer. this only works with
 481  *      procs that have only one lwp.
 482  *
 483  * ttolwp(x)
 484  *      convert a thread pointer to its lwp pointer.
 485  *
 486  * ttoproc(x)
 487  *      convert a thread pointer to its proc pointer.
 488  *
 489  * ttoproj(x)
 490  *      convert a thread pointer to its project pointer.
 491  *
 492  * ttozone(x)
 493  *      convert a thread pointer to its zone pointer.
 494  *


 622 #define THREAD_TRANSITION(tp)   thread_transition(tp);
 623 /*
 624  * Set the thread's lock to be the transition lock, without dropping
 625  * previously held lock.
 626  */
 627 #define THREAD_TRANSITION_NOLOCK(tp)    ((tp)->t_lockp = &transition_lock)
 628 
 629 /*
 630  * Put thread in run state, and set the lock pointer to the dispatcher queue
 631  * lock pointer provided.  This lock should be held.
 632  */
 633 #define THREAD_RUN(tp, lp)      THREAD_SET_STATE(tp, TS_RUN, lp)
 634 
 635 /*
 636  * Put thread in wait state, and set the lock pointer to the wait queue
 637  * lock pointer provided.  This lock should be held.
 638  */
 639 #define THREAD_WAIT(tp, lp)     THREAD_SET_STATE(tp, TS_WAIT, lp)
 640 
 641 /*






 642  * Put the thread in zombie state and set the lock pointer to NULL.
 643  * The NULL will catch anything that tries to lock a zombie.
 644  */
 645 #define THREAD_ZOMB(tp)         THREAD_SET_STATE(tp, TS_ZOMB, NULL)
 646 
 647 /*
 648  * Set the thread into ONPROC state, and point the lock at the CPUs
 649  * lock for the onproc thread(s).  This lock should be held, so the
 650  * thread does not become unlocked, since these stores can be reordered.
 651  */
 652 #define THREAD_ONPROC(tp, cpu)  \
 653                 THREAD_SET_STATE(tp, TS_ONPROC, &(cpu)->cpu_thread_lock)
 654 
 655 /*
 656  * Set the thread into the TS_SLEEP state, and set the lock pointer to
 657  * some sleep queue's lock.  The new lock should already be held.
 658  */
 659 #define THREAD_SLEEP(tp, lp)    {                               \
 660                         disp_lock_t     *tlp;                   \
 661                         tlp = (tp)->t_lockp;                 \