patch delete-swapped_lock
patch remove-dead-disp-code
patch remove-useless-var2
patch remove-load-flag
patch remove-on-swapq-flag
patch remove-dont-swap-flag


  79 
  80 /* platform-specific routine to call when processor is idle */
  81 static void     generic_idle_cpu();
  82 void            (*idle_cpu)() = generic_idle_cpu;
  83 
  84 /* routines invoked when a CPU enters/exits the idle loop */
  85 static void     idle_enter();
  86 static void     idle_exit();
  87 
  88 /* platform-specific routine to call when thread is enqueued */
  89 static void     generic_enq_thread(cpu_t *, int);
  90 void            (*disp_enq_thread)(cpu_t *, int) = generic_enq_thread;
  91 
  92 pri_t   kpreemptpri;            /* priority where kernel preemption applies */
  93 pri_t   upreemptpri = 0;        /* priority where normal preemption applies */
  94 pri_t   intr_pri;               /* interrupt thread priority base level */
  95 
  96 #define KPQPRI  -1              /* pri where cpu affinity is dropped for kpq */
  97 pri_t   kpqpri = KPQPRI;        /* can be set in /etc/system */
  98 disp_t  cpu0_disp;              /* boot CPU's dispatch queue */
  99 disp_lock_t     swapped_lock;   /* lock swapped threads and swap queue */
 100 int     nswapped;               /* total number of swapped threads */
 101 void    disp_swapped_enq(kthread_t *tp);
 102 static void     disp_swapped_setrun(kthread_t *tp);
 103 static void     cpu_resched(cpu_t *cp, pri_t tpri);
 104 
 105 /*
 106  * If this is set, only interrupt threads will cause kernel preemptions.
 107  * This is done by changing the value of kpreemptpri.  kpreemptpri
 108  * will either be the max sysclass pri + 1 or the min interrupt pri.
 109  */
 110 int     only_intr_kpreempt;
 111 
 112 extern void set_idle_cpu(int cpun);
 113 extern void unset_idle_cpu(int cpun);
 114 static void setkpdq(kthread_t *tp, int borf);
 115 #define SETKP_BACK      0
 116 #define SETKP_FRONT     1
 117 /*
 118  * Parameter that determines how recently a thread must have run
 119  * on the CPU to be considered loosely-bound to that CPU to reduce
 120  * cold cache effects.  The interval is in hertz.
 121  */
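The declaration this comment describes is cut off in the excerpt above; as a rough, hedged illustration of the kind of tick-based warmth test such a parameter feeds (the name below is a placeholder, not taken from the file):

        /* illustrative sketch only; "warmth_interval" stands in for the elided declaration */
        if ((ddi_get_lbolt() - tp->t_disp_time) <= warmth_interval) {
                /* tp ran recently enough to still be cache-warm on tp->t_cpu */
        }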


 759                                 cpup->cpu_chosen_level = -1;
 760                         }
 761                 } else {
 762                         disp_lock_exit_high(&dp->disp_lock);
 763                         tp = cpup->cpu_idle_thread;
 764                         THREAD_ONPROC(tp, cpup);
 765                         cpup->cpu_dispthread = tp;
 766                         cpup->cpu_dispatch_pri = -1;
 767                         cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
 768                         cpup->cpu_chosen_level = -1;
 769                 }
 770                 TRACE_1(TR_FAC_DISP, TR_DISP_END,
 771                     "disp_end:tid %p", tp);
 772                 return (tp);
 773         }
 774 
 775         dq = &dp->disp_q[pri];
 776         tp = dq->dq_first;
 777 
 778         ASSERT(tp != NULL);
 779         ASSERT(tp->t_schedflag & TS_LOAD);       /* thread must be swapped in */
 780 
 781         DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp);
 782 
 783         /*
 784          * Found it, so remove it from the queue.
 785          */
 786         dp->disp_nrunnable--;
 787         dq->dq_sruncnt--;
 788         if ((dq->dq_first = tp->t_link) == NULL) {
 789                 ulong_t *dqactmap = dp->disp_qactmap;
 790 
 791                 ASSERT(dq->dq_sruncnt == 0);
 792                 dq->dq_last = NULL;
 793 
 794                 /*
 795                  * The queue is empty, so the corresponding bit needs to be
 796                  * turned off in dqactmap.  If nrunnable != 0, we just took
 797                  * the last runnable thread off the highest queue, so
 798                  * recompute disp_maxrunpri.
 799                  */
 800                 maxrunword = pri >> BT_ULSHIFT;
 801                 dqactmap[maxrunword] &= ~BT_BIW(pri);
 802 
 803                 if (dp->disp_nrunnable == 0) {
 804                         dp->disp_max_unbound_pri = -1;
 805                         dp->disp_maxrunpri = -1;
 806                 } else {
 807                         int ipri;
 808 
 809                         ipri = bt_gethighbit(dqactmap, maxrunword);
 810                         dp->disp_maxrunpri = ipri;
 811                         if (ipri < dp->disp_max_unbound_pri)
 812                                 dp->disp_max_unbound_pri = ipri;
 813                 }
 814         } else {
 815                 tp->t_link = NULL;
 816         }
 817 
 818         /*
 819          * Set TS_DONT_SWAP flag to prevent another processor from swapping
 820          * out this thread before we have a chance to run it.
 821          * While running, it is protected against swapping by t_lock.
 822          */
 823         tp->t_schedflag |= TS_DONT_SWAP;
 824         cpup->cpu_dispthread = tp;           /* protected by spl only */
 825         cpup->cpu_dispatch_pri = pri;
 826         ASSERT(pri == DISP_PRIO(tp));
 827         thread_onproc(tp, cpup);                /* set t_state to TS_ONPROC */
 828         disp_lock_exit_high(&dp->disp_lock);     /* drop run queue lock */
 829 
 830         ASSERT(tp != NULL);
 831         TRACE_1(TR_FAC_DISP, TR_DISP_END,
 832             "disp_end:tid %p", tp);
 833 
 834         if (disp_ratify(tp, kpq) == NULL)
 835                 goto reschedule;
 836 
 837         return (tp);
 838 }
 839 
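As a worked illustration of the dqactmap bookkeeping in the dequeue path above (assuming 64-bit words, i.e. BT_ULSHIFT == 6; concrete numbers are for illustration only):

        /* illustrative only: where priority 70 lives in dqactmap */
        pri_t   pri  = 70;
        int     word = pri >> BT_ULSHIFT;        /* 70 >> 6 -> word 1            */
        ulong_t bit  = BT_BIW(pri);              /* 1UL << (70 & 63) -> bit 6    */
        int     ipri;

        dqactmap[word] &= ~bit;                  /* queue 70 just became empty   */
        ipri = bt_gethighbit(dqactmap, word);    /* highest remaining pri, or -1 */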
 840 /*
 841  * swtch()
 842  *      Find best runnable thread and run it.
 843  *      Called with the current thread already switched to a new state,


1174  * queue corresponding to its current priority.
1175  *
1176  * Called with the thread in transition, onproc or stopped state
1177  * and locked (transition implies locked) and at high spl.
1178  * Returns with the thread in TS_RUN state and still locked.
1179  */
1180 void
1181 setbackdq(kthread_t *tp)
1182 {
1183         dispq_t *dq;
1184         disp_t          *dp;
1185         cpu_t           *cp;
1186         pri_t           tpri;
1187         int             bound;
1188         boolean_t       self;
1189 
1190         ASSERT(THREAD_LOCK_HELD(tp));
1191         ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
1192         ASSERT(!thread_on_queue(tp));   /* make sure tp isn't on a runq */
1193 
1194         /*
1195          * If the thread is "swapped" or on the swap queue, don't
1196          * queue it, but wake sched.
1197          */
1198         if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) {
1199                 disp_swapped_setrun(tp);
1200                 return;
1201         }
1202 
1203         self = (tp == curthread);
1204 
1205         if (tp->t_bound_cpu || tp->t_weakbound_cpu)
1206                 bound = 1;
1207         else
1208                 bound = 0;
1209 
1210         tpri = DISP_PRIO(tp);
1211         if (ncpus == 1)
1212                 cp = tp->t_cpu;
1213         else if (!bound) {
1214                 if (tpri >= kpqpri) {
1215                         setkpdq(tp, SETKP_BACK);
1216                         return;
1217                 }
1218 
1219                 /*
1220                  * We'll generally let this thread continue to run where
1221                  * it last ran...but will consider migration if:
1222                  * - We thread probably doesn't have much cache warmth.

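The TS_LOAD/TS_ON_SWAPQ test at the top of setbackdq above (and of setfrontdq below) is easier to read as a case table; only a loaded thread that is not on the swap queue goes onto a run queue:

        /*
         * tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ):
         *
         *   TS_LOAD only           -> loaded, not on swap queue: enqueue normally
         *   TS_LOAD | TS_ON_SWAPQ  -> loaded but on the swap queue: disp_swapped_setrun()
         *   TS_LOAD clear          -> swapped out: disp_swapped_setrun()
         */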

1363  * Put the specified thread on the front of the dispatcher
1364  * queue corresponding to its current priority.
1365  *
1366  * Called with the thread in transition, onproc or stopped state
1367  * and locked (transition implies locked) and at high spl.
1368  * Returns with the thread in TS_RUN state and still locked.
1369  */
1370 void
1371 setfrontdq(kthread_t *tp)
1372 {
1373         disp_t          *dp;
1374         dispq_t         *dq;
1375         cpu_t           *cp;
1376         pri_t           tpri;
1377         int             bound;
1378 
1379         ASSERT(THREAD_LOCK_HELD(tp));
1380         ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
1381         ASSERT(!thread_on_queue(tp));   /* make sure tp isn't on a runq */
1382 
1383         /*
1384          * If thread is "swapped" or on the swap queue don't
1385          * queue it, but wake sched.
1386          */
1387         if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) {
1388                 disp_swapped_setrun(tp);
1389                 return;
1390         }
1391 
1392         if (tp->t_bound_cpu || tp->t_weakbound_cpu)
1393                 bound = 1;
1394         else
1395                 bound = 0;
1396 
1397         tpri = DISP_PRIO(tp);
1398         if (ncpus == 1)
1399                 cp = tp->t_cpu;
1400         else if (!bound) {
1401                 if (tpri >= kpqpri) {
1402                         setkpdq(tp, SETKP_FRONT);
1403                         return;
1404                 }
1405                 cp = tp->t_cpu;
1406                 if (tp->t_cpupart == cp->cpu_part) {
1407                         /*
1408                          * We'll generally let this thread continue to run
1409                          * where it last ran, but will consider migration if:
1410                  * - The thread last ran outside its home lgroup.
1411                          * - The CPU where it last ran is the target of an


1600 /*
1601  * Remove a thread from the dispatcher queue if it is on it.
1602  * It is not an error if it is not found but we return whether
1603  * or not it was found in case the caller wants to check.
1604  */
1605 int
1606 dispdeq(kthread_t *tp)
1607 {
1608         disp_t          *dp;
1609         dispq_t         *dq;
1610         kthread_t       *rp;
1611         kthread_t       *trp;
1612         kthread_t       **ptp;
1613         int             tpri;
1614 
1615         ASSERT(THREAD_LOCK_HELD(tp));
1616 
1617         if (tp->t_state != TS_RUN)
1618                 return (0);
1619 
1620         /*
1621          * The thread is "swapped" or is on the swap queue and
1622          * hence no longer on the run queue, so return true.
1623          */
1624         if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD)
1625                 return (1);
1626 
1627         tpri = DISP_PRIO(tp);
1628         dp = tp->t_disp_queue;
1629         ASSERT(tpri < dp->disp_npri);
1630         dq = &dp->disp_q[tpri];
1631         ptp = &dq->dq_first;
1632         rp = *ptp;
1633         trp = NULL;
1634 
1635         ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL);
1636 
1637         /*
1638          * Search for thread in queue.
1639          * Double links would simplify this at the expense of disp/setrun.
1640          */
1641         while (rp != tp && rp != NULL) {
1642                 trp = rp;
1643                 ptp = &trp->t_link;
1644                 rp = trp->t_link;
1645         }
1646 
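The search loop above keeps a pointer-to-pointer (ptp) precisely so the removal step, elided between this hunk and the next, can unlink tp without back links; a minimal sketch of that idiom, not the elided source itself:

        /* sketch of the unlink idiom only; the file's own removal code is elided here */
        *ptp = tp->t_link;              /* predecessor's t_link (or dq_first) now skips tp */
        if (tp->t_link == NULL)         /* tp was the tail, so dq_last must be fixed up    */
                dq->dq_last = trp;      /* trp is the predecessor, or NULL if queue is now empty */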


1660         if (--dq->dq_sruncnt == 0) {
1661                 dp->disp_qactmap[tpri >> BT_ULSHIFT] &= ~BT_BIW(tpri);
1662                 if (dp->disp_nrunnable == 0) {
1663                         dp->disp_max_unbound_pri = -1;
1664                         dp->disp_maxrunpri = -1;
1665                 } else if (tpri == dp->disp_maxrunpri) {
1666                         int ipri;
1667 
1668                         ipri = bt_gethighbit(dp->disp_qactmap,
1669                             dp->disp_maxrunpri >> BT_ULSHIFT);
1670                         if (ipri < dp->disp_max_unbound_pri)
1671                                 dp->disp_max_unbound_pri = ipri;
1672                         dp->disp_maxrunpri = ipri;
1673                 }
1674         }
1675         tp->t_link = NULL;
1676         THREAD_TRANSITION(tp);          /* put in intermediate state */
1677         return (1);
1678 }
1679 
1680 
1681 /*
1682  * dq_sruninc and dq_srundec are public functions for
1683  * incrementing/decrementing the sruncnts when a thread on
1684  * a dispatcher queue is made schedulable/unschedulable by
1685  * resetting the TS_LOAD flag.
1686  *
1687  * The caller MUST have the thread lock and therefore the dispatcher
1688  * queue lock so that the operation which changes
1689  * the flag, the operation that checks the status of the thread to
1690  * determine if it's on a disp queue AND the call to this function
1691  * are one atomic operation with respect to interrupts.
1692  */
1693 
1694 /*
1695  * Called by sched AFTER TS_LOAD flag is set on a swapped, runnable thread.
1696  */
1697 void
1698 dq_sruninc(kthread_t *t)
1699 {
1700         ASSERT(t->t_state == TS_RUN);
1701         ASSERT(t->t_schedflag & TS_LOAD);
1702 
1703         THREAD_TRANSITION(t);
1704         setfrontdq(t);
1705 }
1706 
1707 /*
1708  * See comment on calling conventions above.
1709  * Called by sched BEFORE TS_LOAD flag is cleared on a runnable thread.
1710  */
1711 void
1712 dq_srundec(kthread_t *t)
1713 {
1714         ASSERT(t->t_schedflag & TS_LOAD);
1715 
1716         (void) dispdeq(t);
1717         disp_swapped_enq(t);
1718 }
1719 
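A hedged sketch of a caller honoring the convention spelled out above -- a hypothetical unload path, not the actual swapper code in sched.c:

        /* hypothetical caller; shows only the locking and ordering contract */
        thread_lock(tp);                        /* thread lock == dispatcher queue lock here  */
        if (tp->t_state == TS_RUN)
                dq_srundec(tp);                 /* dequeue and hand tp over to swapped_lock   */
        tp->t_schedflag &= ~TS_LOAD;            /* cleared AFTER dq_srundec, per the comment  */
        thread_unlock(tp);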
1720 /*
1721  * Change the dispatcher lock of thread to the "swapped_lock"
1722  * and return with thread lock still held.
1723  *
1724  * Called with thread_lock held, in transition state, and at high spl.
1725  */
1726 void
1727 disp_swapped_enq(kthread_t *tp)
1728 {
1729         ASSERT(THREAD_LOCK_HELD(tp));
1730         ASSERT(tp->t_schedflag & TS_LOAD);
1731 
1732         switch (tp->t_state) {
1733         case TS_RUN:
1734                 disp_lock_enter_high(&swapped_lock);
1735                 THREAD_SWAP(tp, &swapped_lock);     /* set TS_RUN state and lock */
1736                 break;
1737         case TS_ONPROC:
1738                 disp_lock_enter_high(&swapped_lock);
1739                 THREAD_TRANSITION(tp);
1740                 wake_sched_sec = 1;             /* tell clock to wake sched */
1741                 THREAD_SWAP(tp, &swapped_lock);     /* set TS_RUN state and lock */
1742                 break;
1743         default:
1744                 panic("disp_swapped: tp: %p bad t_state", (void *)tp);
1745         }
1746 }
1747 
1748 /*
1749  * This routine is called by setbackdq/setfrontdq if the thread is
1750  * either not loaded, or loaded but on the swap queue.
1751  *
1752  * Thread state TS_SLEEP implies that a swapped thread
1753  * has been woken up and needs to be swapped in by the swapper.
1754  *
1755  * Thread state TS_RUN implies that the priority of a swapped
1756  * thread is being increased by its scheduling class (e.g. ts_update).
1757  */
1758 static void
1759 disp_swapped_setrun(kthread_t *tp)
1760 {
1761         ASSERT(THREAD_LOCK_HELD(tp));
1762         ASSERT((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD);
1763 
1764         switch (tp->t_state) {
1765         case TS_SLEEP:
1766                 disp_lock_enter_high(&swapped_lock);
1767                 /*
1768                  * Wakeup sched immediately (i.e., next tick) if the
1769                  * thread priority is above maxclsyspri.
1770                  */
1771                 if (DISP_PRIO(tp) > maxclsyspri)
1772                         wake_sched = 1;
1773                 else
1774                         wake_sched_sec = 1;
1775                 THREAD_RUN(tp, &swapped_lock); /* set TS_RUN state and lock */
1776                 break;
1777         case TS_RUN:                            /* called from ts_update */
1778                 break;
1779         default:
1780                 panic("disp_swapped_setrun: tp: %p bad t_state", (void *)tp);
1781         }
1782 }
1783 
1784 /*
1785  *      Make a thread give up its processor.  Find the processor on
1786  *      which this thread is executing, and have that processor
1787  *      preempt.
1788  *
1789  *      We allow System Duty Cycle (SDC) threads to be preempted even if
1790  *      they are running at kernel priorities.  To implement this, we always
1791  *      set cpu_kprunrun; this ensures preempt() will be called.  Since SDC
1792  *      calls cpu_surrender() very often, we only preempt if there is anyone
1793  *      competing with us.
1794  */
1795 void
1796 cpu_surrender(kthread_t *tp)
1797 {
1798         cpu_t   *cpup;
1799         int     max_pri;
1800         int     max_run_pri;
1801         klwp_t  *lwp;
1802 
1803         ASSERT(THREAD_LOCK_HELD(tp));


2137          */
2138         if (dp->disp_max_unbound_pri != pri)
2139                 dp->disp_max_unbound_pri = pri;
2140 }
2141 
2142 /*
2143  * disp_adjust_unbound_pri() - thread is becoming unbound, so we should
2144  *      check if the CPU to which it was previously bound should have
2145  *      its disp_max_unbound_pri increased.
2146  */
2147 void
2148 disp_adjust_unbound_pri(kthread_t *tp)
2149 {
2150         disp_t *dp;
2151         pri_t tpri;
2152 
2153         ASSERT(THREAD_LOCK_HELD(tp));
2154 
2155         /*
2156          * Don't do anything if the thread is not bound, or
2157          * currently not runnable or swapped out.
2158          */
2159         if (tp->t_bound_cpu == NULL ||
2160             tp->t_state != TS_RUN ||
2161             tp->t_schedflag & TS_ON_SWAPQ)
2162                 return;
2163 
2164         tpri = DISP_PRIO(tp);
2165         dp = tp->t_bound_cpu->cpu_disp;
2166         ASSERT(tpri >= 0 && tpri < dp->disp_npri);
2167         if (tpri > dp->disp_max_unbound_pri)
2168                 dp->disp_max_unbound_pri = tpri;
2169 }
2170 
2171 /*
2172  * disp_getbest()
2173  *   De-queue the highest priority unbound runnable thread.
2174  *   Returns with the thread unlocked and onproc but at splhigh (like disp()).
2175  *   Returns NULL if nothing found.
2176  *   Returns T_DONTSTEAL if the thread was not stealable,
2177  *   so that the caller will try again later.
2178  *
2179  *   Passed a pointer to a dispatch queue not associated with this CPU, and
2180  *   its type.
2181  */
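A hedged sketch of how a caller might act on the three-way return contract described above (hypothetical caller, shown only to illustrate the contract):

        /* hypothetical caller of disp_getbest() */
        kthread_t *tp = disp_getbest(dp);

        if (tp == NULL) {
                /* no unbound runnable thread on dp */
        } else if (tp == T_DONTSTEAL) {
                /* a candidate existed but was not yet stealable; retry later */
        } else {
                /* tp is unlocked and already onproc on this CPU, at splhigh */
        }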


2336          */
2337 
2338 #ifdef DEBUG
2339         {
2340                 int     thread_was_on_queue;
2341 
2342                 thread_was_on_queue = dispdeq(tp);      /* drops disp_lock */
2343                 ASSERT(thread_was_on_queue);
2344         }
2345 
2346 #else /* DEBUG */
2347         (void) dispdeq(tp);                     /* drops disp_lock */
2348 #endif /* DEBUG */
2349 
2350         /*
2351          * Reset the disp_queue steal time - we do not know what the
2352          * smallest value across the queue is.
2353          */
2354         dp->disp_steal = 0;
2355 
2356         tp->t_schedflag |= TS_DONT_SWAP;
2357 
2358         /*
2359          * Setup thread to run on the current CPU.
2360          */
2361         tp->t_disp_queue = cp->cpu_disp;
2362 
2363         cp->cpu_dispthread = tp;             /* protected by spl only */
2364         cp->cpu_dispatch_pri = pri;
2365 
2366         /*
2367          * There can be a memory synchronization race between disp_getbest()
2368          * and disp_ratify() vs cpu_resched() where cpu_resched() is trying
2369          * to preempt the current thread to run the enqueued thread while
2370          * disp_getbest() and disp_ratify() are changing the current thread
2371          * to the stolen thread. This may lead to a situation where
2372          * cpu_resched() tries to preempt the wrong thread and the
2373          * stolen thread continues to run on the CPU which has been tagged
2374          * for preemption.
2375          * Later the clock thread gets enqueued but doesn't get to run on the
2376          * CPU causing the system to hang.
2377          *




  79 
  80 /* platform-specific routine to call when processor is idle */
  81 static void     generic_idle_cpu();
  82 void            (*idle_cpu)() = generic_idle_cpu;
  83 
  84 /* routines invoked when a CPU enters/exits the idle loop */
  85 static void     idle_enter();
  86 static void     idle_exit();
  87 
  88 /* platform-specific routine to call when thread is enqueued */
  89 static void     generic_enq_thread(cpu_t *, int);
  90 void            (*disp_enq_thread)(cpu_t *, int) = generic_enq_thread;
  91 
  92 pri_t   kpreemptpri;            /* priority where kernel preemption applies */
  93 pri_t   upreemptpri = 0;        /* priority where normal preemption applies */
  94 pri_t   intr_pri;               /* interrupt thread priority base level */
  95 
  96 #define KPQPRI  -1              /* pri where cpu affinity is dropped for kpq */
  97 pri_t   kpqpri = KPQPRI;        /* can be set in /etc/system */
  98 disp_t  cpu0_disp;              /* boot CPU's dispatch queue */

  99 int     nswapped;               /* total number of swapped threads */

 100 static void     disp_swapped_setrun(kthread_t *tp);
 101 static void     cpu_resched(cpu_t *cp, pri_t tpri);
 102 
 103 /*
 104  * If this is set, only interrupt threads will cause kernel preemptions.
 105  * This is done by changing the value of kpreemptpri.  kpreemptpri
 106  * will either be the max sysclass pri + 1 or the min interrupt pri.
 107  */
 108 int     only_intr_kpreempt;
 109 
 110 extern void set_idle_cpu(int cpun);
 111 extern void unset_idle_cpu(int cpun);
 112 static void setkpdq(kthread_t *tp, int borf);
 113 #define SETKP_BACK      0
 114 #define SETKP_FRONT     1
 115 /*
 116  * Parameter that determines how recently a thread must have run
 117  * on the CPU to be considered loosely-bound to that CPU to reduce
 118  * cold cache effects.  The interval is in hertz.
 119  */


 757                                 cpup->cpu_chosen_level = -1;
 758                         }
 759                 } else {
 760                         disp_lock_exit_high(&dp->disp_lock);
 761                         tp = cpup->cpu_idle_thread;
 762                         THREAD_ONPROC(tp, cpup);
 763                         cpup->cpu_dispthread = tp;
 764                         cpup->cpu_dispatch_pri = -1;
 765                         cpup->cpu_runrun = cpup->cpu_kprunrun = 0;
 766                         cpup->cpu_chosen_level = -1;
 767                 }
 768                 TRACE_1(TR_FAC_DISP, TR_DISP_END,
 769                     "disp_end:tid %p", tp);
 770                 return (tp);
 771         }
 772 
 773         dq = &dp->disp_q[pri];
 774         tp = dq->dq_first;
 775 
 776         ASSERT(tp != NULL);

 777 
 778         DTRACE_SCHED2(dequeue, kthread_t *, tp, disp_t *, dp);
 779 
 780         /*
 781          * Found it, so remove it from the queue.
 782          */
 783         dp->disp_nrunnable--;
 784         dq->dq_sruncnt--;
 785         if ((dq->dq_first = tp->t_link) == NULL) {
 786                 ulong_t *dqactmap = dp->disp_qactmap;
 787 
 788                 ASSERT(dq->dq_sruncnt == 0);
 789                 dq->dq_last = NULL;
 790 
 791                 /*
 792                  * The queue is empty, so the corresponding bit needs to be
 793                  * turned off in dqactmap.  If nrunnable != 0, we just took
 794                  * the last runnable thread off the highest queue, so
 795                  * recompute disp_maxrunpri.
 796                  */
 797                 maxrunword = pri >> BT_ULSHIFT;
 798                 dqactmap[maxrunword] &= ~BT_BIW(pri);
 799 
 800                 if (dp->disp_nrunnable == 0) {
 801                         dp->disp_max_unbound_pri = -1;
 802                         dp->disp_maxrunpri = -1;
 803                 } else {
 804                         int ipri;
 805 
 806                         ipri = bt_gethighbit(dqactmap, maxrunword);
 807                         dp->disp_maxrunpri = ipri;
 808                         if (ipri < dp->disp_max_unbound_pri)
 809                                 dp->disp_max_unbound_pri = ipri;
 810                 }
 811         } else {
 812                 tp->t_link = NULL;
 813         }
 814 






 815         cpup->cpu_dispthread = tp;           /* protected by spl only */
 816         cpup->cpu_dispatch_pri = pri;
 817         ASSERT(pri == DISP_PRIO(tp));
 818         thread_onproc(tp, cpup);                /* set t_state to TS_ONPROC */
 819         disp_lock_exit_high(&dp->disp_lock);     /* drop run queue lock */
 820 
 821         ASSERT(tp != NULL);
 822         TRACE_1(TR_FAC_DISP, TR_DISP_END,
 823             "disp_end:tid %p", tp);
 824 
 825         if (disp_ratify(tp, kpq) == NULL)
 826                 goto reschedule;
 827 
 828         return (tp);
 829 }
 830 
 831 /*
 832  * swtch()
 833  *      Find best runnable thread and run it.
 834  *      Called with the current thread already switched to a new state,


1165  * queue corresponding to its current priority.
1166  *
1167  * Called with the thread in transition, onproc or stopped state
1168  * and locked (transition implies locked) and at high spl.
1169  * Returns with the thread in TS_RUN state and still locked.
1170  */
1171 void
1172 setbackdq(kthread_t *tp)
1173 {
1174         dispq_t *dq;
1175         disp_t          *dp;
1176         cpu_t           *cp;
1177         pri_t           tpri;
1178         int             bound;
1179         boolean_t       self;
1180 
1181         ASSERT(THREAD_LOCK_HELD(tp));
1182         ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
1183         ASSERT(!thread_on_queue(tp));   /* make sure tp isn't on a runq */
1184 









1185         self = (tp == curthread);
1186 
1187         if (tp->t_bound_cpu || tp->t_weakbound_cpu)
1188                 bound = 1;
1189         else
1190                 bound = 0;
1191 
1192         tpri = DISP_PRIO(tp);
1193         if (ncpus == 1)
1194                 cp = tp->t_cpu;
1195         else if (!bound) {
1196                 if (tpri >= kpqpri) {
1197                         setkpdq(tp, SETKP_BACK);
1198                         return;
1199                 }
1200 
1201                 /*
1202                  * We'll generally let this thread continue to run where
1203                  * it last ran...but will consider migration if:
1204                  * - The thread probably doesn't have much cache warmth.


1345  * Put the specified thread on the front of the dispatcher
1346  * queue corresponding to its current priority.
1347  *
1348  * Called with the thread in transition, onproc or stopped state
1349  * and locked (transition implies locked) and at high spl.
1350  * Returns with the thread in TS_RUN state and still locked.
1351  */
1352 void
1353 setfrontdq(kthread_t *tp)
1354 {
1355         disp_t          *dp;
1356         dispq_t         *dq;
1357         cpu_t           *cp;
1358         pri_t           tpri;
1359         int             bound;
1360 
1361         ASSERT(THREAD_LOCK_HELD(tp));
1362         ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
1363         ASSERT(!thread_on_queue(tp));   /* make sure tp isn't on a runq */
1364 









1365         if (tp->t_bound_cpu || tp->t_weakbound_cpu)
1366                 bound = 1;
1367         else
1368                 bound = 0;
1369 
1370         tpri = DISP_PRIO(tp);
1371         if (ncpus == 1)
1372                 cp = tp->t_cpu;
1373         else if (!bound) {
1374                 if (tpri >= kpqpri) {
1375                         setkpdq(tp, SETKP_FRONT);
1376                         return;
1377                 }
1378                 cp = tp->t_cpu;
1379                 if (tp->t_cpupart == cp->cpu_part) {
1380                         /*
1381                          * We'll generally let this thread continue to run
1382                          * where it last ran, but will consider migration if:
1383                  * - The thread last ran outside its home lgroup.
1384                          * - The CPU where it last ran is the target of an


1573 /*
1574  * Remove a thread from the dispatcher queue if it is on it.
1575  * It is not an error if it is not found but we return whether
1576  * or not it was found in case the caller wants to check.
1577  */
1578 int
1579 dispdeq(kthread_t *tp)
1580 {
1581         disp_t          *dp;
1582         dispq_t         *dq;
1583         kthread_t       *rp;
1584         kthread_t       *trp;
1585         kthread_t       **ptp;
1586         int             tpri;
1587 
1588         ASSERT(THREAD_LOCK_HELD(tp));
1589 
1590         if (tp->t_state != TS_RUN)
1591                 return (0);
1592 







1593         tpri = DISP_PRIO(tp);
1594         dp = tp->t_disp_queue;
1595         ASSERT(tpri < dp->disp_npri);
1596         dq = &dp->disp_q[tpri];
1597         ptp = &dq->dq_first;
1598         rp = *ptp;
1599         trp = NULL;
1600 
1601         ASSERT(dq->dq_last == NULL || dq->dq_last->t_link == NULL);
1602 
1603         /*
1604          * Search for thread in queue.
1605          * Double links would simplify this at the expense of disp/setrun.
1606          */
1607         while (rp != tp && rp != NULL) {
1608                 trp = rp;
1609                 ptp = &trp->t_link;
1610                 rp = trp->t_link;
1611         }
1612 


1626         if (--dq->dq_sruncnt == 0) {
1627                 dp->disp_qactmap[tpri >> BT_ULSHIFT] &= ~BT_BIW(tpri);
1628                 if (dp->disp_nrunnable == 0) {
1629                         dp->disp_max_unbound_pri = -1;
1630                         dp->disp_maxrunpri = -1;
1631                 } else if (tpri == dp->disp_maxrunpri) {
1632                         int ipri;
1633 
1634                         ipri = bt_gethighbit(dp->disp_qactmap,
1635                             dp->disp_maxrunpri >> BT_ULSHIFT);
1636                         if (ipri < dp->disp_max_unbound_pri)
1637                                 dp->disp_max_unbound_pri = ipri;
1638                         dp->disp_maxrunpri = ipri;
1639                 }
1640         }
1641         tp->t_link = NULL;
1642         THREAD_TRANSITION(tp);          /* put in intermediate state */
1643         return (1);
1644 }
1645 


1646 /*
1647  *      Make a thread give up its processor.  Find the processor on
1648  *      which this thread is executing, and have that processor
1649  *      preempt.
1650  *
1651  *      We allow System Duty Cycle (SDC) threads to be preempted even if
1652  *      they are running at kernel priorities.  To implement this, we always
1653  *      set cpu_kprunrun; this ensures preempt() will be called.  Since SDC
1654  *      calls cpu_surrender() very often, we only preempt if there is anyone
1655  *      competing with us.
1656  */
1657 void
1658 cpu_surrender(kthread_t *tp)
1659 {
1660         cpu_t   *cpup;
1661         int     max_pri;
1662         int     max_run_pri;
1663         klwp_t  *lwp;
1664 
1665         ASSERT(THREAD_LOCK_HELD(tp));


1999          */
2000         if (dp->disp_max_unbound_pri != pri)
2001                 dp->disp_max_unbound_pri = pri;
2002 }
2003 
2004 /*
2005  * disp_adjust_unbound_pri() - thread is becoming unbound, so we should
2006  *      check if the CPU to which it was previously bound should have
2007  *      its disp_max_unbound_pri increased.
2008  */
2009 void
2010 disp_adjust_unbound_pri(kthread_t *tp)
2011 {
2012         disp_t *dp;
2013         pri_t tpri;
2014 
2015         ASSERT(THREAD_LOCK_HELD(tp));
2016 
2017         /*
2018          * Don't do anything if the thread is not bound, or
2019          * currently not runnable.
2020          */
2021         if (tp->t_bound_cpu == NULL ||
2022             tp->t_state != TS_RUN)

2023                 return;
2024 
2025         tpri = DISP_PRIO(tp);
2026         dp = tp->t_bound_cpu->cpu_disp;
2027         ASSERT(tpri >= 0 && tpri < dp->disp_npri);
2028         if (tpri > dp->disp_max_unbound_pri)
2029                 dp->disp_max_unbound_pri = tpri;
2030 }
2031 
2032 /*
2033  * disp_getbest()
2034  *   De-queue the highest priority unbound runnable thread.
2035  *   Returns with the thread unlocked and onproc but at splhigh (like disp()).
2036  *   Returns NULL if nothing found.
2037  *   Returns T_DONTSTEAL if the thread was not stealable,
2038  *   so that the caller will try again later.
2039  *
2040  *   Passed a pointer to a dispatch queue not associated with this CPU, and
2041  *   its type.
2042  */


2197          */
2198 
2199 #ifdef DEBUG
2200         {
2201                 int     thread_was_on_queue;
2202 
2203                 thread_was_on_queue = dispdeq(tp);      /* drops disp_lock */
2204                 ASSERT(thread_was_on_queue);
2205         }
2206 
2207 #else /* DEBUG */
2208         (void) dispdeq(tp);                     /* drops disp_lock */
2209 #endif /* DEBUG */
2210 
2211         /*
2212          * Reset the disp_queue steal time - we do not know what the
2213          * smallest value across the queue is.
2214          */
2215         dp->disp_steal = 0;
2216 


2217         /*
2218          * Setup thread to run on the current CPU.
2219          */
2220         tp->t_disp_queue = cp->cpu_disp;
2221 
2222         cp->cpu_dispthread = tp;             /* protected by spl only */
2223         cp->cpu_dispatch_pri = pri;
2224 
2225         /*
2226          * There can be a memory synchronization race between disp_getbest()
2227          * and disp_ratify() vs cpu_resched() where cpu_resched() is trying
2228          * to preempt the current thread to run the enqueued thread while
2229          * disp_getbest() and disp_ratify() are changing the current thread
2230          * to the stolen thread. This may lead to a situation where
2231          * cpu_resched() tries to preempt the wrong thread and the
2232          * stolen thread continues to run on the CPU which has been tagged
2233          * for preemption.
2234          * Later the clock thread gets enqueued but doesn't get to run on the
2235          * CPU causing the system to hang.
2236          *