patch delete-t_stime
patch remove-swapenq-flag
patch remove-dont-swap-flag
patch remove-swapinout-class-ops

ts.c before the patches listed above:

 185 static int      ts_enterclass(kthread_t *, id_t, void *, cred_t *, void *);
 186 static int      ts_fork(kthread_t *, kthread_t *, void *);
 187 static int      ts_getclinfo(void *);
 188 static int      ts_getclpri(pcpri_t *);
 189 static int      ts_parmsin(void *);
 190 static int      ts_parmsout(void *, pc_vaparms_t *);
 191 static int      ts_vaparmsin(void *, pc_vaparms_t *);
 192 static int      ts_vaparmsout(void *, pc_vaparms_t *);
 193 static int      ts_parmsset(kthread_t *, void *, id_t, cred_t *);
 194 static void     ts_exit(kthread_t *);
 195 static int      ts_donice(kthread_t *, cred_t *, int, int *);
 196 static int      ts_doprio(kthread_t *, cred_t *, int, int *);
 197 static void     ts_exitclass(void *);
 198 static int      ts_canexit(kthread_t *, cred_t *);
 199 static void     ts_forkret(kthread_t *, kthread_t *);
 200 static void     ts_nullsys();
 201 static void     ts_parmsget(kthread_t *, void *);
 202 static void     ts_preempt(kthread_t *);
 203 static void     ts_setrun(kthread_t *);
 204 static void     ts_sleep(kthread_t *);
 205 static pri_t    ts_swapin(kthread_t *, int);
 206 static pri_t    ts_swapout(kthread_t *, int);
 207 static void     ts_tick(kthread_t *);
 208 static void     ts_trapret(kthread_t *);
 209 static void     ts_update(void *);
 210 static int      ts_update_list(int);
 211 static void     ts_wakeup(kthread_t *);
 212 static pri_t    ts_globpri(kthread_t *);
 213 static void     ts_yield(kthread_t *);
 214 extern tsdpent_t *ts_getdptbl(void);
 215 extern pri_t    *ts_getkmdpris(void);
 216 extern pri_t    td_getmaxumdpri(void);
 217 static int      ts_alloc(void **, int);
 218 static void     ts_free(void *);
 219 
 220 pri_t           ia_init(id_t, int, classfuncs_t **);
 221 static int      ia_getclinfo(void *);
 222 static int      ia_getclpri(pcpri_t *);
 223 static int      ia_parmsin(void *);
 224 static int      ia_vaparmsin(void *, pc_vaparms_t *);
 225 static int      ia_vaparmsout(void *, pc_vaparms_t *);
 226 static int      ia_parmsset(kthread_t *, void *, id_t, cred_t *);


 244         ts_parmsin,
 245         ts_parmsout,
 246         ts_vaparmsin,
 247         ts_vaparmsout,
 248         ts_getclpri,
 249         ts_alloc,
 250         ts_free,
 251 
 252         /* thread functions */
 253         ts_enterclass,
 254         ts_exitclass,
 255         ts_canexit,
 256         ts_fork,
 257         ts_forkret,
 258         ts_parmsget,
 259         ts_parmsset,
 260         ts_nullsys,     /* stop */
 261         ts_exit,
 262         ts_nullsys,     /* active */
 263         ts_nullsys,     /* inactive */
 264         ts_swapin,
 265         ts_swapout,
 266         ts_trapret,
 267         ts_preempt,
 268         ts_setrun,
 269         ts_sleep,
 270         ts_tick,
 271         ts_wakeup,
 272         ts_donice,
 273         ts_globpri,
 274         ts_nullsys,     /* set_process_group */
 275         ts_yield,
 276         ts_doprio,
 277 };
 278 
 279 /*
 280  * ia_classfuncs is used for interactive class threads; IA threads are stored
 281  * on the same class list as TS threads, and most of the class functions are
 282  * identical, but a few have different enough functionality to require their
 283  * own functions.
 284  */
 285 static struct classfuncs ia_classfuncs = {


 289         ia_parmsin,
 290         ts_parmsout,
 291         ia_vaparmsin,
 292         ia_vaparmsout,
 293         ia_getclpri,
 294         ts_alloc,
 295         ts_free,
 296 
 297         /* thread functions */
 298         ts_enterclass,
 299         ts_exitclass,
 300         ts_canexit,
 301         ts_fork,
 302         ts_forkret,
 303         ia_parmsget,
 304         ia_parmsset,
 305         ts_nullsys,     /* stop */
 306         ts_exit,
 307         ts_nullsys,     /* active */
 308         ts_nullsys,     /* inactive */
 309         ts_swapin,
 310         ts_swapout,
 311         ts_trapret,
 312         ts_preempt,
 313         ts_setrun,
 314         ts_sleep,
 315         ts_tick,
 316         ts_wakeup,
 317         ts_donice,
 318         ts_globpri,
 319         ia_set_process_group,
 320         ts_yield,
 321         ts_doprio,
 322 };
 323 
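The ts_classfuncs and ia_classfuncs tables above are function-pointer dispatch tables: the IA class reuses the TS implementations for most operations and overrides only the handful that differ. A minimal standalone sketch of the pattern (the struct and names here are illustrative, not the kernel's real classfuncs_t):

#include <stdio.h>

/*
 * Hypothetical stand-in for the kernel's classfuncs_t: a table of
 * function pointers, one slot per class operation.
 */
typedef struct classops {
	void	(*cl_sleep)(const char *);
	void	(*cl_wakeup)(const char *);
} classops_t;

static void
shared_sleep(const char *who)
{
	printf("%s: common TS sleep path\n", who);
}

static void
shared_wakeup(const char *who)
{
	printf("%s: common TS wakeup path\n", who);
}

static void
ia_only_wakeup(const char *who)
{
	printf("%s: IA-specific front-of-queue wakeup\n", who);
}

/* TS and IA share most slots; IA overrides only what differs. */
static const classops_t ts_ops = { shared_sleep, shared_wakeup };
static const classops_t ia_ops = { shared_sleep, ia_only_wakeup };

int
main(void)
{
	ts_ops.cl_wakeup("ts");
	ia_ops.cl_wakeup("ia");	/* dispatches to the IA routine */
	return (0);
}
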
 324 
 325 /*
 326  * Time sharing class initialization.  Called by dispinit() at boot time.
 327  * We can ignore the clparmsz argument since we know that the smallest
 328  * possible parameter buffer is big enough for us.
 329  */
 330 /* ARGSUSED */


1387                 tspp->ts_flags |= TSKPRI;
1388                 THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
1389                 ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1390                 t->t_trapret = 1;            /* so ts_trapret will run */
1391                 aston(t);
1392         }
1393 
1394         /*
1395          * This thread may be placed on wait queue by CPU Caps. In this case we
1396          * do not need to do anything until it is removed from the wait queue.
 1398          * Do not enforce CPU caps on threads running at a kernel priority.
1398          */
1399         if (CPUCAPS_ON()) {
1400                 (void) cpucaps_charge(t, &tspp->ts_caps,
1401                     CPUCAPS_CHARGE_ENFORCE);
1402                 if (!(tspp->ts_flags & TSKPRI) && CPUCAPS_ENFORCE(t))
1403                         return;
1404         }
1405 
1406         /*
 1407          * If the thread was preempted in userland, we know
1408          * it isn't holding any locks.  Mark it as swappable.
1409          */
1410         ASSERT(t->t_schedflag & TS_DONT_SWAP);
1411         if (lwp != NULL && lwp->lwp_state == LWP_USER)
1412                 t->t_schedflag &= ~TS_DONT_SWAP;
1413 
1414         /*
1415          * Check to see if we're doing "preemption control" here.  If
1416          * we are, and if the user has requested that this thread not
1417          * be preempted, and if preemptions haven't been put off for
1418          * too long, let the preemption happen here but try to make
1419          * sure the thread is rescheduled as soon as possible.  We do
1420          * this by putting it on the front of the highest priority run
1421          * queue in the TS class.  If the preemption has been put off
1422          * for too long, clear the "nopreempt" bit and let the thread
1423          * be preempted.
1424          */
1425         if (t->t_schedctl && schedctl_get_nopreempt(t)) {
1426                 if (tspp->ts_timeleft > -SC_MAX_TICKS) {
1427                         DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
1428                         if (!(tspp->ts_flags & TSKPRI)) {
1429                                 /*
1430                                  * If not already remembered, remember current
1431                                  * priority for restoration in ts_yield().
1432                                  */
1433                                 if (!(tspp->ts_flags & TSRESTORE)) {
1434                                         tspp->ts_scpri = t->t_pri;
1435                                         tspp->ts_flags |= TSRESTORE;
1436                                 }
1437                                 THREAD_CHANGE_PRI(t, ts_maxumdpri);
1438                                 t->t_schedflag |= TS_DONT_SWAP;
1439                         }
1440                         schedctl_set_yield(t, 1);
1441                         setfrontdq(t);
1442                         goto done;
1443                 } else {
1444                         if (tspp->ts_flags & TSRESTORE) {
1445                                 THREAD_CHANGE_PRI(t, tspp->ts_scpri);
1446                                 tspp->ts_flags &= ~TSRESTORE;
1447                         }
1448                         schedctl_set_nopreempt(t, 0);
1449                         DTRACE_SCHED1(schedctl__preempt, kthread_t *, t);
1450                         TNF_PROBE_2(schedctl_preempt, "schedctl TS ts_preempt",
1451                             /* CSTYLED */, tnf_pid, pid, ttoproc(t)->p_pid,
1452                             tnf_lwpid, lwpid, t->t_tid);
1453                         /*
1454                          * Fall through and be preempted below.
1455                          */
1456                 }
1457         }
1458 
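The nopreempt window handled above is requested from userland through schedctl(3C). A minimal sketch of the application side, using only the documented schedctl_init/schedctl_start/schedctl_stop interfaces:

#include <schedctl.h>

static schedctl_t *sc;		/* per-thread preemption-control handle */

void
enter_critical(void)
{
	if (sc == NULL)
		sc = schedctl_init();	/* map the shared schedctl page */
	schedctl_start(sc);		/* request "don't preempt me" */
}

void
exit_critical(void)
{
	/*
	 * Clears the nopreempt hint; if the kernel set the yield flag
	 * in ts_preempt() above, this also yields the CPU.
	 */
	schedctl_stop(sc);
}
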


1545                 tspp->ts_dispwait = 0;
1546 
1547                 THREAD_CHANGE_PRI(curthread,
1548                     ts_dptbl[tspp->ts_umdpri].ts_globpri);
1549                 ASSERT(curthread->t_pri >= 0 &&
1550                     curthread->t_pri <= ts_maxglobpri);
1551                 tspp->ts_flags = flags & ~TSKPRI;
1552 
1553                 if (DISP_MUST_SURRENDER(curthread))
1554                         cpu_surrender(curthread);
1555         } else if (flags & TSKPRI) {
1556                 THREAD_CHANGE_PRI(curthread,
1557                     ts_dptbl[tspp->ts_umdpri].ts_globpri);
1558                 ASSERT(curthread->t_pri >= 0 &&
1559                     curthread->t_pri <= ts_maxglobpri);
1560                 tspp->ts_flags = flags & ~TSKPRI;
1561 
1562                 if (DISP_MUST_SURRENDER(curthread))
1563                         cpu_surrender(curthread);
1564         }
1565         t->t_stime = ddi_get_lbolt();                /* time stamp for the swapper */
1566         TRACE_2(TR_FAC_DISP, TR_SLEEP,
1567             "sleep:tid %p old pri %d", t, old_pri);
1568 }
1569 
1570 
1571 /*
1572  * Return Values:
1573  *
1574  *      -1 if the thread is loaded or is not eligible to be swapped in.
1575  *
1576  *      effective priority of the specified thread based on swapout time
 1577  *              and the size of the process (epri >= 0, epri <= SHRT_MAX).
1578  */
1579 /* ARGSUSED */
1580 static pri_t
1581 ts_swapin(kthread_t *t, int flags)
1582 {
1583         tsproc_t        *tspp = (tsproc_t *)(t->t_cldata);
1584         long            epri = -1;
1585         proc_t          *pp = ttoproc(t);
1586 
1587         ASSERT(THREAD_LOCK_HELD(t));
1588 
1589         /*
1590          * We know that pri_t is a short.
1591          * Be sure not to overrun its range.
1592          */
1593         if (t->t_state == TS_RUN && (t->t_schedflag & TS_LOAD) == 0) {
1594                 time_t swapout_time;
1595 
1596                 swapout_time = (ddi_get_lbolt() - t->t_stime) / hz;
1597                 if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)))
1598                         epri = (long)DISP_PRIO(t) + swapout_time;
1599                 else {
1600                         /*
 1601                          * Threads that have been swapped out for a long
 1602                          * time, have a high user-mode priority, and have
 1603                          * a small address space are more deserving.
1604                          */
1605                         epri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
1606                         ASSERT(epri >= 0 && epri <= ts_maxumdpri);
1607                         epri += swapout_time - pp->p_swrss / nz(maxpgio)/2;
1608                 }
1609                 /*
1610                  * Scale epri so SHRT_MAX/2 represents zero priority.
1611                  */
1612                 epri += SHRT_MAX/2;
1613                 if (epri < 0)
1614                         epri = 0;
1615                 else if (epri > SHRT_MAX)
1616                         epri = SHRT_MAX;
1617         }
1618         return ((pri_t)epri);
1619 }
1620 
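The deservingness computation in ts_swapin() above (the non-kernel-priority branch) is plain arithmetic over the swapped-out time, the user-mode priority, and the process's swapped resident size. A standalone sketch, with stand-in parameters for the kernel globals (hz, maxpgio) and the dispatch-table lookup; nz() in the kernel merely guards against a zero divisor, modeled here with a conditional:

#include <limits.h>

/*
 * Stand-alone model of ts_swapin()'s arithmetic.  globpri is the
 * ts_dptbl[ts_umdpri].ts_globpri lookup; swapout_ticks corresponds to
 * ddi_get_lbolt() - t_stime; hz, p_swrss, and maxpgio are stand-ins
 * for the kernel globals.
 */
long
swapin_epri(long globpri, long swapout_ticks, long hz,
    long p_swrss, long maxpgio)
{
	long swapout_time = swapout_ticks / hz;	/* seconds swapped out */
	long epri;

	epri = globpri + swapout_time -
	    p_swrss / (maxpgio != 0 ? maxpgio : 1) / 2;

	epri += SHRT_MAX / 2;	/* scale: SHRT_MAX/2 means zero priority */
	if (epri < 0)
		epri = 0;
	else if (epri > SHRT_MAX)
		epri = SHRT_MAX;
	return (epri);
}
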
1621 /*
1622  * Return Values
1623  *      -1 if the thread isn't loaded or is not eligible to be swapped out.
1624  *
 1625  *      effective priority of the specified thread based on whether the swapper
1626  *              is in softswap or hardswap mode.
1627  *
1628  *              Softswap:  Return a low effective priority for threads
1629  *                         sleeping for more than maxslp secs.
1630  *
1631  *              Hardswap:  Return an effective priority such that threads
1632  *                         which have been in memory for a while and are
1633  *                         associated with a small address space are swapped
 1634  *                         out before others.
1635  *
 1636  *              (epri >= 0, epri <= SHRT_MAX).
1637  */
1638 time_t  ts_minrun = 2;          /* XXX - t_pri becomes 59 within 2 secs */
1639 time_t  ts_minslp = 2;          /* min time on sleep queue for hardswap */
1640 
1641 static pri_t
1642 ts_swapout(kthread_t *t, int flags)
1643 {
1644         tsproc_t        *tspp = (tsproc_t *)(t->t_cldata);
1645         long            epri = -1;
1646         proc_t          *pp = ttoproc(t);
1647         time_t          swapin_time;
1648 
1649         ASSERT(THREAD_LOCK_HELD(t));
1650 
1651         if (INHERITED(t) || (tspp->ts_flags & (TSKPRI | TSIASET)) ||
1652             (t->t_proc_flag & TP_LWPEXIT) ||
1653             (t->t_state & (TS_ZOMB | TS_FREE | TS_STOPPED |
1654             TS_ONPROC | TS_WAIT)) ||
1655             !(t->t_schedflag & TS_LOAD) || !SWAP_OK(t))
1656                 return (-1);
1657 
1658         ASSERT(t->t_state & (TS_SLEEP | TS_RUN));
1659 
1660         /*
1661          * We know that pri_t is a short.
1662          * Be sure not to overrun its range.
1663          */
1664         swapin_time = (ddi_get_lbolt() - t->t_stime) / hz;
1665         if (flags == SOFTSWAP) {
1666                 if (t->t_state == TS_SLEEP && swapin_time > maxslp) {
1667                         epri = 0;
1668                 } else {
1669                         return ((pri_t)epri);
1670                 }
1671         } else {
1672                 pri_t pri;
1673 
1674                 if ((t->t_state == TS_SLEEP && swapin_time > ts_minslp) ||
1675                     (t->t_state == TS_RUN && swapin_time > ts_minrun)) {
1676                         pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
1677                         ASSERT(pri >= 0 && pri <= ts_maxumdpri);
1678                         epri = swapin_time -
1679                             (rm_asrss(pp->p_as) / nz(maxpgio)/2) - (long)pri;
1680                 } else {
1681                         return ((pri_t)epri);
1682                 }
1683         }
1684 
1685         /*
1686          * Scale epri so SHRT_MAX/2 represents zero priority.
1687          */
1688         epri += SHRT_MAX/2;
1689         if (epri < 0)
1690                 epri = 0;
1691         else if (epri > SHRT_MAX)
1692                 epri = SHRT_MAX;
1693 
1694         return ((pri_t)epri);
1695 }
1696 
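ts_swapout() above reduces to a small mode split. A hedged sketch with stand-in parameters for the maxslp, ts_minslp, and ts_minrun tunables; the kernel then applies the same SHRT_MAX/2 scaling and clamp shown in ts_swapin():

#define	SOFTSWAP_MODE	0
#define	HARDSWAP_MODE	1

/*
 * Stand-alone model of ts_swapout()'s mode split.  A return of -1
 * means "not eligible"; otherwise the caller scales and clamps the
 * result as in ts_swapin().
 */
long
swapout_epri(int mode, int sleeping, long resident_secs,
    long rss_pages, long maxpgio, long pri,
    long maxslp, long minslp, long minrun)
{
	if (mode == SOFTSWAP_MODE) {
		/* Softswap: only long sleepers, at lowest priority. */
		return ((sleeping && resident_secs > maxslp) ? 0 : -1);
	}

	/* Hardswap: long-resident, small, low-priority threads rank high. */
	if ((sleeping && resident_secs > minslp) ||
	    (!sleeping && resident_secs > minrun)) {
		return (resident_secs -
		    rss_pages / (maxpgio != 0 ? maxpgio : 1) / 2 - pri);
	}
	return (-1);
}
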
1697 /*
1698  * Check for time slice expiration.  If time slice has expired
1699  * move thread to priority specified in tsdptbl for time slice expiration
1700  * and set runrun to cause preemption.
1701  */
1702 static void
1703 ts_tick(kthread_t *t)
1704 {
1705         tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
1706         klwp_t *lwp;
1707         boolean_t call_cpu_surrender = B_FALSE;
1708         pri_t   oldpri = t->t_pri;
1709 
1710         ASSERT(MUTEX_HELD(&(ttoproc(t))->p_lock));
1711 
1712         thread_lock(t);
1713 
1714         /*
1715          * Keep track of thread's project CPU usage.  Note that projects
1716          * get charged even when threads are running in the kernel.


1741 
1742                                 TNF_PROBE_2(schedctl_failsafe,
1743                                     "schedctl TS ts_tick", /* CSTYLED */,
1744                                     tnf_pid, pid, ttoproc(t)->p_pid,
1745                                     tnf_lwpid, lwpid, t->t_tid);
1746                         }
1747                         tspp->ts_flags &= ~TSRESTORE;
1748                         tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_tqexp;
1749                         TS_NEWUMDPRI(tspp);
1750                         tspp->ts_dispwait = 0;
1751                         new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
1752                         ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
1753                         /*
1754                          * When the priority of a thread is changed,
1755                          * it may be necessary to adjust its position
1756                          * on a sleep queue or dispatch queue.
1757                          * The function thread_change_pri accomplishes
1758                          * this.
1759                          */
1760                         if (thread_change_pri(t, new_pri, 0)) {
1761                                 if ((t->t_schedflag & TS_LOAD) &&
1762                                     (lwp = t->t_lwp) &&
1763                                     lwp->lwp_state == LWP_USER)
1764                                         t->t_schedflag &= ~TS_DONT_SWAP;
1765                                 tspp->ts_timeleft =
1766                                     ts_dptbl[tspp->ts_cpupri].ts_quantum;
1767                         } else {
1768                                 call_cpu_surrender = B_TRUE;
1769                         }
1770                         TRACE_2(TR_FAC_DISP, TR_TICK,
1771                             "tick:tid %p old pri %d", t, oldpri);
1772                 } else if (t->t_state == TS_ONPROC &&
1773                     t->t_pri < t->t_disp_queue->disp_maxrunpri) {
1774                         call_cpu_surrender = B_TRUE;
1775                 }
1776         }
1777 
1778         if (call_cpu_surrender) {
1779                 tspp->ts_flags |= TSBACKQ;
1780                 cpu_surrender(t);
1781         }
1782 
1783         thread_unlock_nopreempt(t);     /* clock thread can't be preempted */
1784 }
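On quantum expiry, ts_tick() above pushes ts_cpupri through the dispatcher table's ts_tqexp column, reloads the quantum for the new level, and derives a new global priority. A toy model with an invented three-row table (the real, tunable, sixty-level table is described in ts_dptbl(4); TS_NEWUMDPRI's folding-in of ts_upri is omitted here):

/*
 * Invented three-level table modeling the columns ts_tick() consults.
 */
typedef struct dpent {
	int	quantum;	/* ticks per time slice at this level */
	int	tqexp;		/* new cpupri when the quantum expires */
	int	globpri;	/* global dispatch priority of this level */
} dpent_t;

static const dpent_t dptbl[] = {
	{ 100, 0, 10 },		/* level 0: longest quantum, stays at 0 */
	{  80, 0, 20 },		/* level 1: expiry demotes to level 0 */
	{  60, 1, 30 },		/* level 2: expiry demotes to level 1 */
};

/*
 * On expiry: demote cpupri through tqexp, reload the quantum for the
 * new level, and hand back the new global priority.
 */
int
quantum_expired(int *cpupri, int *timeleft)
{
	*cpupri = dptbl[*cpupri].tqexp;
	*timeleft = dptbl[*cpupri].quantum;
	return (dptbl[*cpupri].globpri);
}
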


1818                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1819                 ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1820                 tspp->ts_flags &= ~TSKPRI;
1821 
1822                 if (DISP_MUST_SURRENDER(t))
1823                         cpu_surrender(t);
1824         } else if (tspp->ts_flags & TSKPRI) {
1825                 /*
 1826          * If the thread has blocked in the kernel (as opposed to
1827                  * being merely preempted), recompute the user mode priority.
1828                  */
1829                 THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
1830                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1831                 ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1832                 tspp->ts_flags &= ~TSKPRI;
1833 
1834                 if (DISP_MUST_SURRENDER(t))
1835                         cpu_surrender(t);
1836         }
1837 
1838         /*
1839          * Swapout lwp if the swapper is waiting for this thread to
1840          * reach a safe point.
1841          */
1842         if ((t->t_schedflag & TS_SWAPENQ) && !(tspp->ts_flags & TSIASET)) {
1843                 thread_unlock(t);
1844                 swapout_lwp(ttolwp(t));
1845                 thread_lock(t);
1846         }
1847 
1848         TRACE_2(TR_FAC_DISP, TR_TRAPRET,
1849             "trapret:tid %p old pri %d", t, old_pri);
1850 }
1851 
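ts_trapret() above must drop the dispatcher's thread lock before calling swapout_lwp(), since a potentially blocking call cannot be made while holding it, and must retake the lock afterwards. A generic sketch of the unlock-call-relock pattern using pthreads (names are illustrative, not kernel interfaces):

#include <pthread.h>

/*
 * Generic unlock-call-relock pattern.  Any state read before the
 * unlock must be re-validated after the lock is retaken.
 */
void
call_at_safe_point(pthread_mutex_t *thread_lock,
    void (*blocking_op)(void *), void *arg)
{
	pthread_mutex_unlock(thread_lock);	/* can't block while held */
	blocking_op(arg);		/* e.g. hand the LWP to the swapper */
	pthread_mutex_lock(thread_lock);	/* state may have changed */
}
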
1852 
1853 /*
1854  * Update the ts_dispwait values of all time sharing threads that
1855  * are currently runnable at a user mode priority and bump the priority
1856  * if ts_dispwait exceeds ts_maxwait.  Called once per second via
1857  * timeout which we reset here.
1858  *
1859  * There are several lists of time sharing threads broken up by a hash on
1860  * the thread pointer.  Each list has its own lock.  This avoids blocking
1861  * all ts_enterclass, ts_fork, and ts_exitclass operations while ts_update
1862  * runs.  ts_update traverses each list in turn.
1863  *
1864  * If multiple threads have their priorities updated to the same value,
1865  * the system implicitly favors the one that is updated first (since it
1866  * winds up first on the run queue).  To avoid this unfairness, the
1867  * traversal of threads starts at the list indicated by a marker.  When


1965         mutex_exit(&ts_list_lock[i]);
1966 
1967         return (updated);
1968 }
1969 
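The hashed thread lists described in the ts_update comment above pair each list with its own lock and rotate the starting list so no list is always favored. A minimal userland sketch of that structure (list count, hash, and names adapted for illustration; assume each lock has been initialized with pthread_mutex_init()):

#include <pthread.h>
#include <stddef.h>

#define	NLISTS	16	/* invented; each list gets its own lock */

struct tnode {
	struct tnode	*next;
	/* per-thread class data would live here */
};

static struct tnode	*ts_plisthead[NLISTS];
static pthread_mutex_t	ts_list_lock[NLISTS];
static int		ts_update_marker;	/* rotating start list */

/* Hash a thread pointer onto a list (hash function invented). */
#define	LIST_HASH(tp)	(((unsigned long)(tp) >> 9) & (NLISTS - 1))

void
update_all(void (*update_one)(struct tnode *))
{
	int i, ix = ts_update_marker;

	for (i = 0; i < NLISTS; i++, ix = (ix + 1) % NLISTS) {
		struct tnode *t;

		pthread_mutex_lock(&ts_list_lock[ix]);
		for (t = ts_plisthead[ix]; t != NULL; t = t->next)
			update_one(t);
		pthread_mutex_unlock(&ts_list_lock[ix]);
	}
	/* Start somewhere else next time so no list is always first. */
	ts_update_marker = (ts_update_marker + 1) % NLISTS;
}
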
1970 /*
1971  * Processes waking up go to the back of their queue.  We don't
 1972  * need to assign a time quantum here because the thread is still
1973  * at a kernel mode priority and the time slicing is not done
1974  * for threads running in the kernel after sleeping.  The proper
1975  * time quantum will be assigned by ts_trapret before the thread
1976  * returns to user mode.
1977  */
1978 static void
1979 ts_wakeup(kthread_t *t)
1980 {
1981         tsproc_t        *tspp = (tsproc_t *)(t->t_cldata);
1982 
1983         ASSERT(THREAD_LOCK_HELD(t));
1984 
1985         t->t_stime = ddi_get_lbolt();                /* time stamp for the swapper */
1986 
1987         if (tspp->ts_flags & TSKPRI) {
1988                 tspp->ts_flags &= ~TSBACKQ;
1989                 if (tspp->ts_flags & TSIASET)
1990                         setfrontdq(t);
1991                 else
1992                         setbackdq(t);
1993         } else if (t->t_kpri_req) {
1994                 /*
1995                  * Give thread a priority boost if we were asked.
1996                  */
1997                 tspp->ts_flags |= TSKPRI;
1998                 THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
1999                 setbackdq(t);
2000                 t->t_trapret = 1;    /* so that ts_trapret will run */
2001                 aston(t);
2002         } else {
2003                 if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
2004                         tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
2005                         TS_NEWUMDPRI(tspp);
2006                         tspp->ts_timeleft =


ts.c after the patches listed above:

 185 static int      ts_enterclass(kthread_t *, id_t, void *, cred_t *, void *);
 186 static int      ts_fork(kthread_t *, kthread_t *, void *);
 187 static int      ts_getclinfo(void *);
 188 static int      ts_getclpri(pcpri_t *);
 189 static int      ts_parmsin(void *);
 190 static int      ts_parmsout(void *, pc_vaparms_t *);
 191 static int      ts_vaparmsin(void *, pc_vaparms_t *);
 192 static int      ts_vaparmsout(void *, pc_vaparms_t *);
 193 static int      ts_parmsset(kthread_t *, void *, id_t, cred_t *);
 194 static void     ts_exit(kthread_t *);
 195 static int      ts_donice(kthread_t *, cred_t *, int, int *);
 196 static int      ts_doprio(kthread_t *, cred_t *, int, int *);
 197 static void     ts_exitclass(void *);
 198 static int      ts_canexit(kthread_t *, cred_t *);
 199 static void     ts_forkret(kthread_t *, kthread_t *);
 200 static void     ts_nullsys();
 201 static void     ts_parmsget(kthread_t *, void *);
 202 static void     ts_preempt(kthread_t *);
 203 static void     ts_setrun(kthread_t *);
 204 static void     ts_sleep(kthread_t *);


 205 static void     ts_tick(kthread_t *);
 206 static void     ts_trapret(kthread_t *);
 207 static void     ts_update(void *);
 208 static int      ts_update_list(int);
 209 static void     ts_wakeup(kthread_t *);
 210 static pri_t    ts_globpri(kthread_t *);
 211 static void     ts_yield(kthread_t *);
 212 extern tsdpent_t *ts_getdptbl(void);
 213 extern pri_t    *ts_getkmdpris(void);
 214 extern pri_t    td_getmaxumdpri(void);
 215 static int      ts_alloc(void **, int);
 216 static void     ts_free(void *);
 217 
 218 pri_t           ia_init(id_t, int, classfuncs_t **);
 219 static int      ia_getclinfo(void *);
 220 static int      ia_getclpri(pcpri_t *);
 221 static int      ia_parmsin(void *);
 222 static int      ia_vaparmsin(void *, pc_vaparms_t *);
 223 static int      ia_vaparmsout(void *, pc_vaparms_t *);
 224 static int      ia_parmsset(kthread_t *, void *, id_t, cred_t *);


 242         ts_parmsin,
 243         ts_parmsout,
 244         ts_vaparmsin,
 245         ts_vaparmsout,
 246         ts_getclpri,
 247         ts_alloc,
 248         ts_free,
 249 
 250         /* thread functions */
 251         ts_enterclass,
 252         ts_exitclass,
 253         ts_canexit,
 254         ts_fork,
 255         ts_forkret,
 256         ts_parmsget,
 257         ts_parmsset,
 258         ts_nullsys,     /* stop */
 259         ts_exit,
 260         ts_nullsys,     /* active */
 261         ts_nullsys,     /* inactive */


 262         ts_trapret,
 263         ts_preempt,
 264         ts_setrun,
 265         ts_sleep,
 266         ts_tick,
 267         ts_wakeup,
 268         ts_donice,
 269         ts_globpri,
 270         ts_nullsys,     /* set_process_group */
 271         ts_yield,
 272         ts_doprio,
 273 };
 274 
 275 /*
 276  * ia_classfuncs is used for interactive class threads; IA threads are stored
 277  * on the same class list as TS threads, and most of the class functions are
 278  * identical, but a few have different enough functionality to require their
 279  * own functions.
 280  */
 281 static struct classfuncs ia_classfuncs = {


 285         ia_parmsin,
 286         ts_parmsout,
 287         ia_vaparmsin,
 288         ia_vaparmsout,
 289         ia_getclpri,
 290         ts_alloc,
 291         ts_free,
 292 
 293         /* thread functions */
 294         ts_enterclass,
 295         ts_exitclass,
 296         ts_canexit,
 297         ts_fork,
 298         ts_forkret,
 299         ia_parmsget,
 300         ia_parmsset,
 301         ts_nullsys,     /* stop */
 302         ts_exit,
 303         ts_nullsys,     /* active */
 304         ts_nullsys,     /* inactive */


 305         ts_trapret,
 306         ts_preempt,
 307         ts_setrun,
 308         ts_sleep,
 309         ts_tick,
 310         ts_wakeup,
 311         ts_donice,
 312         ts_globpri,
 313         ia_set_process_group,
 314         ts_yield,
 315         ts_doprio,
 316 };
 317 
 318 
 319 /*
 320  * Time sharing class initialization.  Called by dispinit() at boot time.
 321  * We can ignore the clparmsz argument since we know that the smallest
 322  * possible parameter buffer is big enough for us.
 323  */
 324 /* ARGSUSED */


1381                 tspp->ts_flags |= TSKPRI;
1382                 THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
1383                 ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1384                 t->t_trapret = 1;            /* so ts_trapret will run */
1385                 aston(t);
1386         }
1387 
1388         /*
1389          * This thread may be placed on wait queue by CPU Caps. In this case we
1390          * do not need to do anything until it is removed from the wait queue.
 1391          * Do not enforce CPU caps on threads running at a kernel priority.
1392          */
1393         if (CPUCAPS_ON()) {
1394                 (void) cpucaps_charge(t, &tspp->ts_caps,
1395                     CPUCAPS_CHARGE_ENFORCE);
1396                 if (!(tspp->ts_flags & TSKPRI) && CPUCAPS_ENFORCE(t))
1397                         return;
1398         }
1399 
1400         /*
1401          * Check to see if we're doing "preemption control" here.  If
1402          * we are, and if the user has requested that this thread not
1403          * be preempted, and if preemptions haven't been put off for
1404          * too long, let the preemption happen here but try to make
1405          * sure the thread is rescheduled as soon as possible.  We do
1406          * this by putting it on the front of the highest priority run
1407          * queue in the TS class.  If the preemption has been put off
1408          * for too long, clear the "nopreempt" bit and let the thread
1409          * be preempted.
1410          */
1411         if (t->t_schedctl && schedctl_get_nopreempt(t)) {
1412                 if (tspp->ts_timeleft > -SC_MAX_TICKS) {
1413                         DTRACE_SCHED1(schedctl__nopreempt, kthread_t *, t);
1414                         if (!(tspp->ts_flags & TSKPRI)) {
1415                                 /*
1416                                  * If not already remembered, remember current
1417                                  * priority for restoration in ts_yield().
1418                                  */
1419                                 if (!(tspp->ts_flags & TSRESTORE)) {
1420                                         tspp->ts_scpri = t->t_pri;
1421                                         tspp->ts_flags |= TSRESTORE;
1422                                 }
1423                                 THREAD_CHANGE_PRI(t, ts_maxumdpri);

1424                         }
1425                         schedctl_set_yield(t, 1);
1426                         setfrontdq(t);
1427                         goto done;
1428                 } else {
1429                         if (tspp->ts_flags & TSRESTORE) {
1430                                 THREAD_CHANGE_PRI(t, tspp->ts_scpri);
1431                                 tspp->ts_flags &= ~TSRESTORE;
1432                         }
1433                         schedctl_set_nopreempt(t, 0);
1434                         DTRACE_SCHED1(schedctl__preempt, kthread_t *, t);
1435                         TNF_PROBE_2(schedctl_preempt, "schedctl TS ts_preempt",
1436                             /* CSTYLED */, tnf_pid, pid, ttoproc(t)->p_pid,
1437                             tnf_lwpid, lwpid, t->t_tid);
1438                         /*
1439                          * Fall through and be preempted below.
1440                          */
1441                 }
1442         }
1443 


1530                 tspp->ts_dispwait = 0;
1531 
1532                 THREAD_CHANGE_PRI(curthread,
1533                     ts_dptbl[tspp->ts_umdpri].ts_globpri);
1534                 ASSERT(curthread->t_pri >= 0 &&
1535                     curthread->t_pri <= ts_maxglobpri);
1536                 tspp->ts_flags = flags & ~TSKPRI;
1537 
1538                 if (DISP_MUST_SURRENDER(curthread))
1539                         cpu_surrender(curthread);
1540         } else if (flags & TSKPRI) {
1541                 THREAD_CHANGE_PRI(curthread,
1542                     ts_dptbl[tspp->ts_umdpri].ts_globpri);
1543                 ASSERT(curthread->t_pri >= 0 &&
1544                     curthread->t_pri <= ts_maxglobpri);
1545                 tspp->ts_flags = flags & ~TSKPRI;
1546 
1547                 if (DISP_MUST_SURRENDER(curthread))
1548                         cpu_surrender(curthread);
1549         }

1550         TRACE_2(TR_FAC_DISP, TR_SLEEP,
1551             "sleep:tid %p old pri %d", t, old_pri);
1552 }
1553 

1554 /*
1555  * Check for time slice expiration.  If time slice has expired
1556  * move thread to priority specified in tsdptbl for time slice expiration
1557  * and set runrun to cause preemption.
1558  */
1559 static void
1560 ts_tick(kthread_t *t)
1561 {
1562         tsproc_t *tspp = (tsproc_t *)(t->t_cldata);
1563         klwp_t *lwp;
1564         boolean_t call_cpu_surrender = B_FALSE;
1565         pri_t   oldpri = t->t_pri;
1566 
1567         ASSERT(MUTEX_HELD(&(ttoproc(t))->p_lock));
1568 
1569         thread_lock(t);
1570 
1571         /*
1572          * Keep track of thread's project CPU usage.  Note that projects
1573          * get charged even when threads are running in the kernel.


1598 
1599                                 TNF_PROBE_2(schedctl_failsafe,
1600                                     "schedctl TS ts_tick", /* CSTYLED */,
1601                                     tnf_pid, pid, ttoproc(t)->p_pid,
1602                                     tnf_lwpid, lwpid, t->t_tid);
1603                         }
1604                         tspp->ts_flags &= ~TSRESTORE;
1605                         tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_tqexp;
1606                         TS_NEWUMDPRI(tspp);
1607                         tspp->ts_dispwait = 0;
1608                         new_pri = ts_dptbl[tspp->ts_umdpri].ts_globpri;
1609                         ASSERT(new_pri >= 0 && new_pri <= ts_maxglobpri);
1610                         /*
1611                          * When the priority of a thread is changed,
1612                          * it may be necessary to adjust its position
1613                          * on a sleep queue or dispatch queue.
1614                          * The function thread_change_pri accomplishes
1615                          * this.
1616                          */
1617                         if (thread_change_pri(t, new_pri, 0)) {
1618                                 tspp->ts_timeleft =
1619                                     ts_dptbl[tspp->ts_cpupri].ts_quantum;
1620                         } else {
1621                                 call_cpu_surrender = B_TRUE;
1622                         }
1623                         TRACE_2(TR_FAC_DISP, TR_TICK,
1624                             "tick:tid %p old pri %d", t, oldpri);
1625                 } else if (t->t_state == TS_ONPROC &&
1626                     t->t_pri < t->t_disp_queue->disp_maxrunpri) {
1627                         call_cpu_surrender = B_TRUE;
1628                 }
1629         }
1630 
1631         if (call_cpu_surrender) {
1632                 tspp->ts_flags |= TSBACKQ;
1633                 cpu_surrender(t);
1634         }
1635 
1636         thread_unlock_nopreempt(t);     /* clock thread can't be preempted */
1637 }


1671                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1672                 ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1673                 tspp->ts_flags &= ~TSKPRI;
1674 
1675                 if (DISP_MUST_SURRENDER(t))
1676                         cpu_surrender(t);
1677         } else if (tspp->ts_flags & TSKPRI) {
1678                 /*
 1679  * If the thread has blocked in the kernel (as opposed to
1680                  * being merely preempted), recompute the user mode priority.
1681                  */
1682                 THREAD_CHANGE_PRI(t, ts_dptbl[tspp->ts_umdpri].ts_globpri);
1683                 cp->cpu_dispatch_pri = DISP_PRIO(t);
1684                 ASSERT(t->t_pri >= 0 && t->t_pri <= ts_maxglobpri);
1685                 tspp->ts_flags &= ~TSKPRI;
1686 
1687                 if (DISP_MUST_SURRENDER(t))
1688                         cpu_surrender(t);
1689         }
1690 
1691         TRACE_2(TR_FAC_DISP, TR_TRAPRET,
1692             "trapret:tid %p old pri %d", t, old_pri);
1693 }
1694 
1695 
1696 /*
1697  * Update the ts_dispwait values of all time sharing threads that
1698  * are currently runnable at a user mode priority and bump the priority
1699  * if ts_dispwait exceeds ts_maxwait.  Called once per second via
1700  * timeout which we reset here.
1701  *
1702  * There are several lists of time sharing threads broken up by a hash on
1703  * the thread pointer.  Each list has its own lock.  This avoids blocking
1704  * all ts_enterclass, ts_fork, and ts_exitclass operations while ts_update
1705  * runs.  ts_update traverses each list in turn.
1706  *
1707  * If multiple threads have their priorities updated to the same value,
1708  * the system implicitly favors the one that is updated first (since it
1709  * winds up first on the run queue).  To avoid this unfairness, the
1710  * traversal of threads starts at the list indicated by a marker.  When


1808         mutex_exit(&ts_list_lock[i]);
1809 
1810         return (updated);
1811 }
1812 
1813 /*
1814  * Processes waking up go to the back of their queue.  We don't
 1815  * need to assign a time quantum here because the thread is still
1816  * at a kernel mode priority and the time slicing is not done
1817  * for threads running in the kernel after sleeping.  The proper
1818  * time quantum will be assigned by ts_trapret before the thread
1819  * returns to user mode.
1820  */
1821 static void
1822 ts_wakeup(kthread_t *t)
1823 {
1824         tsproc_t        *tspp = (tsproc_t *)(t->t_cldata);
1825 
1826         ASSERT(THREAD_LOCK_HELD(t));
1827 
1828         if (tspp->ts_flags & TSKPRI) {
1829                 tspp->ts_flags &= ~TSBACKQ;
1830                 if (tspp->ts_flags & TSIASET)
1831                         setfrontdq(t);
1832                 else
1833                         setbackdq(t);
1834         } else if (t->t_kpri_req) {
1835                 /*
1836                  * Give thread a priority boost if we were asked.
1837                  */
1838                 tspp->ts_flags |= TSKPRI;
1839                 THREAD_CHANGE_PRI(t, ts_kmdpris[0]);
1840                 setbackdq(t);
1841                 t->t_trapret = 1;    /* so that ts_trapret will run */
1842                 aston(t);
1843         } else {
1844                 if (tspp->ts_dispwait > ts_dptbl[tspp->ts_umdpri].ts_maxwait) {
1845                         tspp->ts_cpupri = ts_dptbl[tspp->ts_cpupri].ts_slpret;
1846                         TS_NEWUMDPRI(tspp);
1847                         tspp->ts_timeleft =