patch setfrontbackdq
@@ -1167,26 +1167,27 @@
* some affinity for t->t_cpu.
*/
#define THREAD_HAS_CACHE_WARMTH(thread) \
((thread == curthread) || \
((ddi_get_lbolt() - thread->t_disp_time) <= rechoose_interval))
+
/*
- * Put the specified thread on the back of the dispatcher
- * queue corresponding to its current priority.
+ * Put the specified thread on the front/back of the dispatcher queue
+ * corresponding to its current priority.
*
- * Called with the thread in transition, onproc or stopped state
- * and locked (transition implies locked) and at high spl.
- * Returns with the thread in TS_RUN state and still locked.
+ * Called with the thread in transition, onproc or stopped state and locked
+ * (transition implies locked) and at high spl. Returns with the thread in
+ * TS_RUN state and still locked.
*/
-void
-setbackdq(kthread_t *tp)
+static void
+setfrontbackdq(kthread_t *tp, boolean_t front)
{
dispq_t *dq;
disp_t *dp;
cpu_t *cp;
pri_t tpri;
- int bound;
+ boolean_t bound;
boolean_t self;
ASSERT(THREAD_LOCK_HELD(tp));
ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */
@@ -1199,44 +1200,64 @@
disp_swapped_setrun(tp);
return;
}
self = (tp == curthread);
-
- if (tp->t_bound_cpu || tp->t_weakbound_cpu)
- bound = 1;
- else
- bound = 0;
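+ /*
+ * Treat the thread as bound if it has either a strong or a weak
+ * binding to a CPU.
+ */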
+ bound = (tp->t_bound_cpu || tp->t_weakbound_cpu);
tpri = DISP_PRIO(tp);
if (ncpus == 1)
cp = tp->t_cpu;
else if (!bound) {
if (tpri >= kpqpri) {
- setkpdq(tp, SETKP_BACK);
+ setkpdq(tp, front ? SETKP_FRONT : SETKP_BACK);
return;
}
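+
+ /*
+ * Start from the CPU where the thread last ran; the placement
+ * heuristics below may still migrate it elsewhere.
+ */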
+ cp = tp->t_cpu;
+
+ if (!front) {
/*
* We'll generally let this thread continue to run where
* it last ran...but will consider migration if:
* - The thread probably doesn't have much cache warmth.
* - The CPU where it last ran is the target of an offline
* request.
* - The thread last ran outside its home lgroup.
*/
- if ((!THREAD_HAS_CACHE_WARMTH(tp)) ||
- (tp->t_cpu == cpu_inmotion)) {
- cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri, NULL);
- } else if (!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, tp->t_cpu)) {
- cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri,
- self ? tp->t_cpu : NULL);
- } else {
- cp = tp->t_cpu;
+ if ((!THREAD_HAS_CACHE_WARMTH(tp)) || (cp == cpu_inmotion)) {
+ cp = disp_lowpri_cpu(cp, tp->t_lpl, tpri, NULL);
+ } else if (!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, cp)) {
+ cp = disp_lowpri_cpu(cp, tp->t_lpl, tpri,
+ self ? cp : NULL);
+ }
}
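+
+ /*
+ * If the thread stays in its current partition, refine the CPU
+ * choice below (front-of-queue policy, or CMT load balancing for
+ * the back); a partition change forces migration instead.
+ */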
if (tp->t_cpupart == cp->cpu_part) {
+ if (front) {
+ /*
+ * We'll generally let this thread continue to run
+ * where it last ran, but will consider migration if:
+ * - The thread last ran outside its home lgroup.
+ * - The CPU where it last ran is the target of an
+ * offline request (a thread_nomigrate() on the in
+ * motion CPU relies on this when forcing a preempt).
+ * - The thread isn't the highest priority thread where
+ * it last ran, and it is considered not likely to
+ * have significant cache warmth.
+ */
+ if ((!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, cp)) ||
+ (cp == cpu_inmotion)) {
+ cp = disp_lowpri_cpu(cp, tp->t_lpl, tpri,
+ self ? cp : NULL);
+ } else if ((tpri < cp->cpu_disp->disp_maxrunpri) &&
+ (!THREAD_HAS_CACHE_WARMTH(tp))) {
+ cp = disp_lowpri_cpu(cp, tp->t_lpl, tpri,
+ NULL);
+ }
+ } else {
int qlen;
/*
* Perform any CMT load balancing
*/
@@ -1263,17 +1284,19 @@
kthread_t *, tp,
cpu_t *, cp, cpu_t *, newcp);
cp = newcp;
}
}
+ }
} else {
/*
* Migrate to a cpu in the new partition.
*/
cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist,
tp->t_lpl, tp->t_pri, NULL);
}
+
ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0);
} else {
/*
* It is possible that t_weakbound_cpu != t_bound_cpu (for
* a short time until weak binding that existed when the
@@ -1281,10 +1304,11 @@
* favour weak binding over strong.
*/
cp = tp->t_weakbound_cpu ?
tp->t_weakbound_cpu : tp->t_bound_cpu;
}
+
/*
* A thread that is ONPROC may be temporarily placed on the run queue
* but then chosen to run again by disp. If the thread we're placing on
* the queue is in TS_ONPROC state, don't set its t_waitrq until a
* replacement process is actually scheduled in swtch(). In this
@@ -1302,13 +1326,18 @@
}
dp = cp->cpu_disp;
disp_lock_enter_high(&dp->disp_lock);
- DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 0);
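+ /*
+ * The enqueue probe's final argument is now nonzero for a
+ * front-of-queue enqueue and zero for the back.
+ */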
+ DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, front);
+ if (front) {
+ TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri,
+ tp);
+ } else {
TRACE_3(TR_FAC_DISP, TR_BACKQ, "setbackdq:pri %d cpu %p tid %p",
tpri, cp, tp);
+ }
#ifndef NPROBE
/* Kernel probe */
if (tnf_tracing_active)
tnf_thread_queue(tp, cp, tpri);
@@ -1325,13 +1354,19 @@
if (!bound)
dp->disp_steal = 0;
membar_enter();
if (dq->dq_sruncnt++ != 0) {
+ if (front) {
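+ /*
+ * The queue already has runnable threads at this
+ * priority; link the new arrival at the head so it is
+ * selected next.
+ */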
+ ASSERT(dq->dq_last != NULL);
+ tp->t_link = dq->dq_first;
+ dq->dq_first = tp;
+ } else {
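+ /* Back of the queue: append after the current tail. */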
ASSERT(dq->dq_first != NULL);
dq->dq_last->t_link = tp;
dq->dq_last = tp;
+ }
} else {
ASSERT(dq->dq_first == NULL);
ASSERT(dq->dq_last == NULL);
dq->dq_first = dq->dq_last = tp;
BT_SET(dp->disp_qactmap, tpri);
@@ -1354,171 +1389,40 @@
*/
cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL;
}
dp->disp_max_unbound_pri = tpri;
}
+
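+ /* Call the dispatcher's pluggable enqueue hook for the chosen CPU. */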
(*disp_enq_thread)(cp, bound);
}
/*
- * Put the specified thread on the front of the dispatcher
+ * Put the specified thread on the back of the dispatcher
* queue corresponding to its current priority.
*
* Called with the thread in transition, onproc or stopped state
* and locked (transition implies locked) and at high spl.
* Returns with the thread in TS_RUN state and still locked.
*/
void
-setfrontdq(kthread_t *tp)
+setbackdq(kthread_t *tp)
{
- disp_t *dp;
- dispq_t *dq;
- cpu_t *cp;
- pri_t tpri;
- int bound;
-
- ASSERT(THREAD_LOCK_HELD(tp));
- ASSERT((tp->t_schedflag & TS_ALLSTART) == 0);
- ASSERT(!thread_on_queue(tp)); /* make sure tp isn't on a runq */
-
- /*
- * If thread is "swapped" or on the swap queue don't
- * queue it, but wake sched.
- */
- if ((tp->t_schedflag & (TS_LOAD | TS_ON_SWAPQ)) != TS_LOAD) {
- disp_swapped_setrun(tp);
- return;
- }
-
- if (tp->t_bound_cpu || tp->t_weakbound_cpu)
- bound = 1;
- else
- bound = 0;
-
- tpri = DISP_PRIO(tp);
- if (ncpus == 1)
- cp = tp->t_cpu;
- else if (!bound) {
- if (tpri >= kpqpri) {
- setkpdq(tp, SETKP_FRONT);
- return;
- }
- cp = tp->t_cpu;
- if (tp->t_cpupart == cp->cpu_part) {
- /*
- * We'll generally let this thread continue to run
- * where it last ran, but will consider migration if:
- * - The thread last ran outside it's home lgroup.
- * - The CPU where it last ran is the target of an
- * offline request (a thread_nomigrate() on the in
- * motion CPU relies on this when forcing a preempt).
- * - The thread isn't the highest priority thread where
- * it last ran, and it is considered not likely to
- * have significant cache warmth.
- */
- if ((!LGRP_CONTAINS_CPU(tp->t_lpl->lpl_lgrp, cp)) ||
- (cp == cpu_inmotion)) {
- cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri,
- (tp == curthread) ? cp : NULL);
- } else if ((tpri < cp->cpu_disp->disp_maxrunpri) &&
- (!THREAD_HAS_CACHE_WARMTH(tp))) {
- cp = disp_lowpri_cpu(tp->t_cpu, tp->t_lpl, tpri,
- NULL);
- }
- } else {
- /*
- * Migrate to a cpu in the new partition.
- */
- cp = disp_lowpri_cpu(tp->t_cpupart->cp_cpulist,
- tp->t_lpl, tp->t_pri, NULL);
- }
- ASSERT((cp->cpu_flags & CPU_QUIESCED) == 0);
- } else {
- /*
- * It is possible that t_weakbound_cpu != t_bound_cpu (for
- * a short time until weak binding that existed when the
- * strong binding was established has dropped) so we must
- * favour weak binding over strong.
- */
- cp = tp->t_weakbound_cpu ?
- tp->t_weakbound_cpu : tp->t_bound_cpu;
- }
-
- /*
- * A thread that is ONPROC may be temporarily placed on the run queue
- * but then chosen to run again by disp. If the thread we're placing on
- * the queue is in TS_ONPROC state, don't set its t_waitrq until a
- * replacement process is actually scheduled in swtch(). In this
- * situation, curthread is the only thread that could be in the ONPROC
- * state.
- */
- if ((tp != curthread) && (tp->t_waitrq == 0)) {
- hrtime_t curtime;
-
- curtime = gethrtime_unscaled();
- (void) cpu_update_pct(tp, curtime);
- tp->t_waitrq = curtime;
- } else {
- (void) cpu_update_pct(tp, gethrtime_unscaled());
- }
-
- dp = cp->cpu_disp;
- disp_lock_enter_high(&dp->disp_lock);
-
- TRACE_2(TR_FAC_DISP, TR_FRONTQ, "frontq:pri %d tid %p", tpri, tp);
- DTRACE_SCHED3(enqueue, kthread_t *, tp, disp_t *, dp, int, 1);
-
-#ifndef NPROBE
- /* Kernel probe */
- if (tnf_tracing_active)
- tnf_thread_queue(tp, cp, tpri);
-#endif /* NPROBE */
-
- ASSERT(tpri >= 0 && tpri < dp->disp_npri);
-
- THREAD_RUN(tp, &dp->disp_lock); /* set TS_RUN state and lock */
- tp->t_disp_queue = dp;
-
- dq = &dp->disp_q[tpri];
- dp->disp_nrunnable++;
- if (!bound)
- dp->disp_steal = 0;
- membar_enter();
-
- if (dq->dq_sruncnt++ != 0) {
- ASSERT(dq->dq_last != NULL);
- tp->t_link = dq->dq_first;
- dq->dq_first = tp;
- } else {
- ASSERT(dq->dq_last == NULL);
- ASSERT(dq->dq_first == NULL);
- tp->t_link = NULL;
- dq->dq_first = dq->dq_last = tp;
- BT_SET(dp->disp_qactmap, tpri);
- if (tpri > dp->disp_maxrunpri) {
- dp->disp_maxrunpri = tpri;
- membar_enter();
- cpu_resched(cp, tpri);
- }
- }
+ setfrontbackdq(tp, B_FALSE);
+}
- if (!bound && tpri > dp->disp_max_unbound_pri) {
- if (tp == curthread && dp->disp_max_unbound_pri == -1 &&
- cp == CPU) {
- /*
- * If there are no other unbound threads on the
- * run queue, don't allow other CPUs to steal
- * this thread while we are in the middle of a
- * context switch. We may just switch to it
- * again right away. CPU_DISP_DONTSTEAL is cleared
- * in swtch and swtch_to.
+/*
+ * Put the specified thread on the front of the dispatcher
+ * queue corresponding to its current priority.
+ *
+ * Called with the thread in transition, onproc or stopped state
+ * and locked (transition implies locked) and at high spl.
+ * Returns with the thread in TS_RUN state and still locked.
*/
- cp->cpu_disp_flags |= CPU_DISP_DONTSTEAL;
- }
- dp->disp_max_unbound_pri = tpri;
- }
- (*disp_enq_thread)(cp, bound);
+void
+setfrontdq(kthread_t *tp)
+{
+ setfrontbackdq(tp, B_TRUE);
}
/*
* Put a high-priority unbound thread on the kp queue
*/