FreeBSD kernel kern code
sched_ule.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2002-2007, Jeffrey Roberson <jeff@freebsd.org>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  * notice unmodified, this list of conditions, and the following
12  * disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in the
15  * documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27  */
28 
29 /*
30  * This file implements the ULE scheduler. ULE supports independent CPU
31  * run queues and fine grain locking. It has superior interactive
32  * performance under load even on uni-processor systems.
33  *
34  * etymology:
35  * ULE is the last three letters in schedule. It owes its name to a
36  * generic user created for a scheduling system by Paul Mikesell at
37  * Isilon Systems and a general lack of creativity on the part of the author.
38  */
39 
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD: head/sys/kern/sched_ule.c 333344 2018-05-07 23:36:16Z mjg $");
42 
43 #include "opt_hwpmc_hooks.h"
44 #include "opt_sched.h"
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kdb.h>
49 #include <sys/kernel.h>
50 #include <sys/ktr.h>
51 #include <sys/limits.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/proc.h>
55 #include <sys/resource.h>
56 #include <sys/resourcevar.h>
57 #include <sys/sched.h>
58 #include <sys/sdt.h>
59 #include <sys/smp.h>
60 #include <sys/sx.h>
61 #include <sys/sysctl.h>
62 #include <sys/sysproto.h>
63 #include <sys/turnstile.h>
64 #include <sys/umtx.h>
65 #include <sys/vmmeter.h>
66 #include <sys/cpuset.h>
67 #include <sys/sbuf.h>
68 
69 #ifdef HWPMC_HOOKS
70 #include <sys/pmckern.h>
71 #endif
72 
73 #ifdef KDTRACE_HOOKS
74 #include <sys/dtrace_bsd.h>
75 int dtrace_vtime_active;
76 dtrace_vtime_switch_func_t dtrace_vtime_switch_func;
77 #endif
78 
79 #include <machine/cpu.h>
80 #include <machine/smp.h>
81 
82 #define KTR_ULE 0
83 
84 #define TS_NAME_LEN (MAXCOMLEN + sizeof(" td ") + sizeof(__XSTRING(UINT_MAX)))
85 #define TDQ_NAME_LEN (sizeof("sched lock ") + sizeof(__XSTRING(MAXCPU)))
86 #define TDQ_LOADNAME_LEN (sizeof("CPU ") + sizeof(__XSTRING(MAXCPU)) - 1 + sizeof(" load"))
87 
88 /*
89  * Thread scheduler specific section. All fields are protected
90  * by the thread lock.
91  */
92 struct td_sched {
93  struct runq *ts_runq; /* Run-queue we're queued on. */
94  short ts_flags; /* TSF_* flags. */
95  int ts_cpu; /* CPU that we have affinity for. */
96  int ts_rltick; /* Real last tick, for affinity. */
97  int ts_slice; /* Ticks of slice remaining. */
98  u_int ts_slptime; /* Number of ticks we vol. slept */
99  u_int ts_runtime; /* Number of ticks we were running */
100  int ts_ltick; /* Last tick that we were running on */
101  int ts_ftick; /* First tick that we were running on */
102  int ts_ticks; /* Tick count */
103 #ifdef KTR
104  char ts_name[TS_NAME_LEN];
105 #endif
106 };
107 /* flags kept in ts_flags */
108 #define TSF_BOUND 0x0001 /* Thread can not migrate. */
109 #define TSF_XFERABLE 0x0002 /* Thread was added as transferable. */
110 
111 #define THREAD_CAN_MIGRATE(td) ((td)->td_pinned == 0)
112 #define THREAD_CAN_SCHED(td, cpu) \
113  CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
114 
115 _Static_assert(sizeof(struct thread) + sizeof(struct td_sched) <=
116  sizeof(struct thread0_storage),
117  "increase struct thread0_storage.t0st_sched size");
118 
119 /*
120  * Priority ranges used for interactive and non-interactive timeshare
121  * threads. The timeshare priorities are split up into four ranges.
122  * The first range handles interactive threads. The last three ranges
123  * (NHALF, x, and NHALF) handle non-interactive threads with the outer
124  * ranges supporting nice values.
125  */
126 #define PRI_TIMESHARE_RANGE (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE + 1)
127 #define PRI_INTERACT_RANGE ((PRI_TIMESHARE_RANGE - SCHED_PRI_NRESV) / 2)
128 #define PRI_BATCH_RANGE (PRI_TIMESHARE_RANGE - PRI_INTERACT_RANGE)
129 
130 #define PRI_MIN_INTERACT PRI_MIN_TIMESHARE
131 #define PRI_MAX_INTERACT (PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE - 1)
132 #define PRI_MIN_BATCH (PRI_MIN_TIMESHARE + PRI_INTERACT_RANGE)
133 #define PRI_MAX_BATCH PRI_MAX_TIMESHARE
134 
135 /*
136  * Cpu percentage computation macros and defines.
137  *
138  * SCHED_TICK_SECS: Number of seconds to average the cpu usage across.
139  * SCHED_TICK_TARG: Number of hz ticks to average the cpu usage across.
140  * SCHED_TICK_MAX: Maximum number of ticks before scaling back.
141  * SCHED_TICK_SHIFT: Shift factor to avoid rounding away results.
142  * SCHED_TICK_HZ: Compute the number of hz ticks for a given ticks count.
143  * SCHED_TICK_TOTAL: Gives the amount of time we've been recording ticks.
144  */
145 #define SCHED_TICK_SECS 10
146 #define SCHED_TICK_TARG (hz * SCHED_TICK_SECS)
147 #define SCHED_TICK_MAX (SCHED_TICK_TARG + hz)
148 #define SCHED_TICK_SHIFT 10
149 #define SCHED_TICK_HZ(ts) ((ts)->ts_ticks >> SCHED_TICK_SHIFT)
150 #define SCHED_TICK_TOTAL(ts) (max((ts)->ts_ltick - (ts)->ts_ftick, hz))
151 
152 /*
153  * These macros determine priorities for non-interactive threads. They are
154  * assigned a priority based on their recent cpu utilization as expressed
155  * by the ratio of ticks to the tick total. NHALF priorities at the start
156  * and end of the MIN to MAX timeshare range are only reachable with negative
157  * or positive nice respectively.
158  *
159  * PRI_RANGE: Priority range for utilization dependent priorities.
160  * PRI_NRESV: Number of nice values.
161  * PRI_TICKS: Compute a priority in PRI_RANGE from the ticks count and total.
162  * PRI_NICE: Determines the part of the priority inherited from nice.
163  */
164 #define SCHED_PRI_NRESV (PRIO_MAX - PRIO_MIN)
165 #define SCHED_PRI_NHALF (SCHED_PRI_NRESV / 2)
166 #define SCHED_PRI_MIN (PRI_MIN_BATCH + SCHED_PRI_NHALF)
167 #define SCHED_PRI_MAX (PRI_MAX_BATCH - SCHED_PRI_NHALF)
168 #define SCHED_PRI_RANGE (SCHED_PRI_MAX - SCHED_PRI_MIN + 1)
169 #define SCHED_PRI_TICKS(ts) \
170  (SCHED_TICK_HZ((ts)) / \
171  (roundup(SCHED_TICK_TOTAL((ts)), SCHED_PRI_RANGE) / SCHED_PRI_RANGE))
172 #define SCHED_PRI_NICE(nice) (nice)
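/*
 * Worked example of the mapping above: sched_pctcpu_update() accumulates
 * ts_ticks as hz ticks shifted left by SCHED_TICK_SHIFT, so SCHED_TICK_HZ(ts)
 * is roughly the number of hz ticks the thread actually ran inside its
 * history window.  For a thread whose window spans the full SCHED_TICK_TARG
 * and which ran R of those ticks:
 *
 *	SCHED_PRI_TICKS(ts) ~= R * SCHED_PRI_RANGE / SCHED_TICK_TARG
 *
 * i.e. the fraction of cpu consumed mapped onto the batch priority range;
 * sched_priority() adds this value (clamped to SCHED_PRI_RANGE - 1) and the
 * nice offset to SCHED_PRI_MIN.
 */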
173 
174 /*
175  * These determine the interactivity of a process. Interactivity differs from
176  * cpu utilization in that it expresses the voluntary time slept vs time ran
177  * while cpu utilization includes all time not running. This more accurately
178  * models the intent of the thread.
179  *
180  * SLP_RUN_MAX: Maximum amount of sleep time + run time we'll accumulate
181  * before throttling back.
182  * SLP_RUN_FORK: Maximum slp+run time to inherit at fork time.
183  * INTERACT_MAX: Maximum interactivity value. Smaller is better.
184  * INTERACT_THRESH: Threshold for placement on the current runq.
185  */
186 #define SCHED_SLP_RUN_MAX ((hz * 5) << SCHED_TICK_SHIFT)
187 #define SCHED_SLP_RUN_FORK ((hz / 2) << SCHED_TICK_SHIFT)
188 #define SCHED_INTERACT_MAX (100)
189 #define SCHED_INTERACT_HALF (SCHED_INTERACT_MAX / 2)
190 #define SCHED_INTERACT_THRESH (30)
191 
192 /*
193  * These parameters determine the slice behavior for batch work.
194  */
195 #define SCHED_SLICE_DEFAULT_DIVISOR 10 /* ~94 ms, 12 stathz ticks. */
196 #define SCHED_SLICE_MIN_DIVISOR 6 /* DEFAULT/MIN = ~16 ms. */
197 
198 /* Flags kept in td_flags. */
199 #define TDF_SLICEEND TDF_SCHED2 /* Thread time slice is over. */
200 
201 /*
202  * tickincr: Converts a stathz tick into a hz domain scaled by
203  * the shift factor. Without the shift the error rate
204  * due to rounding would be unacceptably high.
205  * realstathz: stathz is sometimes 0 and run off of hz.
206  * sched_slice: Runtime of each thread before rescheduling.
207  * preempt_thresh: Priority threshold for preemption and remote IPIs.
208  */
209 static int sched_interact = SCHED_INTERACT_THRESH;
210 static int tickincr = 8 << SCHED_TICK_SHIFT;
211 static int realstathz = 127; /* reset during boot. */
212 static int sched_slice = 10; /* reset during boot. */
213 static int sched_slice_min = 1; /* reset during boot. */
214 #ifdef PREEMPTION
215 #ifdef FULL_PREEMPTION
216 static int preempt_thresh = PRI_MAX_IDLE;
217 #else
218 static int preempt_thresh = PRI_MIN_KERN;
219 #endif
220 #else
221 static int preempt_thresh = 0;
222 #endif
223 static int static_boost = PRI_MIN_BATCH;
224 static int sched_idlespins = 10000;
225 static int sched_idlespinthresh = -1;
226 
227 /*
228  * tdq - per processor runqs and statistics. All fields are protected by the
229  * tdq_lock. The load and lowpri may be accessed without the lock to avoid
230  * excess locking in sched_pickcpu().
231  */
232 struct tdq {
233  /*
234  * Ordered to improve efficiency of cpu_search() and switch().
235  * tdq_lock is padded to avoid false sharing with tdq_load and
236  * tdq_cpu_idle.
237  */
238  struct mtx_padalign tdq_lock; /* run queue lock. */
239  struct cpu_group *tdq_cg; /* Pointer to cpu topology. */
240  volatile int tdq_load; /* Aggregate load. */
241  volatile int tdq_cpu_idle; /* cpu_idle() is active. */
242  int tdq_sysload; /* For loadavg, !ITHD load. */
243  volatile int tdq_transferable; /* Transferable thread count. */
244  volatile short tdq_switchcnt; /* Switches this tick. */
245  volatile short tdq_oldswitchcnt; /* Switches last tick. */
246  u_char tdq_lowpri; /* Lowest priority thread. */
247  u_char tdq_ipipending; /* IPI pending. */
248  u_char tdq_idx; /* Current insert index. */
249  u_char tdq_ridx; /* Current removal index. */
250  struct runq tdq_realtime; /* real-time run queue. */
251  struct runq tdq_timeshare; /* timeshare run queue. */
252  struct runq tdq_idle; /* Queue of IDLE threads. */
253  char tdq_name[TDQ_NAME_LEN];
254 #ifdef KTR
255  char tdq_loadname[TDQ_LOADNAME_LEN];
256 #endif
257 } __aligned(64);
258 
259 /* Idle thread states and config. */
260 #define TDQ_RUNNING 1
261 #define TDQ_IDLE 2
262 
263 #ifdef SMP
264 struct cpu_group *cpu_top; /* CPU topology */
265 
266 #define SCHED_AFFINITY_DEFAULT (max(1, hz / 1000))
267 #define SCHED_AFFINITY(ts, t) ((ts)->ts_rltick > ticks - ((t) * affinity))
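/*
 * Example, assuming hz = 1000: the default affinity window is
 * max(1, hz / 1000) = 1 tick, so SCHED_AFFINITY(ts, CG_SHARE_L2) in
 * sched_pickcpu() holds only if the thread last ran within roughly the past
 * CG_SHARE_L2 * affinity = 2 hz ticks (about 2 ms); higher cache levels get
 * proportionally longer windows via cg_level.
 */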
268 
269 /*
270  * Run-time tunables.
271  */
272 static int rebalance = 1;
273 static int balance_interval = 128; /* Default set in sched_initticks(). */
274 static int affinity;
275 static int steal_idle = 1;
276 static int steal_thresh = 2;
277 static int always_steal = 0;
278 static int trysteal_limit = 2;
279 
280 /*
281  * One thread queue per processor.
282  */
283 static struct tdq tdq_cpu[MAXCPU];
284 static struct tdq *balance_tdq;
285 static int balance_ticks;
286 static DPCPU_DEFINE(uint32_t, randomval);
287 
288 #define TDQ_SELF() (&tdq_cpu[PCPU_GET(cpuid)])
289 #define TDQ_CPU(x) (&tdq_cpu[(x)])
290 #define TDQ_ID(x) ((int)((x) - tdq_cpu))
291 #else /* !SMP */
292 static struct tdq tdq_cpu;
293 
294 #define TDQ_ID(x) (0)
295 #define TDQ_SELF() (&tdq_cpu)
296 #define TDQ_CPU(x) (&tdq_cpu)
297 #endif
298 
299 #define TDQ_LOCK_ASSERT(t, type) mtx_assert(TDQ_LOCKPTR((t)), (type))
300 #define TDQ_LOCK(t) mtx_lock_spin(TDQ_LOCKPTR((t)))
301 #define TDQ_LOCK_FLAGS(t, f) mtx_lock_spin_flags(TDQ_LOCKPTR((t)), (f))
302 #define TDQ_UNLOCK(t) mtx_unlock_spin(TDQ_LOCKPTR((t)))
303 #define TDQ_LOCKPTR(t) ((struct mtx *)(&(t)->tdq_lock))
304 
305 static void sched_priority(struct thread *);
306 static void sched_thread_priority(struct thread *, u_char);
307 static int sched_interact_score(struct thread *);
308 static void sched_interact_update(struct thread *);
309 static void sched_interact_fork(struct thread *);
310 static void sched_pctcpu_update(struct td_sched *, int);
311 
312 /* Operations on per processor queues */
313 static struct thread *tdq_choose(struct tdq *);
314 static void tdq_setup(struct tdq *);
315 static void tdq_load_add(struct tdq *, struct thread *);
316 static void tdq_load_rem(struct tdq *, struct thread *);
317 static __inline void tdq_runq_add(struct tdq *, struct thread *, int);
318 static __inline void tdq_runq_rem(struct tdq *, struct thread *);
319 static inline int sched_shouldpreempt(int, int, int);
320 void tdq_print(int cpu);
321 static void runq_print(struct runq *rq);
322 static void tdq_add(struct tdq *, struct thread *, int);
323 #ifdef SMP
324 static struct thread *tdq_move(struct tdq *, struct tdq *);
325 static int tdq_idled(struct tdq *);
326 static void tdq_notify(struct tdq *, struct thread *);
327 static struct thread *tdq_steal(struct tdq *, int);
328 static struct thread *runq_steal(struct runq *, int);
329 static int sched_pickcpu(struct thread *, int);
330 static void sched_balance(void);
331 static int sched_balance_pair(struct tdq *, struct tdq *);
332 static inline struct tdq *sched_setcpu(struct thread *, int, int);
333 static inline void thread_unblock_switch(struct thread *, struct mtx *);
334 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
335 static int sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS);
336 static int sysctl_kern_sched_topology_spec_internal(struct sbuf *sb,
337  struct cpu_group *cg, int indent);
338 #endif
339 
340 static void sched_setup(void *dummy);
341 SYSINIT(sched_setup, SI_SUB_RUN_QUEUE, SI_ORDER_FIRST, sched_setup, NULL);
342 
343 static void sched_initticks(void *dummy);
344 SYSINIT(sched_initticks, SI_SUB_CLOCKS, SI_ORDER_THIRD, sched_initticks,
345  NULL);
346 
347 SDT_PROVIDER_DEFINE(sched);
348 
349 SDT_PROBE_DEFINE3(sched, , , change__pri, "struct thread *",
350  "struct proc *", "uint8_t");
351 SDT_PROBE_DEFINE3(sched, , , dequeue, "struct thread *",
352  "struct proc *", "void *");
353 SDT_PROBE_DEFINE4(sched, , , enqueue, "struct thread *",
354  "struct proc *", "void *", "int");
355 SDT_PROBE_DEFINE4(sched, , , lend__pri, "struct thread *",
356  "struct proc *", "uint8_t", "struct thread *");
357 SDT_PROBE_DEFINE2(sched, , , load__change, "int", "int");
358 SDT_PROBE_DEFINE2(sched, , , off__cpu, "struct thread *",
359  "struct proc *");
360 SDT_PROBE_DEFINE(sched, , , on__cpu);
361 SDT_PROBE_DEFINE(sched, , , remain__cpu);
362 SDT_PROBE_DEFINE2(sched, , , surrender, "struct thread *",
363  "struct proc *");
364 
365 /*
366  * Print the threads waiting on a run-queue.
367  */
368 static void
369 runq_print(struct runq *rq)
370 {
371  struct rqhead *rqh;
372  struct thread *td;
373  int pri;
374  int j;
375  int i;
376 
377  for (i = 0; i < RQB_LEN; i++) {
378  printf("\t\trunq bits %d 0x%zx\n",
379  i, rq->rq_status.rqb_bits[i]);
380  for (j = 0; j < RQB_BPW; j++)
381  if (rq->rq_status.rqb_bits[i] & (1ul << j)) {
382  pri = j + (i << RQB_L2BPW);
383  rqh = &rq->rq_queues[pri];
384  TAILQ_FOREACH(td, rqh, td_runq) {
385  printf("\t\t\ttd %p(%s) priority %d rqindex %d pri %d\n",
386  td, td->td_name, td->td_priority,
387  td->td_rqindex, pri);
388  }
389  }
390  }
391 }
392 
393 /*
394  * Print the status of a per-cpu thread queue. Should be a ddb show cmd.
395  */
396 void
397 tdq_print(int cpu)
398 {
399  struct tdq *tdq;
400 
401  tdq = TDQ_CPU(cpu);
402 
403  printf("tdq %d:\n", TDQ_ID(tdq));
404  printf("\tlock %p\n", TDQ_LOCKPTR(tdq));
405  printf("\tLock name: %s\n", tdq->tdq_name);
406  printf("\tload: %d\n", tdq->tdq_load);
407  printf("\tswitch cnt: %d\n", tdq->tdq_switchcnt);
408  printf("\told switch cnt: %d\n", tdq->tdq_oldswitchcnt);
409  printf("\ttimeshare idx: %d\n", tdq->tdq_idx);
410  printf("\ttimeshare ridx: %d\n", tdq->tdq_ridx);
411  printf("\tload transferable: %d\n", tdq->tdq_transferable);
412  printf("\tlowest priority: %d\n", tdq->tdq_lowpri);
413  printf("\trealtime runq:\n");
414  runq_print(&tdq->tdq_realtime);
415  printf("\ttimeshare runq:\n");
416  runq_print(&tdq->tdq_timeshare);
417  printf("\tidle runq:\n");
418  runq_print(&tdq->tdq_idle);
419 }
420 
421 static inline int
422 sched_shouldpreempt(int pri, int cpri, int remote)
423 {
424  /*
425  * If the new priority is not better than the current priority there is
426  * nothing to do.
427  */
428  if (pri >= cpri)
429  return (0);
430  /*
431  * Always preempt idle.
432  */
433  if (cpri >= PRI_MIN_IDLE)
434  return (1);
435  /*
436  * If preemption is disabled don't preempt others.
437  */
438  if (preempt_thresh == 0)
439  return (0);
440  /*
441  * Preempt if we exceed the threshold.
442  */
443  if (pri <= preempt_thresh)
444  return (1);
445  /*
446  * If we're interactive or better and a non-interactive or worse
447  * thread is running, preempt only on remote processors.
448  */
449  if (remote && pri <= PRI_MAX_INTERACT && cpri > PRI_MAX_INTERACT)
450  return (1);
451  return (0);
452 }
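/*
 * For instance, with the in-kernel PREEMPTION default of
 * preempt_thresh = PRI_MIN_KERN: an idle cpu (cpri >= PRI_MIN_IDLE) is always
 * preempted, a newly runnable interrupt- or kernel-priority thread
 * (pri <= preempt_thresh) preempts any lower-priority curthread, and one
 * timeshare thread never preempts another unless the newcomer is interactive
 * (pri <= PRI_MAX_INTERACT), the running thread is batch, and the decision is
 * being made for a remote cpu.
 */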
453 
454 /*
455  * Add a thread to the actual run-queue. Keeps transferable counts up to
456  * date with what is actually on the run-queue. Selects the correct
457  * queue position for timeshare threads.
458  */
459 static __inline void
460 tdq_runq_add(struct tdq *tdq, struct thread *td, int flags)
461 {
462  struct td_sched *ts;
463  u_char pri;
464 
465  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
466  THREAD_LOCK_ASSERT(td, MA_OWNED);
467 
468  pri = td->td_priority;
469  ts = td_get_sched(td);
470  TD_SET_RUNQ(td);
471  if (THREAD_CAN_MIGRATE(td)) {
472  tdq->tdq_transferable++;
473  ts->ts_flags |= TSF_XFERABLE;
474  }
475  if (pri < PRI_MIN_BATCH) {
476  ts->ts_runq = &tdq->tdq_realtime;
477  } else if (pri <= PRI_MAX_BATCH) {
478  ts->ts_runq = &tdq->tdq_timeshare;
479  KASSERT(pri <= PRI_MAX_BATCH && pri >= PRI_MIN_BATCH,
480  ("Invalid priority %d on timeshare runq", pri));
481  /*
482  * This queue contains only priorities between MIN and MAX
483  * batch. Use the whole queue to represent these values.
484  */
485  if ((flags & (SRQ_BORROWING|SRQ_PREEMPTED)) == 0) {
486  pri = RQ_NQS * (pri - PRI_MIN_BATCH) / PRI_BATCH_RANGE;
487  pri = (pri + tdq->tdq_idx) % RQ_NQS;
488  /*
489  * This effectively shortens the queue by one so we
490  * can have a one slot difference between idx and
491  * ridx while we wait for threads to drain.
492  */
493  if (tdq->tdq_ridx != tdq->tdq_idx &&
494  pri == tdq->tdq_ridx)
495  pri = (unsigned char)(pri - 1) % RQ_NQS;
496  } else
497  pri = tdq->tdq_ridx;
498  runq_add_pri(ts->ts_runq, td, pri, flags);
499  return;
500  } else
501  ts->ts_runq = &tdq->tdq_idle;
502  runq_add(ts->ts_runq, td, flags);
503 }
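/*
 * Example of the rotation above: RQ_NQS is 64, so the best batch priority
 * maps onto tdq_idx itself while a thread in the middle of the batch range
 * hashes to the slot 32 past it (mod 64).  Threads are consumed starting at
 * tdq_ridx, which trails tdq_idx as queues drain, so threads placed further
 * ahead of tdq_ridx (worse batch priority) wait for more of the circular
 * queue to drain before reaching the head.  SRQ_BORROWING and SRQ_PREEMPTED
 * insertions bypass the rotation and go straight to tdq_ridx.
 */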
504 
505 /*
506  * Remove a thread from a run-queue. This typically happens when a thread
507  * is selected to run. Running threads are not on the queue and the
508  * transferable count does not reflect them.
509  */
510 static __inline void
511 tdq_runq_rem(struct tdq *tdq, struct thread *td)
512 {
513  struct td_sched *ts;
514 
515  ts = td_get_sched(td);
516  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
517  KASSERT(ts->ts_runq != NULL,
518  ("tdq_runq_remove: thread %p null ts_runq", td));
519  if (ts->ts_flags & TSF_XFERABLE) {
520  tdq->tdq_transferable--;
521  ts->ts_flags &= ~TSF_XFERABLE;
522  }
523  if (ts->ts_runq == &tdq->tdq_timeshare) {
524  if (tdq->tdq_idx != tdq->tdq_ridx)
525  runq_remove_idx(ts->ts_runq, td, &tdq->tdq_ridx);
526  else
527  runq_remove_idx(ts->ts_runq, td, NULL);
528  } else
529  runq_remove(ts->ts_runq, td);
530 }
531 
532 /*
533  * Load is maintained for all threads RUNNING and ON_RUNQ. Add the load
534  * for this thread to the referenced thread queue.
535  */
536 static void
537 tdq_load_add(struct tdq *tdq, struct thread *td)
538 {
539 
540  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
541  THREAD_LOCK_ASSERT(td, MA_OWNED);
542 
543  tdq->tdq_load++;
544  if ((td->td_flags & TDF_NOLOAD) == 0)
545  tdq->tdq_sysload++;
546  KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
547  SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
548 }
549 
550 /*
551  * Remove the load from a thread that is transitioning to a sleep state or
552  * exiting.
553  */
554 static void
555 tdq_load_rem(struct tdq *tdq, struct thread *td)
556 {
557 
558  THREAD_LOCK_ASSERT(td, MA_OWNED);
559  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
560  KASSERT(tdq->tdq_load != 0,
561  ("tdq_load_rem: Removing with 0 load on queue %d", TDQ_ID(tdq)));
562 
563  tdq->tdq_load--;
564  if ((td->td_flags & TDF_NOLOAD) == 0)
565  tdq->tdq_sysload--;
566  KTR_COUNTER0(KTR_SCHED, "load", tdq->tdq_loadname, tdq->tdq_load);
567  SDT_PROBE2(sched, , , load__change, (int)TDQ_ID(tdq), tdq->tdq_load);
568 }
569 
570 /*
571  * Bound timeshare latency by decreasing slice size as load increases. We
572  * consider the maximum latency as the sum of the threads waiting to run
573  * aside from curthread and target no more than sched_slice latency but
574  * no less than sched_slice_min runtime.
575  */
576 static inline int
577 tdq_slice(struct tdq *tdq)
578 {
579  int load;
580 
581  /*
582  * It is safe to use sys_load here because this is called from
583  * contexts where timeshare threads are running and so there
584  * cannot be higher priority load in the system.
585  */
586  load = tdq->tdq_sysload - 1;
587  if (load >= SCHED_SLICE_MIN_DIVISOR)
588  return (sched_slice_min);
589  if (load <= 1)
590  return (sched_slice);
591  return (sched_slice / load);
592 }
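/*
 * Example, assuming the stathz = 127 boot defaults computed in
 * sched_initticks() (sched_slice = 12, sched_slice_min = 2): with
 * tdq_sysload <= 2 a thread keeps the full 12 stathz ticks (~94 ms), with
 * tdq_sysload = 4 it gets 12 / 3 = 4 ticks (~31 ms), and once
 * tdq_sysload - 1 reaches SCHED_SLICE_MIN_DIVISOR (6) the slice floors at
 * 2 ticks (~16 ms).
 */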
593 
594 /*
595  * Set lowpri to its exact value by searching the run-queue and
596  * evaluating curthread. curthread may be passed as an optimization.
597  */
598 static void
599 tdq_setlowpri(struct tdq *tdq, struct thread *ctd)
600 {
601  struct thread *td;
602 
603  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
604  if (ctd == NULL)
605  ctd = pcpu_find(TDQ_ID(tdq))->pc_curthread;
606  td = tdq_choose(tdq);
607  if (td == NULL || td->td_priority > ctd->td_priority)
608  tdq->tdq_lowpri = ctd->td_priority;
609  else
610  tdq->tdq_lowpri = td->td_priority;
611 }
612 
613 #ifdef SMP
614 /*
615  * We need some randomness. Implement a classic Linear Congruential
616  * Generator X_{n+1}=(aX_n+c) mod m. These values are optimized for
617  * m = 2^32, a = 69069 and c = 5. We only return the upper 16 bits
618  * of the random state (in the low bits of our answer) to keep
619  * the maximum randomness.
620  */
621 static uint32_t
622 sched_random(void)
623 {
624  uint32_t *rndptr;
625 
626  rndptr = DPCPU_PTR(randomval);
627  *rndptr = *rndptr * 69069 + 5;
628 
629  return (*rndptr >> 16);
630 }
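/*
 * With a = 69069, c = 5, m = 2^32 the generator has full period (c is odd and
 * a - 1 is divisible by 4), but the low-order bits of any power-of-two
 * modulus LCG cycle with much shorter periods, hence only the upper 16 bits
 * are returned.  Starting from a per-cpu state of 0 the state sequence is
 * 5, 345350, ...; callers reduce the 16-bit result modulo a small bound,
 * e.g. sched_random() % balance_interval in sched_balance().
 */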
631 
632 struct cpu_search {
633  cpuset_t cs_mask;
634  u_int cs_prefer;
635  int cs_pri; /* Min priority for low. */
636  int cs_limit; /* Max load for low, min load for high. */
637  int cs_cpu;
638  int cs_load;
639 };
640 
641 #define CPU_SEARCH_LOWEST 0x1
642 #define CPU_SEARCH_HIGHEST 0x2
643 #define CPU_SEARCH_BOTH (CPU_SEARCH_LOWEST|CPU_SEARCH_HIGHEST)
644 
645 #define CPUSET_FOREACH(cpu, mask) \
646  for ((cpu) = 0; (cpu) <= mp_maxid; (cpu)++) \
647  if (CPU_ISSET(cpu, &mask))
648 
649 static __always_inline int cpu_search(const struct cpu_group *cg,
650  struct cpu_search *low, struct cpu_search *high, const int match);
651 int __noinline cpu_search_lowest(const struct cpu_group *cg,
652  struct cpu_search *low);
653 int __noinline cpu_search_highest(const struct cpu_group *cg,
654  struct cpu_search *high);
655 int __noinline cpu_search_both(const struct cpu_group *cg,
656  struct cpu_search *low, struct cpu_search *high);
657 
658 /*
659  * Search the tree of cpu_groups for the lowest or highest loaded cpu
660  * according to the match argument. This routine actually compares the
661  * load on all paths through the tree and finds the least loaded cpu on
662  * the least loaded path, which may differ from the least loaded cpu in
663  * the system. This balances work among caches and buses.
664  *
665  * This inline is instantiated in three forms below using constants for the
666  * match argument. It is reduced to the minimum set for each case. It is
667  * also recursive to the depth of the tree.
668  */
669 static __always_inline int
670 cpu_search(const struct cpu_group *cg, struct cpu_search *low,
671  struct cpu_search *high, const int match)
672 {
673  struct cpu_search lgroup;
674  struct cpu_search hgroup;
675  cpuset_t cpumask;
676  struct cpu_group *child;
677  struct tdq *tdq;
678  int cpu, i, hload, lload, load, total, rnd;
679 
680  total = 0;
681  cpumask = cg->cg_mask;
682  if (match & CPU_SEARCH_LOWEST) {
683  lload = INT_MAX;
684  lgroup = *low;
685  }
686  if (match & CPU_SEARCH_HIGHEST) {
687  hload = INT_MIN;
688  hgroup = *high;
689  }
690 
691  /* Iterate through the child CPU groups and then remaining CPUs. */
692  for (i = cg->cg_children, cpu = mp_maxid; ; ) {
693  if (i == 0) {
694 #ifdef HAVE_INLINE_FFSL
695  cpu = CPU_FFS(&cpumask) - 1;
696 #else
697  while (cpu >= 0 && !CPU_ISSET(cpu, &cpumask))
698  cpu--;
699 #endif
700  if (cpu < 0)
701  break;
702  child = NULL;
703  } else
704  child = &cg->cg_child[i - 1];
705 
706  if (match & CPU_SEARCH_LOWEST)
707  lgroup.cs_cpu = -1;
708  if (match & CPU_SEARCH_HIGHEST)
709  hgroup.cs_cpu = -1;
710  if (child) { /* Handle child CPU group. */
711  CPU_NAND(&cpumask, &child->cg_mask);
712  switch (match) {
713  case CPU_SEARCH_LOWEST:
714  load = cpu_search_lowest(child, &lgroup);
715  break;
716  case CPU_SEARCH_HIGHEST:
717  load = cpu_search_highest(child, &hgroup);
718  break;
719  case CPU_SEARCH_BOTH:
720  load = cpu_search_both(child, &lgroup, &hgroup);
721  break;
722  }
723  } else { /* Handle child CPU. */
724  CPU_CLR(cpu, &cpumask);
725  tdq = TDQ_CPU(cpu);
726  load = tdq->tdq_load * 256;
727  rnd = sched_random() % 32;
728  if (match & CPU_SEARCH_LOWEST) {
729  if (cpu == low->cs_prefer)
730  load -= 64;
731  /* If that CPU is allowed, gather its load data. */
732  if (tdq->tdq_lowpri > lgroup.cs_pri &&
733  tdq->tdq_load <= lgroup.cs_limit &&
734  CPU_ISSET(cpu, &lgroup.cs_mask)) {
735  lgroup.cs_cpu = cpu;
736  lgroup.cs_load = load - rnd;
737  }
738  }
739  if (match & CPU_SEARCH_HIGHEST)
740  if (tdq->tdq_load >= hgroup.cs_limit &&
741  tdq->tdq_transferable &&
742  CPU_ISSET(cpu, &hgroup.cs_mask)) {
743  hgroup.cs_cpu = cpu;
744  hgroup.cs_load = load - rnd;
745  }
746  }
747  total += load;
748 
749  /* We have info about child item. Compare it. */
750  if (match & CPU_SEARCH_LOWEST) {
751  if (lgroup.cs_cpu >= 0 &&
752  (load < lload ||
753  (load == lload && lgroup.cs_load < low->cs_load))) {
754  lload = load;
755  low->cs_cpu = lgroup.cs_cpu;
756  low->cs_load = lgroup.cs_load;
757  }
758  }
759  if (match & CPU_SEARCH_HIGHEST)
760  if (hgroup.cs_cpu >= 0 &&
761  (load > hload ||
762  (load == hload && hgroup.cs_load > high->cs_load))) {
763  hload = load;
764  high->cs_cpu = hgroup.cs_cpu;
765  high->cs_load = hgroup.cs_load;
766  }
767  if (child) {
768  i--;
769  if (i == 0 && CPU_EMPTY(&cpumask))
770  break;
771  }
772 #ifndef HAVE_INLINE_FFSL
773  else
774  cpu--;
775 #endif
776  }
777  return (total);
778 }
779 
780 /*
781  * cpu_search instantiations must pass constants to maintain the inline
782  * optimization.
783  */
784 int
785 cpu_search_lowest(const struct cpu_group *cg, struct cpu_search *low)
786 {
787  return cpu_search(cg, low, NULL, CPU_SEARCH_LOWEST);
788 }
789 
790 int
791 cpu_search_highest(const struct cpu_group *cg, struct cpu_search *high)
792 {
793  return cpu_search(cg, NULL, high, CPU_SEARCH_HIGHEST);
794 }
795 
796 int
797 cpu_search_both(const struct cpu_group *cg, struct cpu_search *low,
798  struct cpu_search *high)
799 {
800  return cpu_search(cg, low, high, CPU_SEARCH_BOTH);
801 }
802 
803 /*
804  * Find the cpu with the least load via the least loaded path that has a
805  * lowpri greater than pri. A pri of -1 indicates any priority is
806  * acceptable.
807  */
808 static inline int
809 sched_lowest(const struct cpu_group *cg, cpuset_t mask, int pri, int maxload,
810  int prefer)
811 {
812  struct cpu_search low;
813 
814  low.cs_cpu = -1;
815  low.cs_prefer = prefer;
816  low.cs_mask = mask;
817  low.cs_pri = pri;
818  low.cs_limit = maxload;
819  cpu_search_lowest(cg, &low);
820  return low.cs_cpu;
821 }
822 
823 /*
824  * Find the cpu with the highest load via the highest loaded path.
825  */
826 static inline int
827 sched_highest(const struct cpu_group *cg, cpuset_t mask, int minload)
828 {
829  struct cpu_search high;
830 
831  high.cs_cpu = -1;
832  high.cs_mask = mask;
833  high.cs_limit = minload;
834  cpu_search_highest(cg, &high);
835  return high.cs_cpu;
836 }
837 
838 static void
839 sched_balance_group(struct cpu_group *cg)
840 {
841  cpuset_t hmask, lmask;
842  int high, low, anylow;
843 
844  CPU_FILL(&hmask);
845  for (;;) {
846  high = sched_highest(cg, hmask, 2);
847  /* Stop if there are no more CPUs with transferable threads. */
848  if (high == -1)
849  break;
850  CPU_CLR(high, &hmask);
851  CPU_COPY(&hmask, &lmask);
852  /* Stop if there are no more CPUs left for low. */
853  if (CPU_EMPTY(&lmask))
854  break;
855  anylow = 1;
856 nextlow:
857  low = sched_lowest(cg, lmask, -1,
858  TDQ_CPU(high)->tdq_load - 1, high);
859  /* Stop if the full search found no less loaded CPU. */
860  if (anylow && low == -1)
861  break;
862  /* Go to next high if we found no less loaded CPU. */
863  if (low == -1)
864  continue;
865  /* Transfer thread from high to low. */
866  if (sched_balance_pair(TDQ_CPU(high), TDQ_CPU(low))) {
867  /* CPU that got thread can no longer be a donor. */
868  CPU_CLR(low, &hmask);
869  } else {
870  /*
871  * If the move failed, then there are no threads on high
872  * that can run on this low CPU. Drop low from the low
873  * mask and look for a different one.
874  */
875  CPU_CLR(low, &lmask);
876  anylow = 0;
877  goto nextlow;
878  }
879  }
880 }
881 
882 static void
883 sched_balance(void)
884 {
885  struct tdq *tdq;
886 
887  if (smp_started == 0 || rebalance == 0)
888  return;
889 
890  balance_ticks = max(balance_interval / 2, 1) +
891  (sched_random() % balance_interval);
892  tdq = TDQ_SELF();
893  TDQ_UNLOCK(tdq);
894  sched_balance_group(cpu_top);
895  TDQ_LOCK(tdq);
896 }
897 
898 /*
899  * Lock two thread queues using their address to maintain lock order.
900  */
901 static void
902 tdq_lock_pair(struct tdq *one, struct tdq *two)
903 {
904  if (one < two) {
905  TDQ_LOCK(one);
906  TDQ_LOCK_FLAGS(two, MTX_DUPOK);
907  } else {
908  TDQ_LOCK(two);
909  TDQ_LOCK_FLAGS(one, MTX_DUPOK);
910  }
911 }
912 
913 /*
914  * Unlock two thread queues. Order is not important here.
915  */
916 static void
917 tdq_unlock_pair(struct tdq *one, struct tdq *two)
918 {
919  TDQ_UNLOCK(one);
920  TDQ_UNLOCK(two);
921 }
922 
923 /*
924  * Transfer load between two imbalanced thread queues.
925  */
926 static int
927 sched_balance_pair(struct tdq *high, struct tdq *low)
928 {
929  struct thread *td;
930  int cpu;
931 
932  tdq_lock_pair(high, low);
933  td = NULL;
934  /*
935  * Transfer a thread from high to low.
936  */
937  if (high->tdq_transferable != 0 && high->tdq_load > low->tdq_load &&
938  (td = tdq_move(high, low)) != NULL) {
939  /*
940  * In case the target isn't the current cpu notify it of the
941  * new load, possibly sending an IPI to force it to reschedule.
942  */
943  cpu = TDQ_ID(low);
944  if (cpu != PCPU_GET(cpuid))
945  tdq_notify(low, td);
946  }
947  tdq_unlock_pair(high, low);
948  return (td != NULL);
949 }
950 
951 /*
952  * Move a thread from one thread queue to another.
953  */
954 static struct thread *
955 tdq_move(struct tdq *from, struct tdq *to)
956 {
957  struct td_sched *ts;
958  struct thread *td;
959  struct tdq *tdq;
960  int cpu;
961 
962  TDQ_LOCK_ASSERT(from, MA_OWNED);
963  TDQ_LOCK_ASSERT(to, MA_OWNED);
964 
965  tdq = from;
966  cpu = TDQ_ID(to);
967  td = tdq_steal(tdq, cpu);
968  if (td == NULL)
969  return (NULL);
970  ts = td_get_sched(td);
971  /*
972  * Although the run queue is locked the thread may be blocked. Lock
973  * it to clear this and acquire the run-queue lock.
974  */
975  thread_lock(td);
976  /* Drop recursive lock on from acquired via thread_lock(). */
977  TDQ_UNLOCK(from);
978  sched_rem(td);
979  ts->ts_cpu = cpu;
980  td->td_lock = TDQ_LOCKPTR(to);
981  tdq_add(to, td, SRQ_YIELDING);
982  return (td);
983 }
984 
985 /*
986  * This tdq has idled. Try to steal a thread from another cpu and switch
987  * to it.
988  */
989 static int
990 tdq_idled(struct tdq *tdq)
991 {
992  struct cpu_group *cg;
993  struct tdq *steal;
994  cpuset_t mask;
995  int cpu, switchcnt;
996 
997  if (smp_started == 0 || steal_idle == 0 || tdq->tdq_cg == NULL)
998  return (1);
999  CPU_FILL(&mask);
1000  CPU_CLR(PCPU_GET(cpuid), &mask);
1001  restart:
1002  switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
1003  for (cg = tdq->tdq_cg; ; ) {
1004  cpu = sched_highest(cg, mask, steal_thresh);
1005  /*
1006  * We were assigned a thread but not preempted. Returning
1007  * 0 here will cause our caller to switch to it.
1008  */
1009  if (tdq->tdq_load)
1010  return (0);
1011  if (cpu == -1) {
1012  cg = cg->cg_parent;
1013  if (cg == NULL)
1014  return (1);
1015  continue;
1016  }
1017  steal = TDQ_CPU(cpu);
1018  /*
1019  * The data returned by sched_highest() is stale and
1020  * the chosen CPU no longer has an eligible thread.
1021  *
1022  * Testing this ahead of tdq_lock_pair() only catches
1023  * this situation about 20% of the time on an 8 core
1024  * 16 thread Ryzen 7, but it still helps performance.
1025  */
1026  if (steal->tdq_load < steal_thresh ||
1027  steal->tdq_transferable == 0)
1028  goto restart;
1029  tdq_lock_pair(tdq, steal);
1030  /*
1031  * We were assigned a thread while waiting for the locks.
1032  * Switch to it now instead of stealing a thread.
1033  */
1034  if (tdq->tdq_load)
1035  break;
1036  /*
1037  * The data returned by sched_highest() is stale and
1038  * the chosen CPU no longer has an eligible thread, or
1039  * we were preempted and the CPU loading info may be out
1040  * of date. The latter is rare. In either case restart
1041  * the search.
1042  */
1043  if (steal->tdq_load < steal_thresh ||
1044  steal->tdq_transferable == 0 ||
1045  switchcnt != tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt) {
1046  tdq_unlock_pair(tdq, steal);
1047  goto restart;
1048  }
1049  /*
1050  * Steal the thread and switch to it.
1051  */
1052  if (tdq_move(steal, tdq) != NULL)
1053  break;
1054  /*
1055  * We failed to acquire a thread even though it looked
1056  * like one was available. This could be due to affinity
1057  * restrictions or for other reasons. Loop again after
1058  * removing this CPU from the set. The restart logic
1059  * above does not restore this CPU to the set due to the
1060  * likelihood of failing here again.
1061  */
1062  CPU_CLR(cpu, &mask);
1063  tdq_unlock_pair(tdq, steal);
1064  }
1065  TDQ_UNLOCK(steal);
1066  mi_switch(SW_VOL | SWT_IDLE, NULL);
1067  thread_unlock(curthread);
1068  return (0);
1069 }
1070 
1071 /*
1072  * Notify a remote cpu of new work. Sends an IPI if criteria are met.
1073  */
1074 static void
1075 tdq_notify(struct tdq *tdq, struct thread *td)
1076 {
1077  struct thread *ctd;
1078  int pri;
1079  int cpu;
1080 
1081  if (tdq->tdq_ipipending)
1082  return;
1083  cpu = td_get_sched(td)->ts_cpu;
1084  pri = td->td_priority;
1085  ctd = pcpu_find(cpu)->pc_curthread;
1086  if (!sched_shouldpreempt(pri, ctd->td_priority, 1))
1087  return;
1088 
1089  /*
1090  * Make sure that our caller's earlier update to tdq_load is
1091  * globally visible before we read tdq_cpu_idle. Idle thread
1092  * accesses both of them without locks, and the order is important.
1093  */
1094  atomic_thread_fence_seq_cst();
1095 
1096  if (TD_IS_IDLETHREAD(ctd)) {
1097  /*
1098  * If the MD code has an idle wakeup routine try that before
1099  * falling back to IPI.
1100  */
1101  if (!tdq->tdq_cpu_idle || cpu_idle_wakeup(cpu))
1102  return;
1103  }
1104  tdq->tdq_ipipending = 1;
1105  ipi_cpu(cpu, IPI_PREEMPT);
1106 }
1107 
1108 /*
1109  * Steals load from a timeshare queue. Honors the rotating queue head
1110  * index.
1111  */
1112 static struct thread *
1113 runq_steal_from(struct runq *rq, int cpu, u_char start)
1114 {
1115  struct rqbits *rqb;
1116  struct rqhead *rqh;
1117  struct thread *td, *first;
1118  int bit;
1119  int i;
1120 
1121  rqb = &rq->rq_status;
1122  bit = start & (RQB_BPW -1);
1123  first = NULL;
1124 again:
1125  for (i = RQB_WORD(start); i < RQB_LEN; bit = 0, i++) {
1126  if (rqb->rqb_bits[i] == 0)
1127  continue;
1128  if (bit == 0)
1129  bit = RQB_FFS(rqb->rqb_bits[i]);
1130  for (; bit < RQB_BPW; bit++) {
1131  if ((rqb->rqb_bits[i] & (1ul << bit)) == 0)
1132  continue;
1133  rqh = &rq->rq_queues[bit + (i << RQB_L2BPW)];
1134  TAILQ_FOREACH(td, rqh, td_runq) {
1135  if (first && THREAD_CAN_MIGRATE(td) &&
1136  THREAD_CAN_SCHED(td, cpu))
1137  return (td);
1138  first = td;
1139  }
1140  }
1141  }
1142  if (start != 0) {
1143  start = 0;
1144  goto again;
1145  }
1146 
1147  if (first && THREAD_CAN_MIGRATE(first) &&
1148  THREAD_CAN_SCHED(first, cpu))
1149  return (first);
1150  return (NULL);
1151 }
1152 
1153 /*
1154  * Steals load from a standard linear queue.
1155  */
1156 static struct thread *
1157 runq_steal(struct runq *rq, int cpu)
1158 {
1159  struct rqhead *rqh;
1160  struct rqbits *rqb;
1161  struct thread *td;
1162  int word;
1163  int bit;
1164 
1165  rqb = &rq->rq_status;
1166  for (word = 0; word < RQB_LEN; word++) {
1167  if (rqb->rqb_bits[word] == 0)
1168  continue;
1169  for (bit = 0; bit < RQB_BPW; bit++) {
1170  if ((rqb->rqb_bits[word] & (1ul << bit)) == 0)
1171  continue;
1172  rqh = &rq->rq_queues[bit + (word << RQB_L2BPW)];
1173  TAILQ_FOREACH(td, rqh, td_runq)
1174  if (THREAD_CAN_MIGRATE(td) &&
1175  THREAD_CAN_SCHED(td, cpu))
1176  return (td);
1177  }
1178  }
1179  return (NULL);
1180 }
1181 
1182 /*
1183  * Attempt to steal a thread in priority order from a thread queue.
1184  */
1185 static struct thread *
1186 tdq_steal(struct tdq *tdq, int cpu)
1187 {
1188  struct thread *td;
1189 
1190  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1191  if ((td = runq_steal(&tdq->tdq_realtime, cpu)) != NULL)
1192  return (td);
1193  if ((td = runq_steal_from(&tdq->tdq_timeshare,
1194  cpu, tdq->tdq_ridx)) != NULL)
1195  return (td);
1196  return (runq_steal(&tdq->tdq_idle, cpu));
1197 }
1198 
1199 /*
1200  * Sets the thread lock and ts_cpu to match the requested cpu. Unlocks the
1201  * current lock and returns with the assigned queue locked.
1202  */
1203 static inline struct tdq *
1204 sched_setcpu(struct thread *td, int cpu, int flags)
1205 {
1206 
1207  struct tdq *tdq;
1208 
1209  THREAD_LOCK_ASSERT(td, MA_OWNED);
1210  tdq = TDQ_CPU(cpu);
1211  td_get_sched(td)->ts_cpu = cpu;
1212  /*
1213  * If the lock matches just return the queue.
1214  */
1215  if (td->td_lock == TDQ_LOCKPTR(tdq))
1216  return (tdq);
1217 #ifdef notyet
1218  /*
1219  * If the thread isn't running its lockptr is a
1220  * turnstile or a sleepqueue. We can just lock_set without
1221  * blocking.
1222  */
1223  if (TD_CAN_RUN(td)) {
1224  TDQ_LOCK(tdq);
1225  thread_lock_set(td, TDQ_LOCKPTR(tdq));
1226  return (tdq);
1227  }
1228 #endif
1229  /*
1230  * The hard case: migration. We need to block the thread first to
1231  * prevent order reversals with other cpus' locks.
1232  */
1233  spinlock_enter();
1234  thread_lock_block(td);
1235  TDQ_LOCK(tdq);
1236  thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
1237  spinlock_exit();
1238  return (tdq);
1239 }
1240 
1241 SCHED_STAT_DEFINE(pickcpu_intrbind, "Soft interrupt binding");
1242 SCHED_STAT_DEFINE(pickcpu_idle_affinity, "Picked idle cpu based on affinity");
1243 SCHED_STAT_DEFINE(pickcpu_affinity, "Picked cpu based on affinity");
1244 SCHED_STAT_DEFINE(pickcpu_lowest, "Selected lowest load");
1245 SCHED_STAT_DEFINE(pickcpu_local, "Migrated to current cpu");
1246 SCHED_STAT_DEFINE(pickcpu_migration, "Selection may have caused migration");
1247 
1248 static int
1249 sched_pickcpu(struct thread *td, int flags)
1250 {
1251  struct cpu_group *cg, *ccg;
1252  struct td_sched *ts;
1253  struct tdq *tdq;
1254  cpuset_t mask;
1255  int cpu, pri, self;
1256 
1257  self = PCPU_GET(cpuid);
1258  ts = td_get_sched(td);
1259  KASSERT(!CPU_ABSENT(ts->ts_cpu), ("sched_pickcpu: Start scheduler on "
1260  "absent CPU %d for thread %s.", ts->ts_cpu, td->td_name));
1261  if (smp_started == 0)
1262  return (self);
1263  /*
1264  * Don't migrate a running thread from sched_switch().
1265  */
1266  if ((flags & SRQ_OURSELF) || !THREAD_CAN_MIGRATE(td))
1267  return (ts->ts_cpu);
1268  /*
1269  * Prefer to run interrupt threads on the processors that generate
1270  * the interrupt.
1271  */
1272  pri = td->td_priority;
1273  if (td->td_priority <= PRI_MAX_ITHD && THREAD_CAN_SCHED(td, self) &&
1274  curthread->td_intr_nesting_level && ts->ts_cpu != self) {
1275  SCHED_STAT_INC(pickcpu_intrbind);
1276  ts->ts_cpu = self;
1277  if (TDQ_CPU(self)->tdq_lowpri > pri) {
1278  SCHED_STAT_INC(pickcpu_affinity);
1279  return (ts->ts_cpu);
1280  }
1281  }
1282  /*
1283  * If the thread can run on the last cpu and the affinity has not
1284  * expired and it is idle, run it there.
1285  */
1286  tdq = TDQ_CPU(ts->ts_cpu);
1287  cg = tdq->tdq_cg;
1288  if (THREAD_CAN_SCHED(td, ts->ts_cpu) &&
1289  tdq->tdq_lowpri >= PRI_MIN_IDLE &&
1290  SCHED_AFFINITY(ts, CG_SHARE_L2)) {
1291  if (cg->cg_flags & CG_FLAG_THREAD) {
1292  CPUSET_FOREACH(cpu, cg->cg_mask) {
1293  if (TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE)
1294  break;
1295  }
1296  } else
1297  cpu = INT_MAX;
1298  if (cpu > mp_maxid) {
1299  SCHED_STAT_INC(pickcpu_idle_affinity);
1300  return (ts->ts_cpu);
1301  }
1302  }
1303  /*
1304  * Search for the last level cache CPU group in the tree.
1305  * Skip caches with expired affinity time and SMT groups.
1306  * Affinity to higher level caches will be handled less aggressively.
1307  */
1308  for (ccg = NULL; cg != NULL; cg = cg->cg_parent) {
1309  if (cg->cg_flags & CG_FLAG_THREAD)
1310  continue;
1311  if (!SCHED_AFFINITY(ts, cg->cg_level))
1312  continue;
1313  ccg = cg;
1314  }
1315  if (ccg != NULL)
1316  cg = ccg;
1317  cpu = -1;
1318  /* Search the group for the least loaded idle CPU we can run now. */
1319  mask = td->td_cpuset->cs_mask;
1320  if (cg != NULL && cg != cpu_top &&
1321  CPU_CMP(&cg->cg_mask, &cpu_top->cg_mask) != 0)
1322  cpu = sched_lowest(cg, mask, max(pri, PRI_MAX_TIMESHARE),
1323  INT_MAX, ts->ts_cpu);
1324  /* Search globally for the least loaded CPU we can run now. */
1325  if (cpu == -1)
1326  cpu = sched_lowest(cpu_top, mask, pri, INT_MAX, ts->ts_cpu);
1327  /* Search globally for the least loaded CPU. */
1328  if (cpu == -1)
1329  cpu = sched_lowest(cpu_top, mask, -1, INT_MAX, ts->ts_cpu);
1330  KASSERT(cpu != -1, ("sched_pickcpu: Failed to find a cpu."));
1331  KASSERT(!CPU_ABSENT(cpu), ("sched_pickcpu: Picked absent CPU %d.", cpu));
1332  /*
1333  * Compare the lowest loaded cpu to current cpu.
1334  */
1335  if (THREAD_CAN_SCHED(td, self) && TDQ_CPU(self)->tdq_lowpri > pri &&
1336  TDQ_CPU(cpu)->tdq_lowpri < PRI_MIN_IDLE &&
1337  TDQ_CPU(self)->tdq_load <= TDQ_CPU(cpu)->tdq_load + 1) {
1338  SCHED_STAT_INC(pickcpu_local);
1339  cpu = self;
1340  } else
1341  SCHED_STAT_INC(pickcpu_lowest);
1342  if (cpu != ts->ts_cpu)
1343  SCHED_STAT_INC(pickcpu_migration);
1344  return (cpu);
1345 }
1346 #endif
1347 
1348 /*
1349  * Pick the highest priority task we have and return it.
1350  */
1351 static struct thread *
1352 tdq_choose(struct tdq *tdq)
1353 {
1354  struct thread *td;
1355 
1356  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
1357  td = runq_choose(&tdq->tdq_realtime);
1358  if (td != NULL)
1359  return (td);
1360  td = runq_choose_from(&tdq->tdq_timeshare, tdq->tdq_ridx);
1361  if (td != NULL) {
1362  KASSERT(td->td_priority >= PRI_MIN_BATCH,
1363  ("tdq_choose: Invalid priority on timeshare queue %d",
1364  td->td_priority));
1365  return (td);
1366  }
1367  td = runq_choose(&tdq->tdq_idle);
1368  if (td != NULL) {
1369  KASSERT(td->td_priority >= PRI_MIN_IDLE,
1370  ("tdq_choose: Invalid priority on idle queue %d",
1371  td->td_priority));
1372  return (td);
1373  }
1374 
1375  return (NULL);
1376 }
1377 
1378 /*
1379  * Initialize a thread queue.
1380  */
1381 static void
1382 tdq_setup(struct tdq *tdq)
1383 {
1384 
1385  if (bootverbose)
1386  printf("ULE: setup cpu %d\n", TDQ_ID(tdq));
1387  runq_init(&tdq->tdq_realtime);
1388  runq_init(&tdq->tdq_timeshare);
1389  runq_init(&tdq->tdq_idle);
1390  snprintf(tdq->tdq_name, sizeof(tdq->tdq_name),
1391  "sched lock %d", (int)TDQ_ID(tdq));
1392  mtx_init(&tdq->tdq_lock, tdq->tdq_name, "sched lock",
1393  MTX_SPIN | MTX_RECURSE);
1394 #ifdef KTR
1395  snprintf(tdq->tdq_loadname, sizeof(tdq->tdq_loadname),
1396  "CPU %d load", (int)TDQ_ID(tdq));
1397 #endif
1398 }
1399 
1400 #ifdef SMP
1401 static void
1402 sched_setup_smp(void)
1403 {
1404  struct tdq *tdq;
1405  int i;
1406 
1407  cpu_top = smp_topo();
1408  CPU_FOREACH(i) {
1409  tdq = TDQ_CPU(i);
1410  tdq_setup(tdq);
1411  tdq->tdq_cg = smp_topo_find(cpu_top, i);
1412  if (tdq->tdq_cg == NULL)
1413  panic("Can't find cpu group for %d\n", i);
1414  }
1415  balance_tdq = TDQ_SELF();
1416  sched_balance();
1417 }
1418 #endif
1419 
1420 /*
1421  * Setup the thread queues and initialize the topology based on MD
1422  * information.
1423  */
1424 static void
1425 sched_setup(void *dummy)
1426 {
1427  struct tdq *tdq;
1428 
1429  tdq = TDQ_SELF();
1430 #ifdef SMP
1431  sched_setup_smp();
1432 #else
1433  tdq_setup(tdq);
1434 #endif
1435 
1436  /* Add thread0's load since it's running. */
1437  TDQ_LOCK(tdq);
1438  thread0.td_lock = TDQ_LOCKPTR(TDQ_SELF());
1439  tdq_load_add(tdq, &thread0);
1440  tdq->tdq_lowpri = thread0.td_priority;
1441  TDQ_UNLOCK(tdq);
1442 }
1443 
1444 /*
1445  * This routine determines time constants after stathz and hz are setup.
1446  */
1447 /* ARGSUSED */
1448 static void
1449 sched_initticks(void *dummy)
1450 {
1451  int incr;
1452 
1453  realstathz = stathz ? stathz : hz;
1454  sched_slice = realstathz / SCHED_SLICE_DEFAULT_DIVISOR;
1455  sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
1456  hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
1457  realstathz);
1458 
1459  /*
1460  * tickincr is shifted out by 10 to avoid rounding errors due to
1461  * hz not being evenly divisible by stathz on all platforms.
1462  */
1463  incr = (hz << SCHED_TICK_SHIFT) / realstathz;
1464  /*
1465  * This does not work for values of stathz that are more than
1466  * 1 << SCHED_TICK_SHIFT * hz. In practice this does not happen.
1467  */
1468  if (incr == 0)
1469  incr = 1;
1470  tickincr = incr;
1471 #ifdef SMP
1472  /*
1473  * Set the default balance interval now that we know
1474  * what realstathz is.
1475  */
1476  balance_interval = realstathz;
1477  affinity = SCHED_AFFINITY_DEFAULT;
1478 #endif
1479  if (sched_idlespinthresh < 0)
1480  sched_idlespinthresh = 2 * max(10000, 6 * hz) / realstathz;
1481 }
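/*
 * Worked example, assuming hz = 1000 and stathz = 127: sched_slice =
 * 127 / 10 = 12 stathz ticks, sched_slice_min = 12 / 6 = 2, hogticks =
 * (2 * 1000 * 12 + 63) / 127 = 189 hz ticks, tickincr = (1000 << 10) / 127 =
 * 8062, balance_interval = 127, affinity = 1 and sched_idlespinthresh =
 * 2 * 10000 / 127 = 157.
 */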
1482 
1483 
1484 /*
1485  * This is the core of the interactivity algorithm. Determines a score based
1486  * on past behavior. It is the ratio of sleep time to run time scaled to
1487  * a [0, 100] integer. This is the voluntary sleep time of a process, which
1488  * differs from the cpu usage because it does not account for time spent
1489  * waiting on a run-queue. Would be prettier if we had floating point.
1490  *
1491  * When a thread's sleep time is greater than its run time the
1492  * calculation is:
1493  *
1494  * scaling factor
1495  * interactivity score = ---------------------
1496  * sleep time / run time
1497  *
1498  *
1499  * When a thread's run time is greater than its sleep time the
1500  * calculation is:
1501  *
1502  * scaling factor
1503  * interactivity score = --------------------- + scaling factor
1504  * run time / sleep time
1505  */
1506 static int
1507 sched_interact_score(struct thread *td)
1508 {
1509  struct td_sched *ts;
1510  int div;
1511 
1512  ts = td_get_sched(td);
1513  /*
1514  * The score is only needed if this is likely to be an interactive
1515  * task. Don't go through the expense of computing it if there's
1516  * no chance.
1517  */
1518  if (sched_interact <= SCHED_INTERACT_HALF &&
1519  ts->ts_runtime >= ts->ts_slptime)
1520  return (SCHED_INTERACT_HALF);
1521 
1522  if (ts->ts_runtime > ts->ts_slptime) {
1523  div = max(1, ts->ts_runtime / SCHED_INTERACT_HALF);
1524  return (SCHED_INTERACT_HALF +
1525  (SCHED_INTERACT_HALF - (ts->ts_slptime / div)));
1526  }
1527  if (ts->ts_slptime > ts->ts_runtime) {
1528  div = max(1, ts->ts_slptime / SCHED_INTERACT_HALF);
1529  return (ts->ts_runtime / div);
1530  }
1531  /* runtime == slptime */
1532  if (ts->ts_runtime)
1533  return (SCHED_INTERACT_HALF);
1534 
1535  /*
1536  * This can happen if slptime and runtime are 0.
1537  */
1538  return (0);
1539 
1540 }
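/*
 * Example: a thread that has voluntarily slept three times as long as it has
 * run scores about 50 / 3 = 16, well under the default SCHED_INTERACT_THRESH
 * of 30, so it is treated as interactive; invert the ratio (run time three
 * times the sleep time) and the score becomes roughly 50 + (50 - 16) = 84,
 * firmly in batch territory.
 */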
1541 
1542 /*
1543  * Scale the scheduling priority according to the "interactivity" of this
1544  * process.
1545  */
1546 static void
1547 sched_priority(struct thread *td)
1548 {
1549  int score;
1550  int pri;
1551 
1552  if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
1553  return;
1554  /*
1555  * If the score is interactive we place the thread in the realtime
1556  * queue with a priority that is less than kernel and interrupt
1557  * priorities. These threads are not subject to nice restrictions.
1558  *
1559  * Scores greater than this are placed on the normal timeshare queue
1560  * where the priority is partially decided by the most recent cpu
1561  * utilization and the rest is decided by nice value.
1562  *
1563  * The nice value of the process has a linear effect on the calculated
1564  * score. Negative nice values make it easier for a thread to be
1565  * considered interactive.
1566  */
1567  score = imax(0, sched_interact_score(td) + td->td_proc->p_nice);
1568  if (score < sched_interact) {
1569  pri = PRI_MIN_INTERACT;
1570  pri += ((PRI_MAX_INTERACT - PRI_MIN_INTERACT + 1) /
1571  sched_interact) * score;
1572  KASSERT(pri >= PRI_MIN_INTERACT && pri <= PRI_MAX_INTERACT,
1573  ("sched_priority: invalid interactive priority %d score %d",
1574  pri, score));
1575  } else {
1576  pri = SCHED_PRI_MIN;
1577  if (td_get_sched(td)->ts_ticks)
1578  pri += min(SCHED_PRI_TICKS(td_get_sched(td)),
1579  SCHED_PRI_RANGE - 1);
1580  pri += SCHED_PRI_NICE(td->td_proc->p_nice);
1581  KASSERT(pri >= PRI_MIN_BATCH && pri <= PRI_MAX_BATCH,
1582  ("sched_priority: invalid priority %d: nice %d, "
1583  "ticks %d ftick %d ltick %d tick pri %d",
1584  pri, td->td_proc->p_nice, td_get_sched(td)->ts_ticks,
1585  td_get_sched(td)->ts_ftick, td_get_sched(td)->ts_ltick,
1586  SCHED_PRI_TICKS(td_get_sched(td))));
1587  }
1588  sched_user_prio(td, pri);
1589 
1590  return;
1591 }
1592 
1593 /*
1594  * This routine enforces a maximum limit on the amount of scheduling history
1595  * kept. It is called after either the slptime or runtime is adjusted. This
1596  * function is ugly due to integer math.
1597  */
1598 static void
1599 sched_interact_update(struct thread *td)
1600 {
1601  struct td_sched *ts;
1602  u_int sum;
1603 
1604  ts = td_get_sched(td);
1605  sum = ts->ts_runtime + ts->ts_slptime;
1606  if (sum < SCHED_SLP_RUN_MAX)
1607  return;
1608  /*
1609  * This only happens from two places:
1610  * 1) We have added an unusual amount of run time from fork_exit.
1611  * 2) We have added an unusual amount of sleep time from sched_sleep().
1612  */
1613  if (sum > SCHED_SLP_RUN_MAX * 2) {
1614  if (ts->ts_runtime > ts->ts_slptime) {
1615  ts->ts_runtime = SCHED_SLP_RUN_MAX;
1616  ts->ts_slptime = 1;
1617  } else {
1618  ts->ts_slptime = SCHED_SLP_RUN_MAX;
1619  ts->ts_runtime = 1;
1620  }
1621  return;
1622  }
1623  /*
1624  * If we have exceeded by more than 1/5th then the algorithm below
1625  * will not bring us back into range. Dividing by two here forces
1626  * us into the range of [4/5 * SCHED_SLP_RUN_MAX, SCHED_SLP_RUN_MAX]
1627  */
1628  if (sum > (SCHED_SLP_RUN_MAX / 5) * 6) {
1629  ts->ts_runtime /= 2;
1630  ts->ts_slptime /= 2;
1631  return;
1632  }
1633  ts->ts_runtime = (ts->ts_runtime / 5) * 4;
1634  ts->ts_slptime = (ts->ts_slptime / 5) * 4;
1635 }
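/*
 * Example, with hz = 1000: SCHED_SLP_RUN_MAX corresponds to 5 seconds of
 * combined sleep + run history (scaled by SCHED_TICK_SHIFT).  A sum of 5.5 s
 * falls between the limit and 6/5 of it, so both components are scaled by
 * 4/5 giving 4.4 s; a sum between 6 s and 10 s is halved; beyond 10 s (twice
 * the limit) the dominant component is clamped to SCHED_SLP_RUN_MAX and the
 * other to 1.
 */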
1636 
1637 /*
1638  * Scale back the interactivity history when a child thread is created. The
1639  * history is inherited from the parent but the thread may behave totally
1640  * differently. For example, a shell spawning a compiler process. We want
1641  * to learn that the compiler is behaving badly very quickly.
1642  */
1643 static void
1644 sched_interact_fork(struct thread *td)
1645 {
1646  struct td_sched *ts;
1647  int ratio;
1648  int sum;
1649 
1650  ts = td_get_sched(td);
1651  sum = ts->ts_runtime + ts->ts_slptime;
1652  if (sum > SCHED_SLP_RUN_FORK) {
1653  ratio = sum / SCHED_SLP_RUN_FORK;
1654  ts->ts_runtime /= ratio;
1655  ts->ts_slptime /= ratio;
1656  }
1657 }
1658 
1659 /*
1660  * Called from proc0_init() to setup the scheduler fields.
1661  */
1662 void
1663 schedinit(void)
1664 {
1665  struct td_sched *ts0;
1666 
1667  /*
1668  * Set up the scheduler specific parts of thread0.
1669  */
1670  ts0 = td_get_sched(&thread0);
1671  ts0->ts_ltick = ticks;
1672  ts0->ts_ftick = ticks;
1673  ts0->ts_slice = 0;
1674  ts0->ts_cpu = curcpu; /* set valid CPU number */
1675 }
1676 
1677 /*
1678  * This is only somewhat accurate since given many processes of the same
1679  * priority they will switch when their slices run out, which will be
1680  * at most sched_slice stathz ticks.
1681  */
1682 int
1683 sched_rr_interval(void)
1684 {
1685 
1686  /* Convert sched_slice from stathz to hz. */
1687  return (imax(1, (sched_slice * hz + realstathz / 2) / realstathz));
1688 }
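/*
 * With the example sched_initticks() values above (sched_slice = 12,
 * realstathz = 127, hz = 1000) this returns (12 * 1000 + 63) / 127 = 94,
 * i.e. the ~94 ms quantum referenced at SCHED_SLICE_DEFAULT_DIVISOR.
 */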
1689 
1690 /*
1691  * Update the percent cpu tracking information when it is requested or
1692  * the total history exceeds the maximum. We keep a sliding history of
1693  * tick counts that slowly decays. This is less precise than the 4BSD
1694  * mechanism since it happens with less regular and frequent events.
1695  */
1696 static void
1697 sched_pctcpu_update(struct td_sched *ts, int run)
1698 {
1699  int t = ticks;
1700 
1701  /*
1702  * The signed difference may be negative if the thread hasn't run for
1703  * over half of the ticks rollover period.
1704  */
1705  if ((u_int)(t - ts->ts_ltick) >= SCHED_TICK_TARG) {
1706  ts->ts_ticks = 0;
1707  ts->ts_ftick = t - SCHED_TICK_TARG;
1708  } else if (t - ts->ts_ftick >= SCHED_TICK_MAX) {
1709  ts->ts_ticks = (ts->ts_ticks / (ts->ts_ltick - ts->ts_ftick)) *
1710  (ts->ts_ltick - (t - SCHED_TICK_TARG));
1711  ts->ts_ftick = t - SCHED_TICK_TARG;
1712  }
1713  if (run)
1714  ts->ts_ticks += (t - ts->ts_ltick) << SCHED_TICK_SHIFT;
1715  ts->ts_ltick = t;
1716 }
1717 
1718 /*
1719  * Adjust the priority of a thread. Move it to the appropriate run-queue
1720  * if necessary. This is the back-end for several priority related
1721  * functions.
1722  */
1723 static void
1724 sched_thread_priority(struct thread *td, u_char prio)
1725 {
1726  struct td_sched *ts;
1727  struct tdq *tdq;
1728  int oldpri;
1729 
1730  KTR_POINT3(KTR_SCHED, "thread", sched_tdname(td), "prio",
1731  "prio:%d", td->td_priority, "new prio:%d", prio,
1732  KTR_ATTR_LINKED, sched_tdname(curthread));
1733  SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
1734  if (td != curthread && prio < td->td_priority) {
1735  KTR_POINT3(KTR_SCHED, "thread", sched_tdname(curthread),
1736  "lend prio", "prio:%d", td->td_priority, "new prio:%d",
1737  prio, KTR_ATTR_LINKED, sched_tdname(td));
1738  SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
1739  curthread);
1740  }
1741  ts = td_get_sched(td);
1742  THREAD_LOCK_ASSERT(td, MA_OWNED);
1743  if (td->td_priority == prio)
1744  return;
1745  /*
1746  * If the priority has been elevated due to priority
1747  * propagation, we may have to move ourselves to a new
1748  * queue. This could be optimized to not re-add in some
1749  * cases.
1750  */
1751  if (TD_ON_RUNQ(td) && prio < td->td_priority) {
1752  sched_rem(td);
1753  td->td_priority = prio;
1754  sched_add(td, SRQ_BORROWING);
1755  return;
1756  }
1757  /*
1758  * If the thread is currently running we may have to adjust the lowpri
1759  * information so other cpus are aware of our current priority.
1760  */
1761  if (TD_IS_RUNNING(td)) {
1762  tdq = TDQ_CPU(ts->ts_cpu);
1763  oldpri = td->td_priority;
1764  td->td_priority = prio;
1765  if (prio < tdq->tdq_lowpri)
1766  tdq->tdq_lowpri = prio;
1767  else if (tdq->tdq_lowpri == oldpri)
1768  tdq_setlowpri(tdq, td);
1769  return;
1770  }
1771  td->td_priority = prio;
1772 }
1773 
1774 /*
1775  * Update a thread's priority when it is lent another thread's
1776  * priority.
1777  */
1778 void
1779 sched_lend_prio(struct thread *td, u_char prio)
1780 {
1781 
1782  td->td_flags |= TDF_BORROWING;
1783  sched_thread_priority(td, prio);
1784 }
1785 
1786 /*
1787  * Restore a thread's priority when priority propagation is
1788  * over. The prio argument is the minimum priority the thread
1789  * needs to have to satisfy other possible priority lending
1790  * requests. If the thread's regular priority is less
1791  * important than prio, the thread will keep a priority boost
1792  * of prio.
1793  */
1794 void
1795 sched_unlend_prio(struct thread *td, u_char prio)
1796 {
1797  u_char base_pri;
1798 
1799  if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
1800  td->td_base_pri <= PRI_MAX_TIMESHARE)
1801  base_pri = td->td_user_pri;
1802  else
1803  base_pri = td->td_base_pri;
1804  if (prio >= base_pri) {
1805  td->td_flags &= ~TDF_BORROWING;
1806  sched_thread_priority(td, base_pri);
1807  } else
1808  sched_lend_prio(td, prio);
1809 }
1810 
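/*
 * A typical sequence: the turnstile code lends a mutex owner the
 * priority of a higher-priority waiter via sched_lend_prio(), which
 * sets TDF_BORROWING.  When the lock is released, sched_unlend_prio()
 * either restores the thread's own base (or user) priority and clears
 * TDF_BORROWING, or re-lends prio if other waiters still require the
 * boost.
 */
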
1811 /*
1812  * Standard entry for setting the priority to an absolute value.
1813  */
1814 void
1815 sched_prio(struct thread *td, u_char prio)
1816 {
1817  u_char oldprio;
1818 
1819  /* First, update the base priority. */
1820  td->td_base_pri = prio;
1821 
1822  /*
1823  * If the thread is borrowing another thread's priority, don't
1824  * ever lower the priority.
1825  */
1826  if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
1827  return;
1828 
1829  /* Change the real priority. */
1830  oldprio = td->td_priority;
1831  sched_thread_priority(td, prio);
1832 
1833  /*
1834  * If the thread is on a turnstile, then let the turnstile update
1835  * its state.
1836  */
1837  if (TD_ON_LOCK(td) && oldprio != prio)
1838  turnstile_adjust(td, oldprio);
1839 }
1840 
1841 /*
1842  * Set the base user priority; this does not affect the current running priority.
1843  */
1844 void
1845 sched_user_prio(struct thread *td, u_char prio)
1846 {
1847 
1848  td->td_base_user_pri = prio;
1849  if (td->td_lend_user_pri <= prio)
1850  return;
1851  td->td_user_pri = prio;
1852 }
1853 
1854 void
1855 sched_lend_user_prio(struct thread *td, u_char prio)
1856 {
1857 
1858  THREAD_LOCK_ASSERT(td, MA_OWNED);
1859  td->td_lend_user_pri = prio;
1860  td->td_user_pri = min(prio, td->td_base_user_pri);
1861  if (td->td_priority > td->td_user_pri)
1862  sched_prio(td, td->td_user_pri);
1863  else if (td->td_priority != td->td_user_pri)
1864  td->td_flags |= TDF_NEEDRESCHED;
1865 }
1866 
1867 #ifdef SMP
1868 /*
1869  * This tdq is about to idle. Try to steal a thread from another CPU before
1870  * choosing the idle thread.
1871  */
1872 static void
1873 tdq_trysteal(struct tdq *tdq)
1874 {
1875  struct cpu_group *cg;
1876  struct tdq *steal;
1877  cpuset_t mask;
1878  int cpu, i;
1879 
1880  if (smp_started == 0 || trysteal_limit == 0 || tdq->tdq_cg == NULL)
1881  return;
1882  CPU_FILL(&mask);
1883  CPU_CLR(PCPU_GET(cpuid), &mask);
1884  /* We don't want to be preempted while we're iterating. */
1885  spinlock_enter();
1886  TDQ_UNLOCK(tdq);
1887  for (i = 1, cg = tdq->tdq_cg; ; ) {
1888  cpu = sched_highest(cg, mask, steal_thresh);
1889  /*
1890  * If a thread was added while interrupts were disabled don't
1891  * steal one here.
1892  */
1893  if (tdq->tdq_load > 0) {
1894  TDQ_LOCK(tdq);
1895  break;
1896  }
1897  if (cpu == -1) {
1898  i++;
1899  cg = cg->cg_parent;
1900  if (cg == NULL || i > trysteal_limit) {
1901  TDQ_LOCK(tdq);
1902  break;
1903  }
1904  continue;
1905  }
1906  steal = TDQ_CPU(cpu);
1907  /*
1908  * The data returned by sched_highest() is stale and
1909  * the chosen CPU no longer has an eligible thread.
1910  */
1911  if (steal->tdq_load < steal_thresh ||
1912  steal->tdq_transferable == 0)
1913  continue;
1914  tdq_lock_pair(tdq, steal);
1915  /*
1916  * If we get to this point, unconditionally exit the loop
1917  * to bound the time spent in the critical section.
1918  *
1919  * If a thread was added while interrupts were disabled don't
1920  * steal one here.
1921  */
1922  if (tdq->tdq_load > 0) {
1923  TDQ_UNLOCK(steal);
1924  break;
1925  }
1926  /*
1927  * The data returned by sched_highest() is stale and
1928  * the chosen CPU no longer has an eligible thread.
1929  */
1930  if (steal->tdq_load < steal_thresh ||
1931  steal->tdq_transferable == 0) {
1932  TDQ_UNLOCK(steal);
1933  break;
1934  }
1935  /*
1936  * If we fail to acquire one due to affinity restrictions,
1937  * bail out and let the idle thread do a more complete search
1938  * outside of a critical section.
1939  */
1940  if (tdq_move(steal, tdq) == NULL) {
1941  TDQ_UNLOCK(steal);
1942  break;
1943  }
1944  TDQ_UNLOCK(steal);
1945  break;
1946  }
1947  spinlock_exit();
1948 }
1949 #endif
1950 
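/*
 * The search above widens one topology level at a time: starting from
 * this CPU's own group (tdq_cg), sched_highest() picks the most loaded
 * CPU in the group that meets steal_thresh; if none qualifies, the loop
 * moves to cg_parent, giving up after trysteal_limit levels or once the
 * local queue gains work of its own.
 */
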
1951 /*
1952  * Handle migration from sched_switch(). This happens only for
1953  * cpu binding.
1954  */
1955 static struct mtx *
1956 sched_switch_migrate(struct tdq *tdq, struct thread *td, int flags)
1957 {
1958  struct tdq *tdn;
1959 
1960  KASSERT(!CPU_ABSENT(td_get_sched(td)->ts_cpu), ("sched_switch_migrate: "
1961  "thread %s queued on absent CPU %d.", td->td_name,
1962  td_get_sched(td)->ts_cpu));
1963  tdn = TDQ_CPU(td_get_sched(td)->ts_cpu);
1964 #ifdef SMP
1965  tdq_load_rem(tdq, td);
1966  /*
1967  * Do the lock dance required to avoid LOR. We grab an extra
1968  * spinlock nesting to prevent preemption while we're
1969  * not holding either run-queue lock.
1970  */
1971  spinlock_enter();
1972  thread_lock_block(td); /* This releases the lock on tdq. */
1973 
1974  /*
1975  * Acquire both run-queue locks before placing the thread on the new
1976  * run-queue to avoid deadlocks created by placing a thread with a
1977  * blocked lock on the run-queue of a remote processor. The deadlock
1978  * occurs when a third processor attempts to lock the two queues in
1979  * question while the target processor is spinning with its own
1980  * run-queue lock held while waiting for the blocked lock to clear.
1981  */
1982  tdq_lock_pair(tdn, tdq);
1983  tdq_add(tdn, td, flags);
1984  tdq_notify(tdn, td);
1985  TDQ_UNLOCK(tdn);
1986  spinlock_exit();
1987 #endif
1988  return (TDQ_LOCKPTR(tdn));
1989 }
1990 
1991 /*
1992  * Variant of thread_lock_unblock() that does not assume td_lock
1993  * is blocked.
1994  */
1995 static inline void
1996 thread_unblock_switch(struct thread *td, struct mtx *mtx)
1997 {
1998  atomic_store_rel_ptr((volatile uintptr_t *)&td->td_lock,
1999  (uintptr_t)mtx);
2000 }
2001 
2002 /*
2003  * Switch threads. This function has to handle threads coming in while
2004  * blocked for some reason, running, or idle. It also must deal with
2005  * migrating a thread from one queue to another as running threads may
2006  * be assigned elsewhere via binding.
2007  */
2008 void
2009 sched_switch(struct thread *td, struct thread *newtd, int flags)
2010 {
2011  struct tdq *tdq;
2012  struct td_sched *ts;
2013  struct mtx *mtx;
2014  int srqflag;
2015  int cpuid, preempted;
2016 
2017  THREAD_LOCK_ASSERT(td, MA_OWNED);
2018  KASSERT(newtd == NULL, ("sched_switch: Unsupported newtd argument"));
2019 
2020  cpuid = PCPU_GET(cpuid);
2021  tdq = TDQ_CPU(cpuid);
2022  ts = td_get_sched(td);
2023  mtx = td->td_lock;
2024  sched_pctcpu_update(ts, 1);
2025  ts->ts_rltick = ticks;
2026  td->td_lastcpu = td->td_oncpu;
2027  td->td_oncpu = NOCPU;
2028  preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
2029  (flags & SW_PREEMPT) != 0;
2030  td->td_flags &= ~(TDF_NEEDRESCHED | TDF_SLICEEND);
2031  td->td_owepreempt = 0;
2032  if (!TD_IS_IDLETHREAD(td))
2033  tdq->tdq_switchcnt++;
2034  /*
2035  * The lock pointer in an idle thread should never change.  Reset the
2036  * thread state to CAN_RUN as well.
2037  */
2038  if (TD_IS_IDLETHREAD(td)) {
2039  MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2040  TD_SET_CAN_RUN(td);
2041  } else if (TD_IS_RUNNING(td)) {
2042  MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2043  srqflag = preempted ?
2044  SRQ_OURSELF|SRQ_YIELDING|SRQ_PREEMPTED :
2045  SRQ_OURSELF|SRQ_YIELDING;
2046 #ifdef SMP
2047  if (THREAD_CAN_MIGRATE(td) && !THREAD_CAN_SCHED(td, ts->ts_cpu))
2048  ts->ts_cpu = sched_pickcpu(td, 0);
2049 #endif
2050  if (ts->ts_cpu == cpuid)
2051  tdq_runq_add(tdq, td, srqflag);
2052  else {
2053  KASSERT(THREAD_CAN_MIGRATE(td) ||
2054  (ts->ts_flags & TSF_BOUND) != 0,
2055  ("Thread %p shouldn't migrate", td));
2056  mtx = sched_switch_migrate(tdq, td, srqflag);
2057  }
2058  } else {
2059  /* This thread must be going to sleep. */
2060  TDQ_LOCK(tdq);
2061  mtx = thread_lock_block(td);
2062  tdq_load_rem(tdq, td);
2063 #ifdef SMP
2064  if (tdq->tdq_load == 0)
2065  tdq_trysteal(tdq);
2066 #endif
2067  }
2068 
2069 #if (KTR_COMPILE & KTR_SCHED) != 0
2070  if (TD_IS_IDLETHREAD(td))
2071  KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "idle",
2072  "prio:%d", td->td_priority);
2073  else
2074  KTR_STATE3(KTR_SCHED, "thread", sched_tdname(td), KTDSTATE(td),
2075  "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
2076  "lockname:\"%s\"", td->td_lockname);
2077 #endif
2078 
2079  /*
2080  * We enter here with the thread blocked and assigned to the
2081  * appropriate cpu run-queue or sleep-queue and with the current
2082  * thread-queue locked.
2083  */
2084  TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2085  newtd = choosethread();
2086  /*
2087  * Call the MD code to switch contexts if necessary.
2088  */
2089  if (td != newtd) {
2090 #ifdef HWPMC_HOOKS
2091  if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2092  PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
2093 #endif
2094  SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
2095  lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
2096  TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2097  sched_pctcpu_update(td_get_sched(newtd), 0);
2098 
2099 #ifdef KDTRACE_HOOKS
2100  /*
2101  * If DTrace has set the active vtime enum to anything
2102  * other than INACTIVE (0), then it should have set the
2103  * function to call.
2104  */
2105  if (dtrace_vtime_active)
2106  (*dtrace_vtime_switch_func)(newtd);
2107 #endif
2108 
2109  cpu_switch(td, newtd, mtx);
2110  /*
2111  * We may return from cpu_switch on a different cpu. However,
2112  * we always return with td_lock pointing to the current cpu's
2113  * run queue lock.
2114  */
2115  cpuid = PCPU_GET(cpuid);
2116  tdq = TDQ_CPU(cpuid);
2117  lock_profile_obtain_lock_success(
2118  &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2119 
2120  SDT_PROBE0(sched, , , on__cpu);
2121 #ifdef HWPMC_HOOKS
2122  if (PMC_PROC_IS_USING_PMCS(td->td_proc))
2123  PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_IN);
2124 #endif
2125  } else {
2126  thread_unblock_switch(td, mtx);
2127  SDT_PROBE0(sched, , , remain__cpu);
2128  }
2129 
2130  KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
2131  "prio:%d", td->td_priority);
2132 
2133  /*
2134  * Assert that all went well and return.
2135  */
2136  TDQ_LOCK_ASSERT(tdq, MA_OWNED|MA_NOTRECURSED);
2137  MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2138  td->td_oncpu = cpuid;
2139 }
2140 
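/*
 * Throughout the function, "mtx" names the lock that the outgoing
 * thread's td_lock should point at once the switch completes: the local
 * run-queue lock when it stays runnable here, the destination queue's
 * lock after sched_switch_migrate(), or the lock saved by
 * thread_lock_block() when it is going to sleep.  cpu_switch() performs
 * that final hand-off, which is what lets another CPU safely lock and
 * resume the thread after we have switched away.
 */
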
2141 /*
2142  * Adjust thread priorities as a result of a nice request.
2143  */
2144 void
2145 sched_nice(struct proc *p, int nice)
2146 {
2147  struct thread *td;
2148 
2149  PROC_LOCK_ASSERT(p, MA_OWNED);
2150 
2151  p->p_nice = nice;
2152  FOREACH_THREAD_IN_PROC(p, td) {
2153  thread_lock(td);
2154  sched_priority(td);
2155  sched_prio(td, td->td_base_user_pri);
2156  thread_unlock(td);
2157  }
2158 }
2159 
2160 /*
2161  * Record the sleep time for the interactivity scorer.
2162  */
2163 void
2164 sched_sleep(struct thread *td, int prio)
2165 {
2166 
2167  THREAD_LOCK_ASSERT(td, MA_OWNED);
2168 
2169  td->td_slptick = ticks;
2170  if (TD_IS_SUSPENDED(td) || prio >= PSOCK)
2171  td->td_flags |= TDF_CANSWAP;
2172  if (PRI_BASE(td->td_pri_class) != PRI_TIMESHARE)
2173  return;
2174  if (static_boost == 1 && prio)
2175  sched_prio(td, prio);
2176  else if (static_boost && td->td_priority > static_boost)
2177  sched_prio(td, static_boost);
2178 }
2179 
2180 /*
2181  * Schedule a thread to resume execution and record how long it voluntarily
2182  * slept. We also update the pctcpu, interactivity, and priority.
2183  */
2184 void
2185 sched_wakeup(struct thread *td)
2186 {
2187  struct td_sched *ts;
2188  int slptick;
2189 
2190  THREAD_LOCK_ASSERT(td, MA_OWNED);
2191  ts = td_get_sched(td);
2192  td->td_flags &= ~TDF_CANSWAP;
2193  /*
2194  * If we slept for more than a tick update our interactivity and
2195  * priority.
2196  */
2197  slptick = td->td_slptick;
2198  td->td_slptick = 0;
2199  if (slptick && slptick != ticks) {
2200  ts->ts_slptime += (ticks - slptick) << SCHED_TICK_SHIFT;
2201  sched_interact_update(td);
2202  sched_pctcpu_update(ts, 0);
2203  }
2204  /*
2205  * Reset the slice value since we slept and advanced the round-robin.
2206  */
2207  ts->ts_slice = 0;
2208  sched_add(td, SRQ_BORING);
2209 }
2210 
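/*
 * Sleep time is accumulated in the same fixed-point units as run time
 * (ticks shifted by SCHED_TICK_SHIFT), so sched_interact_score() can
 * weigh the two directly when deciding whether a thread still counts
 * as interactive after it wakes.
 */
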
2211 /*
2212  * Penalize the parent for creating a new child and initialize the child's
2213  * priority.
2214  */
2215 void
2216 sched_fork(struct thread *td, struct thread *child)
2217 {
2218  THREAD_LOCK_ASSERT(td, MA_OWNED);
2219  sched_pctcpu_update(td_get_sched(td), 1);
2220  sched_fork_thread(td, child);
2221  /*
2222  * Penalize the parent and child for forking.
2223  */
2224  sched_interact_fork(child);
2225  sched_priority(child);
2226  td_get_sched(td)->ts_runtime += tickincr;
2227  sched_interact_update(td);
2228  sched_priority(td);
2229 }
2230 
2231 /*
2232  * Fork a new thread, may be within the same process.
2233  */
2234 void
2235 sched_fork_thread(struct thread *td, struct thread *child)
2236 {
2237  struct td_sched *ts;
2238  struct td_sched *ts2;
2239  struct tdq *tdq;
2240 
2241  tdq = TDQ_SELF();
2242  THREAD_LOCK_ASSERT(td, MA_OWNED);
2243  /*
2244  * Initialize child.
2245  */
2246  ts = td_get_sched(td);
2247  ts2 = td_get_sched(child);
2248  child->td_oncpu = NOCPU;
2249  child->td_lastcpu = NOCPU;
2250  child->td_lock = TDQ_LOCKPTR(tdq);
2251  child->td_cpuset = cpuset_ref(td->td_cpuset);
2252  child->td_domain.dr_policy = td->td_cpuset->cs_domain;
2253  ts2->ts_cpu = ts->ts_cpu;
2254  ts2->ts_flags = 0;
2255  /*
2256  * Grab our parent's CPU estimation information.
2257  */
2258  ts2->ts_ticks = ts->ts_ticks;
2259  ts2->ts_ltick = ts->ts_ltick;
2260  ts2->ts_ftick = ts->ts_ftick;
2261  /*
2262  * Do not inherit any borrowed priority from the parent.
2263  */
2264  child->td_priority = child->td_base_pri;
2265  /*
2266  * And update interactivity score.
2267  */
2268  ts2->ts_slptime = ts->ts_slptime;
2269  ts2->ts_runtime = ts->ts_runtime;
2270  /* Attempt to quickly learn interactivity. */
2271  ts2->ts_slice = tdq_slice(tdq) - sched_slice_min;
2272 #ifdef KTR
2273  bzero(ts2->ts_name, sizeof(ts2->ts_name));
2274 #endif
2275 }
2276 
2277 /*
2278  * Adjust the priority class of a thread.
2279  */
2280 void
2281 sched_class(struct thread *td, int class)
2282 {
2283 
2284  THREAD_LOCK_ASSERT(td, MA_OWNED);
2285  if (td->td_pri_class == class)
2286  return;
2287  td->td_pri_class = class;
2288 }
2289 
2290 /*
2291  * Return some of the child's priority and interactivity to the parent.
2292  */
2293 void
2294 sched_exit(struct proc *p, struct thread *child)
2295 {
2296  struct thread *td;
2297 
2298  KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "proc exit",
2299  "prio:%d", child->td_priority);
2300  PROC_LOCK_ASSERT(p, MA_OWNED);
2301  td = FIRST_THREAD_IN_PROC(p);
2302  sched_exit_thread(td, child);
2303 }
2304 
2305 /*
2306  * Penalize another thread for the time spent on this one. This helps to
2307  * worsen the priority and interactivity of processes which schedule batch
2308  * jobs such as make. This has little effect on the make process itself but
2309  * causes new processes spawned by it to receive worse scores immediately.
2310  */
2311 void
2312 sched_exit_thread(struct thread *td, struct thread *child)
2313 {
2314 
2315  KTR_STATE1(KTR_SCHED, "thread", sched_tdname(child), "thread exit",
2316  "prio:%d", child->td_priority);
2317  /*
2318  * Give the child's runtime to the parent without returning the
2319  * sleep time as a penalty to the parent. This causes shells that
2320  * launch expensive things to mark their children as expensive.
2321  */
2322  thread_lock(td);
2323  td_get_sched(td)->ts_runtime += td_get_sched(child)->ts_runtime;
2324  sched_interact_update(td);
2325  sched_priority(td);
2326  thread_unlock(td);
2327 }
2328 
2329 void
2330 sched_preempt(struct thread *td)
2331 {
2332  struct tdq *tdq;
2333 
2334  SDT_PROBE2(sched, , , surrender, td, td->td_proc);
2335 
2336  thread_lock(td);
2337  tdq = TDQ_SELF();
2338  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2339  tdq->tdq_ipipending = 0;
2340  if (td->td_priority > tdq->tdq_lowpri) {
2341  int flags;
2342 
2343  flags = SW_INVOL | SW_PREEMPT;
2344  if (td->td_critnest > 1)
2345  td->td_owepreempt = 1;
2346  else if (TD_IS_IDLETHREAD(td))
2347  mi_switch(flags | SWT_REMOTEWAKEIDLE, NULL);
2348  else
2349  mi_switch(flags | SWT_REMOTEPREEMPT, NULL);
2350  }
2351  thread_unlock(td);
2352 }
2353 
2354 /*
2355  * Fix priorities on return to user-space. Priorities may be elevated due
2356  * to static priorities in msleep() or similar.
2357  */
2358 void
2359 sched_userret_slowpath(struct thread *td)
2360 {
2361 
2362  thread_lock(td);
2363  td->td_priority = td->td_user_pri;
2364  td->td_base_pri = td->td_user_pri;
2365  tdq_setlowpri(TDQ_SELF(), td);
2366  thread_unlock(td);
2367 }
2368 
2369 /*
2370  * Handle a stathz tick. This is really only relevant for timeshare
2371  * threads.
2372  */
2373 void
2374 sched_clock(struct thread *td)
2375 {
2376  struct tdq *tdq;
2377  struct td_sched *ts;
2378 
2379  THREAD_LOCK_ASSERT(td, MA_OWNED);
2380  tdq = TDQ_SELF();
2381 #ifdef SMP
2382  /*
2383  * We run the long term load balancer infrequently on the first cpu.
2384  */
2385  if (balance_tdq == tdq) {
2386  if (balance_ticks && --balance_ticks == 0)
2387  sched_balance();
2388  }
2389 #endif
2390  /*
2391  * Save the old switch count so we have a record of the last tick's
2392  * activity. Initialize the new switch count based on our load.
2393  * If there is some activity seed it to reflect that.
2394  */
2395  tdq->tdq_oldswitchcnt = tdq->tdq_switchcnt;
2396  tdq->tdq_switchcnt = tdq->tdq_load;
2397  /*
2398  * Advance the insert index once for each tick to ensure that all
2399  * threads get a chance to run.
2400  */
2401  if (tdq->tdq_idx == tdq->tdq_ridx) {
2402  tdq->tdq_idx = (tdq->tdq_idx + 1) % RQ_NQS;
2403  if (TAILQ_EMPTY(&tdq->tdq_timeshare.rq_queues[tdq->tdq_ridx]))
2404  tdq->tdq_ridx = tdq->tdq_idx;
2405  }
2406  ts = td_get_sched(td);
2407  sched_pctcpu_update(ts, 1);
2408  if (td->td_pri_class & PRI_FIFO_BIT)
2409  return;
2410  if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE) {
2411  /*
2412  * We used a tick; charge it to the thread so
2413  * that we can compute our interactivity.
2414  */
2415  td_get_sched(td)->ts_runtime += tickincr;
2416  sched_interact_update(td);
2417  sched_priority(td);
2418  }
2419 
2420  /*
2421  * Force a context switch if the current thread has used up a full
2422  * time slice (default is 100ms).
2423  */
2424  if (!TD_IS_IDLETHREAD(td) && ++ts->ts_slice >= tdq_slice(tdq)) {
2425  ts->ts_slice = 0;
2426  td->td_flags |= TDF_NEEDRESCHED | TDF_SLICEEND;
2427  }
2428 }
2429 
2430 u_int
2431 sched_estcpu(struct thread *td __unused)
2432 {
2433 
2434  return (0);
2435 }
2436 
2437 /*
2438  * Return whether the current CPU has runnable tasks. Used for in-kernel
2439  * cooperative idle threads.
2440  */
2441 int
2442 sched_runnable(void)
2443 {
2444  struct tdq *tdq;
2445  int load;
2446 
2447  load = 1;
2448 
2449  tdq = TDQ_SELF();
2450  if ((curthread->td_flags & TDF_IDLETD) != 0) {
2451  if (tdq->tdq_load > 0)
2452  goto out;
2453  } else
2454  if (tdq->tdq_load - 1 > 0)
2455  goto out;
2456  load = 0;
2457 out:
2458  return (load);
2459 }
2460 
2461 /*
2462  * Choose the highest priority thread to run. The thread is removed from
2463  * the run-queue while running however the load remains. For SMP we set
2464  * the tdq in the global idle bitmask if it idles here.
2465  */
2466 struct thread *
2467 sched_choose(void)
2468 {
2469  struct thread *td;
2470  struct tdq *tdq;
2471 
2472  tdq = TDQ_SELF();
2473  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2474  td = tdq_choose(tdq);
2475  if (td) {
2476  tdq_runq_rem(tdq, td);
2477  tdq->tdq_lowpri = td->td_priority;
2478  return (td);
2479  }
2480  tdq->tdq_lowpri = PRI_MAX_IDLE;
2481  return (PCPU_GET(idlethread));
2482 }
2483 
2484 /*
2485  * Set owepreempt if necessary.  Preemption never happens directly in ULE;
2486  * we always request it once we exit a critical section.
2487  */
2488 static inline void
2489 sched_setpreempt(struct thread *td)
2490 {
2491  struct thread *ctd;
2492  int cpri;
2493  int pri;
2494 
2495  THREAD_LOCK_ASSERT(curthread, MA_OWNED);
2496 
2497  ctd = curthread;
2498  pri = td->td_priority;
2499  cpri = ctd->td_priority;
2500  if (pri < cpri)
2501  ctd->td_flags |= TDF_NEEDRESCHED;
2502  if (panicstr != NULL || pri >= cpri || cold || TD_IS_INHIBITED(ctd))
2503  return;
2504  if (!sched_shouldpreempt(pri, cpri, 0))
2505  return;
2506  ctd->td_owepreempt = 1;
2507 }
2508 
2509 /*
2510  * Add a thread to a thread queue. Select the appropriate runq and add the
2511  * thread to it. This is the internal function called when the tdq is
2512  * predetermined.
2513  */
2514 void
2515 tdq_add(struct tdq *tdq, struct thread *td, int flags)
2516 {
2517 
2518  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2519  KASSERT((td->td_inhibitors == 0),
2520  ("sched_add: trying to run inhibited thread"));
2521  KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)),
2522  ("sched_add: bad thread state"));
2523  KASSERT(td->td_flags & TDF_INMEM,
2524  ("sched_add: thread swapped out"));
2525 
2526  if (td->td_priority < tdq->tdq_lowpri)
2527  tdq->tdq_lowpri = td->td_priority;
2528  tdq_runq_add(tdq, td, flags);
2529  tdq_load_add(tdq, td);
2530 }
2531 
2532 /*
2533  * Select the target thread queue and add a thread to it. Request
2534  * preemption or IPI a remote processor if required.
2535  */
2536 void
2537 sched_add(struct thread *td, int flags)
2538 {
2539  struct tdq *tdq;
2540 #ifdef SMP
2541  int cpu;
2542 #endif
2543 
2544  KTR_STATE2(KTR_SCHED, "thread", sched_tdname(td), "runq add",
2545  "prio:%d", td->td_priority, KTR_ATTR_LINKED,
2546  sched_tdname(curthread));
2547  KTR_POINT1(KTR_SCHED, "thread", sched_tdname(curthread), "wokeup",
2548  KTR_ATTR_LINKED, sched_tdname(td));
2549  SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
2550  flags & SRQ_PREEMPTED);
2551  THREAD_LOCK_ASSERT(td, MA_OWNED);
2552  /*
2553  * Recalculate the priority before we select the target cpu or
2554  * run-queue.
2555  */
2556  if (PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
2557  sched_priority(td);
2558 #ifdef SMP
2559  /*
2560  * Pick the destination cpu and if it isn't ours transfer to the
2561  * target cpu.
2562  */
2563  cpu = sched_pickcpu(td, flags);
2564  tdq = sched_setcpu(td, cpu, flags);
2565  tdq_add(tdq, td, flags);
2566  if (cpu != PCPU_GET(cpuid)) {
2567  tdq_notify(tdq, td);
2568  return;
2569  }
2570 #else
2571  tdq = TDQ_SELF();
2572  TDQ_LOCK(tdq);
2573  /*
2574  * Now that the thread is moving to the run-queue, set the lock
2575  * to the scheduler's lock.
2576  */
2577  thread_lock_set(td, TDQ_LOCKPTR(tdq));
2578  tdq_add(tdq, td, flags);
2579 #endif
2580  if (!(flags & SRQ_YIELDING))
2581  sched_setpreempt(td);
2582 }
2583 
2584 /*
2585  * Remove a thread from a run-queue without running it. This is used
2586  * when we're stealing a thread from a remote queue. Otherwise all threads
2587  * exit by calling sched_exit_thread() and sched_throw() themselves.
2588  */
2589 void
2590 sched_rem(struct thread *td)
2591 {
2592  struct tdq *tdq;
2593 
2594  KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "runq rem",
2595  "prio:%d", td->td_priority);
2596  SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
2597  tdq = TDQ_CPU(td_get_sched(td)->ts_cpu);
2598  TDQ_LOCK_ASSERT(tdq, MA_OWNED);
2599  MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2600  KASSERT(TD_ON_RUNQ(td),
2601  ("sched_rem: thread not on run queue"));
2602  tdq_runq_rem(tdq, td);
2603  tdq_load_rem(tdq, td);
2604  TD_SET_CAN_RUN(td);
2605  if (td->td_priority == tdq->tdq_lowpri)
2606  tdq_setlowpri(tdq, NULL);
2607 }
2608 
2609 /*
2610  * Fetch cpu utilization information. Updates on demand.
2611  */
2612 fixpt_t
2613 sched_pctcpu(struct thread *td)
2614 {
2615  fixpt_t pctcpu;
2616  struct td_sched *ts;
2617 
2618  pctcpu = 0;
2619  ts = td_get_sched(td);
2620 
2621  THREAD_LOCK_ASSERT(td, MA_OWNED);
2622  sched_pctcpu_update(ts, TD_IS_RUNNING(td));
2623  if (ts->ts_ticks) {
2624  int rtick;
2625 
2626  /* How many rtick per second ? */
2627  rtick = min(SCHED_TICK_HZ(ts) / SCHED_TICK_SECS, hz);
2628  pctcpu = (FSCALE * ((FSCALE * rtick)/hz)) >> FSHIFT;
2629  }
2630 
2631  return (pctcpu);
2632 }
2633 
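/*
 * rtick above approximates the number of hz ticks per second the thread
 * was charged over its sampling window, so the result is roughly
 * FSCALE * rtick / hz in fixed point: a thread charged on every tick
 * reports FSCALE (100%), one charged on half of them about FSCALE / 2.
 */
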
2634 /*
2635  * Enforce affinity settings for a thread. Called after adjustments to
2636  * cpumask.
2637  */
2638 void
2639 sched_affinity(struct thread *td)
2640 {
2641 #ifdef SMP
2642  struct td_sched *ts;
2643 
2644  THREAD_LOCK_ASSERT(td, MA_OWNED);
2645  ts = td_get_sched(td);
2646  if (THREAD_CAN_SCHED(td, ts->ts_cpu))
2647  return;
2648  if (TD_ON_RUNQ(td)) {
2649  sched_rem(td);
2650  sched_add(td, SRQ_BORING);
2651  return;
2652  }
2653  if (!TD_IS_RUNNING(td))
2654  return;
2655  /*
2656  * Force a switch before returning to userspace. If the
2657  * target thread is not running locally send an ipi to force
2658  * the issue.
2659  */
2660  td->td_flags |= TDF_NEEDRESCHED;
2661  if (td != curthread)
2662  ipi_cpu(ts->ts_cpu, IPI_PREEMPT);
2663 #endif
2664 }
2665 
2666 /*
2667  * Bind a thread to a target cpu.
2668  */
2669 void
2670 sched_bind(struct thread *td, int cpu)
2671 {
2672  struct td_sched *ts;
2673 
2674  THREAD_LOCK_ASSERT(td, MA_OWNED|MA_NOTRECURSED);
2675  KASSERT(td == curthread, ("sched_bind: can only bind curthread"));
2676  ts = td_get_sched(td);
2677  if (ts->ts_flags & TSF_BOUND)
2678  sched_unbind(td);
2679  KASSERT(THREAD_CAN_MIGRATE(td), ("%p must be migratable", td));
2680  ts->ts_flags |= TSF_BOUND;
2681  sched_pin();
2682  if (PCPU_GET(cpuid) == cpu)
2683  return;
2684  ts->ts_cpu = cpu;
2685  /* When we return from mi_switch we'll be on the correct cpu. */
2686  mi_switch(SW_VOL, NULL);
2687 }
2688 
2689 /*
2690  * Release a bound thread.
2691  */
2692 void
2693 sched_unbind(struct thread *td)
2694 {
2695  struct td_sched *ts;
2696 
2697  THREAD_LOCK_ASSERT(td, MA_OWNED);
2698  KASSERT(td == curthread, ("sched_unbind: can only unbind curthread"));
2699  ts = td_get_sched(td);
2700  if ((ts->ts_flags & TSF_BOUND) == 0)
2701  return;
2702  ts->ts_flags &= ~TSF_BOUND;
2703  sched_unpin();
2704 }
2705 
2706 int
2707 sched_is_bound(struct thread *td)
2708 {
2709  THREAD_LOCK_ASSERT(td, MA_OWNED);
2710  return (td_get_sched(td)->ts_flags & TSF_BOUND);
2711 }
2712 
2713 /*
2714  * Basic yield call.
2715  */
2716 void
2717 sched_relinquish(struct thread *td)
2718 {
2719  thread_lock(td);
2720  mi_switch(SW_VOL | SWT_RELINQUISH, NULL);
2721  thread_unlock(td);
2722 }
2723 
2724 /*
2725  * Return the total system load.
2726  */
2727 int
2728 sched_load(void)
2729 {
2730 #ifdef SMP
2731  int total;
2732  int i;
2733 
2734  total = 0;
2735  CPU_FOREACH(i)
2736  total += TDQ_CPU(i)->tdq_sysload;
2737  return (total);
2738 #else
2739  return (TDQ_SELF()->tdq_sysload);
2740 #endif
2741 }
2742 
2743 int
2744 sched_sizeof_proc(void)
2745 {
2746  return (sizeof(struct proc));
2747 }
2748 
2749 int
2750 sched_sizeof_thread(void)
2751 {
2752  return (sizeof(struct thread) + sizeof(struct td_sched));
2753 }
2754 
2755 #ifdef SMP
2756 #define TDQ_IDLESPIN(tdq) \
2757  ((tdq)->tdq_cg != NULL && ((tdq)->tdq_cg->cg_flags & CG_FLAG_THREAD) == 0)
2758 #else
2759 #define TDQ_IDLESPIN(tdq) 1
2760 #endif
2761 
2762 /*
2763  * The actual idle process.
2764  */
2765 void
2766 sched_idletd(void *dummy)
2767 {
2768  struct thread *td;
2769  struct tdq *tdq;
2770  int oldswitchcnt, switchcnt;
2771  int i;
2772 
2773  mtx_assert(&Giant, MA_NOTOWNED);
2774  td = curthread;
2775  tdq = TDQ_SELF();
2776  THREAD_NO_SLEEPING();
2777  oldswitchcnt = -1;
2778  for (;;) {
2779  if (tdq->tdq_load) {
2780  thread_lock(td);
2781  mi_switch(SW_VOL | SWT_IDLE, NULL);
2782  thread_unlock(td);
2783  }
2784  switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
2785 #ifdef SMP
2786  if (always_steal || switchcnt != oldswitchcnt) {
2787  oldswitchcnt = switchcnt;
2788  if (tdq_idled(tdq) == 0)
2789  continue;
2790  }
2791  switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
2792 #else
2793  oldswitchcnt = switchcnt;
2794 #endif
2795  /*
2796  * If we're switching very frequently, spin while checking
2797  * for load rather than entering a low power state that
2798  * may require an IPI. However, don't do any busy
2799  * loops while on SMT machines as this simply steals
2800  * cycles from cores doing useful work.
2801  */
2802  if (TDQ_IDLESPIN(tdq) && switchcnt > sched_idlespinthresh) {
2803  for (i = 0; i < sched_idlespins; i++) {
2804  if (tdq->tdq_load)
2805  break;
2806  cpu_spinwait();
2807  }
2808  }
2809 
2810  /* If there was a context switch during the spin, restart it. */
2811  switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
2812  if (tdq->tdq_load != 0 || switchcnt != oldswitchcnt)
2813  continue;
2814 
2815  /* Run main MD idle handler. */
2816  tdq->tdq_cpu_idle = 1;
2817  /*
2818  * Make sure that the tdq_cpu_idle update is globally visible
2819  * before cpu_idle() reads tdq_load.  The order is important
2820  * to avoid a race with tdq_notify().
2821  */
2822  atomic_thread_fence_seq_cst();
2823  /*
2824  * Checking for load again after the fence picks up assigned
2825  * threads often enough to make it worthwhile to do so in
2826  * order to avoid calling cpu_idle().
2827  */
2828  if (tdq->tdq_load != 0) {
2829  tdq->tdq_cpu_idle = 0;
2830  continue;
2831  }
2832  cpu_idle(switchcnt * 4 > sched_idlespinthresh);
2833  tdq->tdq_cpu_idle = 0;
2834 
2835  /*
2836  * Account thread-less hardware interrupts and
2837  * other wakeup reasons equal to context switches.
2838  */
2839  switchcnt = tdq->tdq_switchcnt + tdq->tdq_oldswitchcnt;
2840  if (switchcnt != oldswitchcnt)
2841  continue;
2842  tdq->tdq_switchcnt++;
2843  oldswitchcnt++;
2844  }
2845 }
2846 
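/*
 * The tdq_cpu_idle flag and the fence above pair with tdq_notify():
 * when the remote CPU appears to be in its idle loop, tdq_notify()
 * normally skips the wakeup IPI unless tdq_cpu_idle is set, so this
 * thread must either observe the new tdq_load before committing to
 * cpu_idle() or be woken by the IPI.
 */
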
2847 /*
2848  * A CPU is entering for the first time or a thread is exiting.
2849  */
2850 void
2851 sched_throw(struct thread *td)
2852 {
2853  struct thread *newtd;
2854  struct tdq *tdq;
2855 
2856  tdq = TDQ_SELF();
2857  if (td == NULL) {
2858  /* Correct spinlock nesting and acquire the correct lock. */
2859  TDQ_LOCK(tdq);
2860  spinlock_exit();
2861  PCPU_SET(switchtime, cpu_ticks());
2862  PCPU_SET(switchticks, ticks);
2863  } else {
2864  MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2865  tdq_load_rem(tdq, td);
2866  lock_profile_release_lock(&TDQ_LOCKPTR(tdq)->lock_object);
2867  td->td_lastcpu = td->td_oncpu;
2868  td->td_oncpu = NOCPU;
2869  }
2870  KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
2871  newtd = choosethread();
2872  TDQ_LOCKPTR(tdq)->mtx_lock = (uintptr_t)newtd;
2873  cpu_throw(td, newtd); /* doesn't return */
2874 }
2875 
2876 /*
2877  * This is called from fork_exit(). Just acquire the correct locks and
2878  * let fork do the rest of the work.
2879  */
2880 void
2881 sched_fork_exit(struct thread *td)
2882 {
2883  struct tdq *tdq;
2884  int cpuid;
2885 
2886  /*
2887  * Finish setting up thread glue so that it begins execution in a
2888  * non-nested critical section with the scheduler lock held.
2889  */
2890  cpuid = PCPU_GET(cpuid);
2891  tdq = TDQ_CPU(cpuid);
2892  if (TD_IS_IDLETHREAD(td))
2893  td->td_lock = TDQ_LOCKPTR(tdq);
2894  MPASS(td->td_lock == TDQ_LOCKPTR(tdq));
2895  td->td_oncpu = cpuid;
2896  TDQ_LOCK_ASSERT(tdq, MA_OWNED | MA_NOTRECURSED);
2897  lock_profile_obtain_lock_success(
2898  &TDQ_LOCKPTR(tdq)->lock_object, 0, 0, __FILE__, __LINE__);
2899 
2900  KTR_STATE1(KTR_SCHED, "thread", sched_tdname(td), "running",
2901  "prio:%d", td->td_priority);
2902  SDT_PROBE0(sched, , , on__cpu);
2903 }
2904 
2905 /*
2906  * Create on first use to catch odd startup conditions.
2907  */
2908 char *
2909 sched_tdname(struct thread *td)
2910 {
2911 #ifdef KTR
2912  struct td_sched *ts;
2913 
2914  ts = td_get_sched(td);
2915  if (ts->ts_name[0] == '\0')
2916  snprintf(ts->ts_name, sizeof(ts->ts_name),
2917  "%s tid %d", td->td_name, td->td_tid);
2918  return (ts->ts_name);
2919 #else
2920  return (td->td_name);
2921 #endif
2922 }
2923 
2924 #ifdef KTR
2925 void
2926 sched_clear_tdname(struct thread *td)
2927 {
2928  struct td_sched *ts;
2929 
2930  ts = td_get_sched(td);
2931  ts->ts_name[0] = '\0';
2932 }
2933 #endif
2934 
2935 #ifdef SMP
2936 
2937 /*
2938  * Build the CPU topology dump string.  It is called recursively to collect
2939  * the topology tree.
2940  */
2941 static int
2942 sysctl_kern_sched_topology_spec_internal(struct sbuf *sb, struct cpu_group *cg,
2943  int indent)
2944 {
2945  char cpusetbuf[CPUSETBUFSIZ];
2946  int i, first;
2947 
2948  sbuf_printf(sb, "%*s<group level=\"%d\" cache-level=\"%d\">\n", indent,
2949  "", 1 + indent / 2, cg->cg_level);
2950  sbuf_printf(sb, "%*s <cpu count=\"%d\" mask=\"%s\">", indent, "",
2951  cg->cg_count, cpusetobj_strprint(cpusetbuf, &cg->cg_mask));
2952  first = TRUE;
2953  for (i = 0; i < MAXCPU; i++) {
2954  if (CPU_ISSET(i, &cg->cg_mask)) {
2955  if (!first)
2956  sbuf_printf(sb, ", ");
2957  else
2958  first = FALSE;
2959  sbuf_printf(sb, "%d", i);
2960  }
2961  }
2962  sbuf_printf(sb, "</cpu>\n");
2963 
2964  if (cg->cg_flags != 0) {
2965  sbuf_printf(sb, "%*s <flags>", indent, "");
2966  if ((cg->cg_flags & CG_FLAG_HTT) != 0)
2967  sbuf_printf(sb, "<flag name=\"HTT\">HTT group</flag>");
2968  if ((cg->cg_flags & CG_FLAG_THREAD) != 0)
2969  sbuf_printf(sb, "<flag name=\"THREAD\">THREAD group</flag>");
2970  if ((cg->cg_flags & CG_FLAG_SMT) != 0)
2971  sbuf_printf(sb, "<flag name=\"SMT\">SMT group</flag>");
2972  sbuf_printf(sb, "</flags>\n");
2973  }
2974 
2975  if (cg->cg_children > 0) {
2976  sbuf_printf(sb, "%*s <children>\n", indent, "");
2977  for (i = 0; i < cg->cg_children; i++)
2978  sysctl_kern_sched_topology_spec_internal(sb,
2979  &cg->cg_child[i], indent+2);
2980  sbuf_printf(sb, "%*s </children>\n", indent, "");
2981  }
2982  sbuf_printf(sb, "%*s</group>\n", indent, "");
2983  return (0);
2984 }
2985 
2986 /*
2987  * Sysctl handler for retrieving topology dump. It's a wrapper for
2988  * the recursive sysctl_kern_sched_topology_spec_internal().
2989  */
2990 static int
2991 sysctl_kern_sched_topology_spec(SYSCTL_HANDLER_ARGS)
2992 {
2993  struct sbuf *topo;
2994  int err;
2995 
2996  KASSERT(cpu_top != NULL, ("cpu_top isn't initialized"));
2997 
2998  topo = sbuf_new_for_sysctl(NULL, NULL, 512, req);
2999  if (topo == NULL)
3000  return (ENOMEM);
3001 
3002  sbuf_printf(topo, "<groups>\n");
3003  err = sysctl_kern_sched_topology_spec_internal(topo, cpu_top, 1);
3004  sbuf_printf(topo, "</groups>\n");
3005 
3006  if (err == 0) {
3007  err = sbuf_finish(topo);
3008  }
3009  sbuf_delete(topo);
3010  return (err);
3011 }
3012 
3013 #endif
3014 
3015 static int
3016 sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
3017 {
3018  int error, new_val, period;
3019 
3020  period = 1000000 / realstathz;
3021  new_val = period * sched_slice;
3022  error = sysctl_handle_int(oidp, &new_val, 0, req);
3023  if (error != 0 || req->newptr == NULL)
3024  return (error);
3025  if (new_val <= 0)
3026  return (EINVAL);
3027  sched_slice = imax(1, (new_val + period / 2) / period);
3028  sched_slice_min = sched_slice / SCHED_SLICE_MIN_DIVISOR;
3029  hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
3030  realstathz);
3031  return (0);
3032 }
3033 
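/*
 * Worked example (realstathz = 127 assumed): period = 1000000 / 127 =
 * 7874 us per stathz tick.  Writing 94488 to kern.sched.quantum sets
 * sched_slice = imax(1, (94488 + 3937) / 7874) = 12 stathz ticks, and
 * reading the sysctl back reports 12 * 7874 = 94488 microseconds.
 */
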
3034 SYSCTL_NODE(_kern, OID_AUTO, sched, CTLFLAG_RW, 0, "Scheduler");
3035 SYSCTL_STRING(_kern_sched, OID_AUTO, name, CTLFLAG_RD, "ULE", 0,
3036  "Scheduler name");
3037 SYSCTL_PROC(_kern_sched, OID_AUTO, quantum, CTLTYPE_INT | CTLFLAG_RW,
3038  NULL, 0, sysctl_kern_quantum, "I",
3039  "Quantum for timeshare threads in microseconds");
3040 SYSCTL_INT(_kern_sched, OID_AUTO, slice, CTLFLAG_RW, &sched_slice, 0,
3041  "Quantum for timeshare threads in stathz ticks");
3042 SYSCTL_INT(_kern_sched, OID_AUTO, interact, CTLFLAG_RW, &sched_interact, 0,
3043  "Interactivity score threshold");
3044 SYSCTL_INT(_kern_sched, OID_AUTO, preempt_thresh, CTLFLAG_RW,
3045  &preempt_thresh, 0,
3046  "Maximal (lowest) priority for preemption");
3047 SYSCTL_INT(_kern_sched, OID_AUTO, static_boost, CTLFLAG_RW, &static_boost, 0,
3048  "Assign static kernel priorities to sleeping threads");
3049 SYSCTL_INT(_kern_sched, OID_AUTO, idlespins, CTLFLAG_RW, &sched_idlespins, 0,
3050  "Number of times idle thread will spin waiting for new work");
3051 SYSCTL_INT(_kern_sched, OID_AUTO, idlespinthresh, CTLFLAG_RW,
3052  &sched_idlespinthresh, 0,
3053  "Threshold before we will permit idle thread spinning");
3054 #ifdef SMP
3055 SYSCTL_INT(_kern_sched, OID_AUTO, affinity, CTLFLAG_RW, &affinity, 0,
3056  "Number of hz ticks to keep thread affinity for");
3057 SYSCTL_INT(_kern_sched, OID_AUTO, balance, CTLFLAG_RW, &rebalance, 0,
3058  "Enables the long-term load balancer");
3059 SYSCTL_INT(_kern_sched, OID_AUTO, balance_interval, CTLFLAG_RW,
3060  &balance_interval, 0,
3061  "Average period in stathz ticks to run the long-term balancer");
3062 SYSCTL_INT(_kern_sched, OID_AUTO, steal_idle, CTLFLAG_RW, &steal_idle, 0,
3063  "Attempts to steal work from other cores before idling");
3064 SYSCTL_INT(_kern_sched, OID_AUTO, steal_thresh, CTLFLAG_RW, &steal_thresh, 0,
3065  "Minimum load on remote CPU before we'll steal");
3066 SYSCTL_INT(_kern_sched, OID_AUTO, trysteal_limit, CTLFLAG_RW, &trysteal_limit,
3067  0, "Topological distance limit for stealing threads in sched_switch()");
3068 SYSCTL_INT(_kern_sched, OID_AUTO, always_steal, CTLFLAG_RW, &always_steal, 0,
3069  "Always run the stealer from the idle thread");
3070 SYSCTL_PROC(_kern_sched, OID_AUTO, topology_spec, CTLTYPE_STRING |
3071  CTLFLAG_MPSAFE | CTLFLAG_RD, NULL, 0, sysctl_kern_sched_topology_spec, "A",
3072  "XML dump of detected CPU topology");
3073 #endif
3074 
3075 /* ps compat. All cpu percentages from ULE are weighted. */
3076 static int ccpu = 0;
3077 SYSCTL_INT(_kern, OID_AUTO, ccpu, CTLFLAG_RD, &ccpu, 0, "");