76#include "opt_kstack_pages.h"
77#include "opt_kstack_max_pages.h"
82#include <sys/limits.h>
83#include <sys/kernel.h>
84#include <sys/eventhandler.h>
88#include <sys/kthread.h>
92#include <sys/resourcevar.h>
93#include <sys/refcount.h>
96#include <sys/signalvar.h>
100#include <sys/vmmeter.h>
101#include <sys/rwlock.h>
103#include <sys/sysctl.h>
/*
 * NOTE(review): fragmentary excerpt — the original file's line numbers are
 * fused into the text and interior lines are elided.
 *
 * Register the vm daemon kernel process to be started at boot via
 * kproc_start().
 */
127SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &
vm_kp);
/*
 * Orphaned description-string tails of SYSCTL declarations whose heads are
 * elided from this excerpt (swap_enabled, swap_idle_enabled, and the
 * swap-in/swap-out/daemon timing knobs — see the symbol index at the end
 * of the file for the corresponding variables).
 */
134 "Enable entire process swapout");
137 "Allow swapout on idle criteria");
145 "Guaranteed swapped in time for a process");
154 "Time before a process will be swapped out");
159 "Time between vmdaemon runs");
/* Forward declaration: swap out one process (definition appears below). */
172static int swapout(
struct proc *);
/*
 * NOTE(review): fragment of vm_swapout_object_deactivate() — interior lines
 * elided.  Walks the shadow (backing-object) chain starting at
 * first_object, scanning each object's resident pages.
 */
222 for (
object = first_object;;
object = backing_object) {
/* Scan this object's resident pages (the memq list). */
237 TAILQ_FOREACH(m, &object->
memq, listq) {
/* Pages of backing objects are handled differently from pages of the
 * first object; the differing branches are elided here. */
247 if (
object != first_object)
251 if (
object != first_object)
/*
 * NOTE(review): fragment of vm_swapout_map_deactivate_pages() — it tracks
 * whether the map contains wired entries ("nothingwired") and appears to
 * deactivate the largest object ("bigobj") first; confirm against the full
 * source, as the surrounding logic is elided.
 */
292 nothingwired = FALSE;
295 if (bigobj != NULL) {
321 if (desired == 0 && nothingwired) {
/*
 * Request codes passed to vm_req_vmdaemon(): NORMAL requests swapout under
 * memory pressure; IDLE permits swapout based on the idle criteria
 * (see the swap_idle_threshold1/2 sysctl variables).
 */
332#define VM_SWAP_NORMAL 1
333#define VM_SWAP_IDLE 2
/*
 * NOTE(review): fragment of vm_req_vmdaemon().  Rate-limits daemon wakeups
 * to at most once per second; the second comparison handles wraparound of
 * the ticks counter.
 */
361 static int lastrun = 0;
365 if ((ticks > (lastrun + hz)) || (ticks < lastrun)) {
/*
 * NOTE(review): fragment of vm_daemon() — interior lines elided.  The
 * daemon scans all processes, enforces the RSS resource limit, and keeps
 * racct RSS accounting current.
 */
375 struct rlimit rsslim;
379 int breakout, swapout_flags, tryagain, attempts;
381 uint64_t rsize, ravailable;
/* A nonzero request flag means a swapout pass was asked for. */
394 if (swapout_flags != 0) {
/* Walk every process with allproc_lock held shared. */
412 sx_slock(&allproc_lock);
413 FOREACH_PROC_IN_SYSTEM(p) {
414 vm_pindex_t limit, size;
/* Skip processes that are not fully set up, or are system processes,
 * mid-exec, or exiting. */
421 if (p->p_state != PRS_NORMAL ||
422 p->p_flag & (P_INEXEC | P_SYSTEM | P_WEXIT)) {
/* Look for any thread in a state other than runq/running/sleeping/
 * suspended; the action taken on finding one is elided. */
431 FOREACH_THREAD_IN_PROC(p, td) {
433 if (!TD_ON_RUNQ(td) &&
434 !TD_IS_RUNNING(td) &&
435 !TD_IS_SLEEPING(td) &&
436 !TD_IS_SUSPENDED(td)) {
/* The effective RSS limit is min(rlim_cur, rlim_max). */
450 lim_rlimit_proc(p, RLIMIT_RSS, &rsslim);
452 qmin(rsslim.rlim_cur, rsslim.rlim_max));
459 if ((p->p_flag & P_INMEM) == 0)
468 sx_sunlock(&allproc_lock);
/* Publish the current RSS to racct, then compare against what the
 * resource-accounting framework says is available. */
480 if (p->p_state == PRS_NORMAL)
481 racct_set(p, RACCT_RSS, rsize);
482 ravailable = racct_get_available(p, RACCT_RSS);
484 if (rsize > ravailable) {
494 if (ravailable < rsize -
/* Re-publish and re-check after the (elided) reclamation step. */
507 if (p->p_state == PRS_NORMAL)
508 racct_set(p, RACCT_RSS, rsize);
510 if (rsize > ravailable)
516 sx_slock(&allproc_lock);
519 sx_sunlock(&allproc_lock);
/* Retry the pass, but only up to 10 attempts. */
520 if (tryagain != 0 && attempts <= 10) {
/*
 * NOTE(review): fragment of vm_thread_swapout() — interior lines elided.
 * Lets machine-dependent code save per-thread state, then releases the
 * pages backing the thread's kernel stack.
 */
538 cpu_thread_swapout(td);
539 kaddr = td->td_kstack;
540 pages = td->td_kstack_pages;
/* Kstack pages are indexed in kstack_object by their offset from the
 * start of the kernel address space. */
541 pindex = atop(kaddr - VM_MIN_KERNEL_ADDRESS);
544 for (i = 0; i < pages; i++) {
/* Every kstack page must still be resident when we come to free it. */
547 panic(
"vm_thread_swapout: kstack already missing?");
/*
 * NOTE(review): fragment of vm_thread_swapin() — interior lines elided.
 * Brings the thread's kernel-stack pages back into memory, paging in
 * missing pages in contiguous runs, then lets MD code restore state.
 */
563 int a, count, i, j, pages, rv;
565 kaddr = td->td_kstack;
566 pages = td->td_kstack_pages;
/* i is advanced inside the loop by the size of each run handled. */
569 for (i = 0; i < pages;) {
/* Find the end of the current run of pages needing the same treatment. */
576 for (j = i + 1; j < pages; j++)
582 KASSERT(rv == 1, (
"%s: missing page %p", __func__, ma[i]));
583 count = min(a + 1, j - i);
/* The pager must succeed — a kernel stack cannot be partially restored. */
585 KASSERT(rv ==
VM_PAGER_OK, (
"%s: cannot get kstack for proc %d",
586 __func__, td->td_proc->p_pid));
591 cpu_thread_swapin(td);
/*
 * NOTE(review): fragment of faultin() — interior lines elided.  Brings a
 * swapped-out process back into memory; the proc lock is held on entry.
 */
600 PROC_LOCK_ASSERT(p, MA_OWNED);
/* If another thread is already swapping this process in, just wait for
 * it to finish. */
606 if (p->p_flag & P_SWAPPINGIN) {
607 while (p->p_flag & P_SWAPPINGIN)
608 msleep(&p->p_flag, &p->p_mtx, PVM,
"faultin", 0);
/* Only do the work if the process really is out of memory. */
612 if ((p->p_flag & P_INMEM) == 0) {
/* Mark the swap-in in progress so concurrent callers block above. */
621 p->p_flag |= P_SWAPPINGIN;
623 sx_xlock(&allproc_lock);
/* thread0 (the swapper itself) takes a different path here; the elided
 * branches differ on allproc_lock handling. */
626 if (curthread != &thread0)
628 sx_xunlock(&allproc_lock);
635 FOREACH_THREAD_IN_PROC(p, td)
638 if (curthread != &thread0) {
639 sx_xlock(&allproc_lock);
643 sx_xunlock(&allproc_lock);
/*
 * NOTE(review): fragment of swapper_selector() — interior lines elided.
 * Scans all processes (allproc_lock held shared) to pick the most
 * deserving swapped-out process to bring back into memory.
 */
664 struct proc *p, *res;
666 int ppri, pri, slptime, swtime;
668 sx_assert(&allproc_lock, SA_SLOCKED);
673 FOREACH_PROC_IN_SYSTEM(p) {
/* Skip processes still being created, already resident, or currently
 * in a swap transition. */
675 if (p->p_state == PRS_NEW || (p->p_flag & (P_SWAPPINGOUT |
676 P_SWAPPINGIN | P_INMEM)) != 0) {
/* A process killed while swapped out gets special handling (elided). */
680 if (p->p_state == PRS_NORMAL && (p->p_flag & P_WKILLED) != 0) {
/* Selection priority: time the process has been swapped out plus the
 * thread's sleep time... */
694 swtime = (ticks - p->p_swtick) / hz;
695 FOREACH_THREAD_IN_PROC(p, td) {
/* Only threads whose sole inhibitor is "swapped out" are considered. */
701 if (td->td_inhibitors == TDI_SWAPPED) {
702 slptime = (ticks - td->td_slptick) / hz;
703 pri = swtime + slptime;
/* ...weighted by nice, unless an explicit swap-in was requested. */
704 if ((td->td_flags & TDF_SWAPINREQ) == 0)
705 pri -= p->p_nice * 8;
/*
 * Retry interval for the swapper, in ticks: half of MAXSLP seconds.
 * NOTE(review): the use site is elided from this excerpt — confirm
 * against the full source.
 */
727#define SWAPIN_INTERVAL (MAXSLP * hz / 2)
/* NOTE(review): fragment of the swapper loop — the allproc scan performed
 * between this shared lock/unlock pair is elided. */
750 sx_slock(&allproc_lock);
752 sx_sunlock(&allproc_lock);
/*
 * NOTE(review): fragment — under the proc lock, verifies the process is in
 * the normal state and is neither resident nor already in a swap
 * transition before a swap-in proceeds.  The enclosing function is not
 * visible in this excerpt; confirm against the full source.
 */
757 PROC_LOCK_ASSERT(p, MA_OWNED);
766 if (p->p_state == PRS_NORMAL && (p->p_flag & (P_INMEM |
767 P_SWAPPINGOUT | P_SWAPPINGIN)) == 0) {
/*
 * NOTE(review): fragment of swapout_procs() — interior lines elided.
 * Walks all processes looking for candidates to swap out entirely.
 */
789 bool didswap, doswap;
794 sx_slock(&allproc_lock);
795 FOREACH_PROC_IN_SYSTEM(p) {
/* Disqualify processes that are: not fully created, held (p_lock),
 * system/exiting/mid-exec/traced, stopped for single-threading, already
 * in a swap transition, or (test continues on an elided line) resident. */
803 if (p->p_state != PRS_NORMAL || p->p_lock != 0 || (p->p_flag &
804 (P_SYSTEM | P_WEXIT | P_INEXEC | P_STOPPED_SINGLE |
805 P_TRACED | P_SWAPPINGOUT | P_SWAPPINGIN | P_INMEM)) !=
824 sx_sunlock(&allproc_lock);
/* Per-thread screening: realtime-class threads or threads unsafe to
 * swap out keep the whole process resident (condition continues on
 * elided lines). */
834 FOREACH_THREAD_IN_PROC(p, td) {
836 slptime = (ticks - td->td_slptick) / hz;
837 if (PRI_IS_REALTIME(td->td_pri_class) ||
839 !thread_safetoswapout(td) ||
/* allproc_lock is juggled exclusive/downgrade/shared around the
 * (elided) swapout call itself. */
852 sx_xlock(&allproc_lock);
854 sx_downgrade(&allproc_lock);
856 sx_slock(&allproc_lock);
859 sx_sunlock(&allproc_lock);
/*
 * NOTE(review): fragment of swapclear() — marks a process as resident
 * again and makes its runnable threads schedulable.  Proc lock held.
 */
874 PROC_LOCK_ASSERT(p, MA_OWNED);
876 FOREACH_THREAD_IN_PROC(p, td) {
/* Each thread is back in memory; clear any pending swap-in request. */
878 td->td_flags |= TDF_INMEM;
879 td->td_flags &= ~TDF_SWAPINREQ;
881 if (TD_CAN_RUN(td)) {
882 if (setrunnable(td, 0)) {
/* setrunnable() asking to wake the swapper here is a contradiction:
 * the process is already in memory. */
889 panic(
"not waking up swapper");
/* Process-level state: swap transitions finished, resident again. */
895 p->p_flag &= ~(P_SWAPPINGIN | P_SWAPPINGOUT);
896 p->p_flag |= P_INMEM;
/*
 * NOTE(review): fragment of swapout() — interior lines elided.  Marks the
 * process as leaving memory and swaps out each thread's kernel stack.
 * Proc lock held on entry.
 */
904 PROC_LOCK_ASSERT(p, MA_OWNED);
/* Precondition: resident and not already in a swap transition. */
911 KASSERT((p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) ==
912 P_INMEM, (
"swapout: lost a swapout race?"));
922 p->p_flag &= ~P_INMEM;
923 p->p_flag |= P_SWAPPINGOUT;
/* Re-verify each thread is still safe to swap out before committing;
 * the bail-out path is elided. */
924 FOREACH_THREAD_IN_PROC(p, td) {
926 if (!thread_safetoswapout(td)) {
931 td->td_flags &= ~TDF_INMEM;
/* Charge the swap to the first thread's resource usage. */
935 td = FIRST_THREAD_IN_PROC(p);
936 ++td->td_ru.ru_nswap;
944 FOREACH_THREAD_IN_PROC(p, td)
948 p->p_flag &= ~P_SWAPPINGOUT;
boolean_t pmap_is_referenced(vm_page_t m)
boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
void pmap_qenter(vm_offset_t, vm_page_t *, int)
#define pmap_resident_count(pm)
void pmap_qremove(vm_offset_t, int)
void pmap_remove(pmap_t, vm_offset_t, vm_offset_t)
union vm_map_object object
blockcount_t paging_in_progress
struct vm_object * backing_object
struct vm_object * vm_object
void vmspace_free(struct vmspace *)
struct vmspace * vmspace_acquire_ref(struct proc *)
void vm_thread_stack_back(struct domainset *ds, vm_offset_t kaddr, vm_page_t ma[], int npages, int req_class)
vm_object_t kstack_object
long vmspace_resident_count(struct vmspace *vmspace)
#define vm_map_unlock_read(map)
#define VM_MAP_ENTRY_FOREACH(it, map)
static __inline vm_offset_t vm_map_max(const struct vm_map *map)
static __inline pmap_t vm_map_pmap(vm_map_t map)
#define MAP_ENTRY_IS_SUB_MAP
#define vm_map_trylock_read(map)
static __inline vm_offset_t vm_map_min(const struct vm_map *map)
void vm_object_pip_add(vm_object_t object, short i)
void vm_object_pip_wakeup(vm_object_t object)
#define VM_OBJECT_RLOCK(object)
#define VM_OBJECT_RUNLOCK(object)
#define VM_OBJECT_ASSERT_LOCKED(object)
#define VM_OBJECT_WLOCK(object)
#define VM_OBJECT_WUNLOCK(object)
#define VM_OBJECT_TRYRLOCK(object)
void vm_page_deactivate(vm_page_t m)
vm_page_t vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
int vm_page_tryxbusy(vm_page_t m)
void vm_page_pqbatch_drain(void)
bool vm_page_try_remove_all(vm_page_t m)
void vm_page_unwire(vm_page_t m, uint8_t nqueue)
#define vm_page_assert_xbusied(m)
static bool vm_page_all_valid(vm_page_t m)
#define vm_page_xunbusy_unchecked(m)
static bool vm_page_active(vm_page_t m)
#define vm_page_xunbusy(m)
static __inline void vm_page_dirty(vm_page_t m)
static bool vm_page_wired(vm_page_t m)
#define VM_SWAPPING_ENABLED
domainset_t __read_mostly all_domains
void vm_swapout_run(void)
static void vm_swapout_object_deactivate_page(pmap_t pmap, vm_page_t m, bool unmap)
static struct kproc_desc vm_kp
static bool swapper_wkilled_only(void)
void faultin(struct proc *p)
SYSINIT(vmdaemon, SI_SUB_KTHREAD_VM, SI_ORDER_FIRST, kproc_start, &vm_kp)
static int vm_daemon_timeout
static int swapout(struct proc *)
MTX_SYSINIT(vm_daemon, &vm_daemon_mtx, "vm daemon", MTX_DEF)
void vm_swapout_run_idle(void)
static void vm_swapout_object_deactivate(pmap_t, vm_object_t, long)
static void vm_thread_swapout(struct thread *td)
static int swap_inprogress
static int vm_pageout_req_swapout
static void vm_swapout_map_deactivate_pages(vm_map_t, long)
static struct proc * vmproc
static int vm_swap_enabled
static void swapout_procs(int action)
static struct mtx vm_daemon_mtx
static int vm_daemon_needed
static struct proc * swapper_selector(bool wkilled_only)
SYSCTL_INT(_vm, VM_SWAPPING_ENABLED, swap_enabled, CTLFLAG_RW, &vm_swap_enabled, 0, "Enable entire process swapout")
static int vm_swap_idle_enabled
static int swap_idle_threshold2
static void vm_daemon(void)
static int swap_idle_threshold1
static void vm_thread_swapin(struct thread *td, int oom_alloc)
static void swapclear(struct proc *)
static void vm_req_vmdaemon(int req)