FreeBSD virtual memory subsystem code
vm_pagequeue.h
/*-
 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.h	8.2 (Berkeley) 12/13/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD$
 */

#ifndef _VM_PAGEQUEUE_
#define _VM_PAGEQUEUE_

#ifdef _KERNEL
struct vm_pagequeue {
	struct mtx	pq_mutex;
	struct pglist	pq_pl;
	int		pq_cnt;
	const char	* const pq_name;
	uint64_t	pq_pdpages;
} __aligned(CACHE_LINE_SIZE);

#ifndef VM_BATCHQUEUE_SIZE
#define VM_BATCHQUEUE_SIZE	7
#endif

struct vm_batchqueue {
	vm_page_t	bq_pa[VM_BATCHQUEUE_SIZE];
	int		bq_cnt;
} __aligned(CACHE_LINE_SIZE);

#include <vm/uma.h>
#include <sys/_blockcount.h>
#include <sys/pidctrl.h>
struct sysctl_oid;

/*
 * One vm_domain per NUMA domain.  Contains pagequeues, free page structures,
 * and accounting.
 *
 * Lock Key:
 * f	vmd_free_mtx
 * p	vmd_pageout_mtx
 * d	vm_domainset_lock
 * a	atomic
 * c	const after boot
 * q	page queue lock
 *
 * A unique page daemon thread manages each vm_domain structure and is
 * responsible for ensuring that some free memory is available by freeing
 * inactive pages and aging active pages.  To decide how many pages to
 * process, it uses thresholds derived from the number of pages in the domain:
 *
 *  vmd_page_count
 *       ---
 *        |
 *        |-> vmd_inactive_target (~3%)
 *        |   - The active queue scan target is given by
 *        |     (vmd_inactive_target + vmd_free_target - vmd_free_count).
 *        |
 *        |
 *        |-> vmd_free_target (~2%)
 *        |   - Target for page reclamation.
 *        |
 *        |-> vmd_pageout_wakeup_thresh (~1.8%)
 *        |   - Threshold for waking up the page daemon.
 *        |
 *        |
 *        |-> vmd_free_min (~0.5%)
 *        |   - First low memory threshold.
 *        |   - Causes per-CPU caching to be lazily disabled in UMA.
 *        |   - vm_wait() sleeps below this threshold.
 *        |
 *        |-> vmd_free_severe (~0.25%)
 *        |   - Second low memory threshold.
 *        |   - Triggers aggressive UMA reclamation, disables delayed buffer
 *        |     writes.
 *        |
 *        |-> vmd_free_reserved (~0.13%)
 *        |   - Minimum for VM_ALLOC_NORMAL page allocations.
 *        |-> vmd_pageout_free_min (32 + 2 pages)
 *        |   - Minimum for waking a page daemon thread sleeping in vm_wait().
 *        |-> vmd_interrupt_free_min (2 pages)
 *        |   - Minimum for VM_ALLOC_SYSTEM page allocations.
 *       ---
 *
 *--
 * Free page count regulation:
 *
 * The page daemon attempts to ensure that the free page count is above the
 * free target.  It wakes up periodically (every 100ms) to input the current
 * free page shortage (free_target - free_count) to a PID controller, which
 * in response outputs the number of pages to attempt to reclaim.  The
 * shortage's current magnitude, rate of change, and cumulative value are
 * together used to determine the controller's output.  The page daemon
 * target thus adapts dynamically to the system's demand for free pages,
 * resulting in less burstiness than a simple hysteresis loop.
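 *
 * As an illustrative, hedged sketch (the real loop lives in vm_pageout.c and
 * uses the pidctrl(9) interface), one regulation step looks roughly like:
 *
 *	shortage = pidctrl_daemon(&vmd->vmd_pid, vmd->vmd_free_count);
 *	if (shortage > 0)
 *		... scan the inactive queue, freeing up to "shortage" pages ...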
 *
 * When the free page count drops below the wakeup threshold,
 * vm_domain_allocate() proactively wakes up the page daemon.  This helps
 * ensure that the system responds promptly to a large instantaneous free
 * page shortage.
 *
 * The page daemon also attempts to ensure that some fraction of the system's
 * memory is present in the inactive (I) and laundry (L) page queues, so that
 * it can respond promptly to a sudden free page shortage.  In particular,
 * the page daemon thread aggressively scans active pages so long as the
 * following condition holds:
 *
 *	len(I) + len(L) + free_target - free_count < inactive_target
 *
 * Otherwise, when the inactive target is met, the page daemon periodically
 * scans a small portion of the active queue in order to maintain up-to-date
 * per-page access history.  Unreferenced pages in the active queue thus
 * eventually migrate to the inactive queue.
 *
 * The per-domain laundry thread periodically launders dirty pages based on
 * the number of clean pages freed by the page daemon since the last
 * laundering.  If the page daemon fails to meet its scan target (i.e., the
 * PID controller output) because of a shortage of clean inactive pages, the
 * laundry thread attempts to launder enough pages to meet the free page
 * target.
 *
 *--
 * Page allocation priorities:
 *
 * The system defines three page allocation priorities: VM_ALLOC_NORMAL,
 * VM_ALLOC_SYSTEM and VM_ALLOC_INTERRUPT.  An interrupt-priority allocation
 * can claim any free page.  This priority is used in the pmap layer when
 * attempting to allocate a page for the kernel page tables; in such cases an
 * allocation failure will usually result in a kernel panic.  The system
 * priority is used for most other kernel memory allocations, for instance by
 * UMA's slab allocator or the buffer cache.  Such allocations will fail if
 * the free count is below interrupt_free_min.  All other allocations occur
 * at the normal priority, which is typically used for allocation of user
 * pages, for instance in the page fault handler or when allocating page
 * table pages or pv_entry structures for user pmaps.  Such allocations fail
 * if the free count is below the free_reserved threshold.
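 *
 * A hedged sketch of the per-priority floor selection (the actual check is
 * made in vm_domain_allocate() in vm_page.c):
 *
 *	if (req_class == VM_ALLOC_INTERRUPT)
 *		limit = 0;
 *	else if (req_class == VM_ALLOC_SYSTEM)
 *		limit = vmd->vmd_interrupt_free_min;
 *	else
 *		limit = vmd->vmd_free_reserved;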
 *
 *--
 * Free memory shortages:
 *
 * The system uses the free_min and free_severe thresholds to apply
 * back-pressure and give the page daemon a chance to recover.  When a page
 * allocation fails due to a shortage and the allocating thread cannot handle
 * failure, it may call vm_wait() to sleep until free pages are available.
 * vm_domain_freecnt_inc() wakes sleeping threads once the free page count
 * rises above the free_min threshold; the page daemon and laundry threads
 * are given priority and will wake up once free_count reaches the (much
 * smaller) pageout_free_min threshold.
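 *
 * A common retry idiom, sketched assuming the calling thread may sleep:
 *
 *	while ((m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL)) == NULL)
 *		vm_wait(obj);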
 *
 * On NUMA systems, the domainset iterators always prefer NUMA domains where
 * the free page count is above the free_min threshold.  This means that
 * given the choice between two NUMA domains, one above the free_min
 * threshold and one below, the former will be used to satisfy the allocation
 * request regardless of the domain selection policy.
 *
 * In addition to reclaiming memory from the page queues, the vm_lowmem event
 * fires every ten seconds so long as the system is under memory pressure
 * (i.e., vmd_free_count < vmd_free_target).  This allows kernel subsystems
 * to register for notifications of free page shortages, upon which they may
 * shrink their caches.  Following a vm_lowmem event, UMA's caches are pruned
 * to ensure that they do not contain an excess of unused memory.  When a
 * domain is below the free_min threshold, UMA limits the population of
 * per-CPU caches.  When a domain falls below the free_severe threshold,
 * UMA's caches are completely drained.
 *
 * If the system encounters a global memory shortage, it may resort to the
 * out-of-memory (OOM) killer, which selects a process and delivers SIGKILL
 * in a last-ditch attempt to free up some pages.  Either of the two
 * following conditions will activate the OOM killer:
 *
 *  1. The page daemons collectively fail to reclaim any pages during their
 *     inactive queue scans.  After vm_pageout_oom_seq consecutive scans
 *     fail, the page daemon thread votes for an OOM kill, and an OOM kill is
 *     triggered when all page daemons have voted.  This heuristic is strict
 *     and may fail to trigger even when the system is effectively
 *     deadlocked.
 *
 *  2. Threads in the user fault handler are repeatedly unable to make
 *     progress while allocating a page to satisfy the fault.  After
 *     vm_pfault_oom_attempts page allocation failures with intervening
 *     vm_wait() calls, the faulting thread will trigger an OOM kill.
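 *
 * Both heuristics are tunable at runtime; as a hedged example, from a shell
 * (the values shown are arbitrary):
 *
 *	# sysctl vm.pageout_oom_seq=24
 *	# sysctl vm.pfault_oom_attempts=5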
 */
struct vm_domain {
	struct vm_pagequeue vmd_pagequeues[PQ_COUNT];
	struct mtx_padalign vmd_free_mtx;
	struct mtx_padalign vmd_pageout_mtx;
	struct vm_pgcache {
		int domain;
		int pool;
		uma_zone_t zone;
	} vmd_pgcache[VM_NFREEPOOL];
	struct vmem *vmd_kernel_arena;	/* (c) per-domain kva R/W arena. */
	struct vmem *vmd_kernel_rwx_arena; /* (c) per-domain kva R/W/X arena. */
	u_int vmd_domain;		/* (c) Domain number. */
	u_int vmd_page_count;		/* (c) Total page count. */
	long vmd_segs;			/* (c) bitmask of the segments */
	u_int __aligned(CACHE_LINE_SIZE) vmd_free_count; /* (a,f) free page count */
	u_int vmd_pageout_deficit;	/* (a) Estimated number of pages deficit */
	uint8_t vmd_pad[CACHE_LINE_SIZE - (sizeof(u_int) * 2)];

	/* Paging control variables, used within single threaded page daemon. */
	struct pidctrl vmd_pid;		/* Pageout controller. */
	boolean_t vmd_oom;
	u_int vmd_inactive_threads;
	u_int vmd_inactive_shortage;	/* Per-thread shortage. */
	blockcount_t vmd_inactive_running; /* Number of inactive threads. */
	blockcount_t vmd_inactive_starting; /* Number of threads started. */
	volatile u_int vmd_addl_shortage; /* Shortage accumulator. */
	volatile u_int vmd_inactive_freed; /* Successful inactive frees. */
	volatile u_int vmd_inactive_us;	/* Microseconds for above. */
	u_int vmd_inactive_pps;		/* Exponential decay frees/second. */
	int vmd_oom_seq;
	int vmd_last_active_scan;
	struct vm_page vmd_markers[PQ_COUNT]; /* (q) markers for queue scans */
	struct vm_page vmd_inacthead;	/* marker for LRU-defeating insertions */
	struct vm_page vmd_clock[2];	/* markers for active queue scan */

	int vmd_pageout_wanted;		/* (a, p) pageout daemon wait channel */
	int vmd_pageout_pages_needed;	/* (d) page daemon waiting for pages? */
	bool vmd_minset;		/* (d) Are we in vm_min_domains? */
	bool vmd_severeset;		/* (d) Are we in vm_severe_domains? */
	enum {
		VM_LAUNDRY_IDLE = 0,
		VM_LAUNDRY_BACKGROUND,
		VM_LAUNDRY_SHORTFALL
	} vmd_laundry_request;

	/* Paging thresholds and targets. */
	u_int vmd_clean_pages_freed;	/* (q) accumulator for laundry thread */
	u_int vmd_background_launder_target; /* (c) */
	u_int vmd_free_reserved;	/* (c) pages reserved for deadlock */
	u_int vmd_free_target;		/* (c) pages desired free */
	u_int vmd_free_min;		/* (c) pages desired free */
	u_int vmd_inactive_target;	/* (c) pages desired inactive */
	u_int vmd_pageout_free_min;	/* (c) min pages reserved for kernel */
	u_int vmd_pageout_wakeup_thresh;/* (c) min pages to wake pagedaemon */
	u_int vmd_interrupt_free_min;	/* (c) reserved pages for int code */
	u_int vmd_free_severe;		/* (c) severe page depletion point */

	/* Name for sysctl etc. */
	struct sysctl_oid *vmd_oid;
	char vmd_name[sizeof(__XSTRING(MAXMEMDOM))];
} __aligned(CACHE_LINE_SIZE);

extern struct vm_domain vm_dom[MAXMEMDOM];

#define VM_DOMAIN(n)		(&vm_dom[(n)])
#define VM_DOMAIN_EMPTY(n)	(vm_dom[(n)].vmd_page_count == 0)

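/*
 * Hedged usage sketch: code that walks the domains typically skips empty
 * ones.  "vm_ndomains" is the global domain count, and the loop body is
 * elided here:
 *
 *	for (i = 0; i < vm_ndomains; i++) {
 *		if (VM_DOMAIN_EMPTY(i))
 *			continue;
 *		vmd = VM_DOMAIN(i);
 *		...
 *	}
 */
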
#define vm_pagequeue_assert_locked(pq)	mtx_assert(&(pq)->pq_mutex, MA_OWNED)
#define vm_pagequeue_lock(pq)		mtx_lock(&(pq)->pq_mutex)
#define vm_pagequeue_lockptr(pq)	(&(pq)->pq_mutex)
#define vm_pagequeue_trylock(pq)	mtx_trylock(&(pq)->pq_mutex)
#define vm_pagequeue_unlock(pq)		mtx_unlock(&(pq)->pq_mutex)

#define vm_domain_free_assert_locked(n)					\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
#define vm_domain_free_assert_unlocked(n)				\
	    mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
#define vm_domain_free_lock(d)						\
	    mtx_lock(vm_domain_free_lockptr((d)))
#define vm_domain_free_lockptr(d)					\
	    (&(d)->vmd_free_mtx)
#define vm_domain_free_trylock(d)					\
	    mtx_trylock(vm_domain_free_lockptr((d)))
#define vm_domain_free_unlock(d)					\
	    mtx_unlock(vm_domain_free_lockptr((d)))

#define vm_domain_pageout_lockptr(d)					\
	    (&(d)->vmd_pageout_mtx)
#define vm_domain_pageout_assert_locked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
#define vm_domain_pageout_assert_unlocked(n)				\
	    mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
#define vm_domain_pageout_lock(d)					\
	    mtx_lock(vm_domain_pageout_lockptr((d)))
#define vm_domain_pageout_unlock(d)					\
	    mtx_unlock(vm_domain_pageout_lockptr((d)))

static __inline void
vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
{

	vm_pagequeue_assert_locked(pq);
	pq->pq_cnt += addend;
}
#define vm_pagequeue_cnt_inc(pq)	vm_pagequeue_cnt_add((pq), 1)
#define vm_pagequeue_cnt_dec(pq)	vm_pagequeue_cnt_add((pq), -1)

static inline void
vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
{

	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	vm_pagequeue_cnt_dec(pq);
}

static inline void
vm_batchqueue_init(struct vm_batchqueue *bq)
{

	bq->bq_cnt = 0;
}

static inline bool
vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
{

	if (bq->bq_cnt < nitems(bq->bq_pa)) {
		bq->bq_pa[bq->bq_cnt++] = m;
		return (true);
	}
	return (false);
}

static inline vm_page_t
vm_batchqueue_pop(struct vm_batchqueue *bq)
{

	if (bq->bq_cnt == 0)
		return (NULL);
	return (bq->bq_pa[--bq->bq_cnt]);
}
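
/*
 * Illustrative example, not part of the original header: batch up pages and
 * then dequeue them all under a single acquisition of the page queue lock.
 * The function name and the source of "pages" are hypothetical, and the
 * block is disabled so the header is unchanged when compiled.
 */
#if 0
static void
vm_batchqueue_drain_example(struct vm_pagequeue *pq, vm_page_t *pages,
    int npages)
{
	struct vm_batchqueue bq;
	vm_page_t m;
	int i;

	/* Fill the batch; stop early if it is full. */
	vm_batchqueue_init(&bq);
	for (i = 0; i < npages; i++)
		if (!vm_batchqueue_insert(&bq, pages[i]))
			break;

	/* Drain the batch while holding the queue lock once. */
	vm_pagequeue_lock(pq);
	while ((m = vm_batchqueue_pop(&bq)) != NULL)
		vm_pagequeue_remove(pq, m);
	vm_pagequeue_unlock(pq);
}
#endif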

void vm_domain_set(struct vm_domain *vmd);
void vm_domain_clear(struct vm_domain *vmd);
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages);

/*
 * vm_pagequeue_domain:
 *
 *	Return the memory domain the page belongs to.
 */
static inline struct vm_domain *
vm_pagequeue_domain(vm_page_t m)
{

	return (VM_DOMAIN(vm_page_domain(m)));
}

/*
 * Return the number of pages we need to free up or cache.
 * A positive number indicates that we do not have enough free pages.
 */
static inline int
vm_paging_target(struct vm_domain *vmd)
{

	return (vmd->vmd_free_target - vmd->vmd_free_count);
}

/*
 * Returns TRUE if the pagedaemon needs to be woken up.
 */
static inline int
vm_paging_needed(struct vm_domain *vmd, u_int free_count)
{

	return (free_count < vmd->vmd_pageout_wakeup_thresh);
}
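
/*
 * Hedged usage sketch: allocators that consume free pages typically test
 * this and kick the page daemon, as vm_domain_allocate() does in vm_page.c:
 *
 *	if (vm_paging_needed(vmd, vmd->vmd_free_count))
 *		pagedaemon_wakeup(vmd->vmd_domain);
 */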

/*
 * Returns TRUE if the domain is below the min paging target.
 */
static inline int
vm_paging_min(struct vm_domain *vmd)
{

	return (vmd->vmd_free_min > vmd->vmd_free_count);
}

/*
 * Returns TRUE if the domain is below the severe paging target.
 */
static inline int
vm_paging_severe(struct vm_domain *vmd)
{

	return (vmd->vmd_free_severe > vmd->vmd_free_count);
}

/*
 * Return the number of pages we need to launder.
 * A positive number indicates that we have a shortfall of clean pages.
 */
static inline int
vm_laundry_target(struct vm_domain *vmd)
{

	return (vm_paging_target(vmd));
}

void pagedaemon_wakeup(int domain);

static inline void
vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
{
	u_int old, new;

	old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
	new = old + adj;
	/*
	 * Only update bitsets on transitions.  Notice we short-circuit the
	 * rest of the checks if we're above min already.
	 */
	if (old < vmd->vmd_free_min && (new >= vmd->vmd_free_min ||
	    (old < vmd->vmd_free_severe && new >= vmd->vmd_free_severe) ||
	    (old < vmd->vmd_pageout_free_min &&
	    new >= vmd->vmd_pageout_free_min)))
		vm_domain_clear(vmd);
}

#endif	/* _KERNEL */
#endif	/* !_VM_PAGEQUEUE_ */
Definition: vm_pagequeue.h:400