#include <sys/counter.h>
#include <sys/domainset.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/linker.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/rwlock.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <machine/md_var.h>
static SYSCTL_NODE(_vm_stats, OID_AUTO, page, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM page statistics");

static COUNTER_U64_DEFINE_EARLY(pqstate_commit_retries);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, pqstate_commit_retries,
    CTLFLAG_RD, &pqstate_commit_retries,
    "Number of failed per-page atomic queue state updates");

static COUNTER_U64_DEFINE_EARLY(queue_ops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_ops,
    CTLFLAG_RD, &queue_ops,
    "Number of batched queue operations");

static COUNTER_U64_DEFINE_EARLY(queue_nops);
SYSCTL_COUNTER_U64(_vm_stats_page, OID_AUTO, queue_nops,
    CTLFLAG_RD, &queue_nops,
    "Number of batched queue operations with no effects");
SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");

static bool _vm_page_busy_sleep(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags, bool locked);
static void vm_page_init(void *dummy);
static int vm_page_insert_after(vm_page_t m, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mpred);
static __always_inline void vm_page_mvqueue(vm_page_t m, const uint8_t nqueue,
    const uint16_t nflag);
static int vm_page_reclaim_run(int req_class, int domain, u_long npages,
    vm_page_t m_run, vm_paddr_t high);
static void
vm_page_init(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	bogus_page = vm_page_alloc_noobj(VM_ALLOC_WIRED);
}

static void
vm_page_init_cache_zones(void *dummy __unused)
{
	struct vm_domain *vmd;
	struct vm_pgcache *pgcache;
	int cache, domain, maxcache, pool;

	TUNABLE_INT_FETCH("vm.pgcache_zone_max_pcpu", &maxcache);
	maxcache *= mp_ncpus;
	for (domain = 0; domain < vm_ndomains; domain++) {
		vmd = VM_DOMAIN(domain);
		for (pool = 0; pool < VM_NFREEPOOL; pool++) {
			pgcache = &vmd->vmd_pgcache[pool];
			pgcache->domain = domain;
			pgcache->pool = pool;
			pgcache->zone = uma_zcache_create("vm pgcache",
			    PAGE_SIZE, NULL, NULL, NULL, NULL,
			    vm_page_zone_import, vm_page_zone_release,
			    pgcache, 0);
			/* Limit each pool's zone to 0.1% of domain pages. */
			cache = maxcache != 0 ? maxcache :
			    vmd->vmd_page_count / 1000;
			uma_zone_set_maxcache(pgcache->zone, cache);
		}
	}
}
SYSINIT(vm_page2, SI_SUB_VM_CONF, SI_ORDER_ANY, vm_page_init_cache_zones, NULL);

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
CTASSERT(sizeof(u_long) >= 8);
#endif
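/*
 * vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory size.
 *	Must be called before any use of page-size dependent functions.
 */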
void
vm_set_page_size(void)
{

	if (vm_cnt.v_page_size == 0)
		vm_cnt.v_page_size = PAGE_SIZE;
	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}
static vm_paddr_t
vm_page_blacklist_next(char **list, char *end)
{
	vm_paddr_t bad;
	char *cp, *pos;

	if (list == NULL || *list == NULL)
		return (0);
	if (**list == '\0') {
		*list = NULL;
		return (0);
	}

	/*
	 * If there's no end pointer then the buffer is coming from
	 * the kenv and we know it's null-terminated.
	 */
	if (end == NULL)
		end = *list + strlen(*list);

	/* Ensure that strtoq() won't walk off the end. */
	if (*end != '\0') {
		if (*end == '\n' || *end == ' ' || *end == ',')
			*end = '\0';
		else {
			printf("Blacklist not terminated, skipping\n");
			*list = NULL;
			return (0);
		}
	}

	for (pos = *list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
			if (bad == 0) {
				if (++cp < end)
					continue;
				else
					break;
			}
		} else
			break;
		if (*cp == '\0' || ++cp >= end)
			*list = NULL;
		else
			*list = cp;
		return (trunc_page(bad));
	}
	printf("Garbage in RAM blacklist, skipping\n");
	*list = NULL;
	return (0);
}
		TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
		if (verbose)
			printf("Skipping page with pa 0x%jx\n", (uintmax_t)pa);

	while (next != NULL) {
		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
			continue;
		vm_page_blacklist_add(pa, bootverbose);
	}

	mod = preload_search_by_type("ram_blacklist");
	if (mod != NULL) {
		ptr = preload_fetch_addr(mod);
		len = preload_fetch_size(mod);
	}
	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	TAILQ_FOREACH(m, &blacklist_head, listq) {
		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
		    (uintmax_t)m->phys_addr);
		first = 0;
	}
	error = sbuf_finish(&sbuf);
void
vm_page_init_marker(vm_page_t marker, int queue, uint16_t aflags)
{

	bzero(marker, sizeof(*marker));
	marker->flags = PG_MARKER;
	marker->a.flags = aflags;
	marker->busy_lock = VPB_CURTHREAD_EXCLUSIVE;
	marker->a.queue = queue;
}
static void
vm_page_domain_init(int domain)
{
	struct vm_domain *vmd;
	struct vm_pagequeue *pq;
	int i;

	vmd = VM_DOMAIN(domain);
	bzero(vmd, sizeof(*vmd));
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
	    "vm inactive pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
	    "vm active pagequeue";
	*__DECONST(const char **, &vmd->vmd_pagequeues[PQ_LAUNDRY].pq_name) =
	    "vm laundry pagequeue";
	*__DECONST(const char **,
	    &vmd->vmd_pagequeues[PQ_UNSWAPPABLE].pq_name) =
	    "vm unswappable pagequeue";
	vmd->vmd_domain = domain;
	vmd->vmd_page_count = 0;
	vmd->vmd_free_count = 0;
	for (i = 0; i < PQ_COUNT; i++) {
		pq = &vmd->vmd_pagequeues[i];
		TAILQ_INIT(&pq->pq_pl);
		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
		    MTX_DEF | MTX_DUPOK);
		vm_page_init_marker(&vmd->vmd_markers[i], i, 0);
	}
	mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
void
vm_page_init_page(vm_page_t m, vm_paddr_t pa, int segind)

	m->flags = m->a.flags = 0;
	m->order = VM_NFREEORDER;
	m->pool = VM_FREEPOOL_DEFAULT;
	m->valid = m->dirty = 0;

#ifndef PMAP_HAS_PAGE_ARRAY
static vm_paddr_t
vm_page_array_alloc(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t page_range)
{
	vm_paddr_t new_end;

	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	char *list, *listend;
	vm_paddr_t end, high_avail, low_avail, new_end, size;
	vm_paddr_t page_range __unused;
	vm_paddr_t last_pa, pa, startp, endp;
#if MINIDUMP_PAGE_TRACKING
	u_long vm_page_dump_size;
#endif
	int biggestone, i, segind;
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)
	long ii;
#endif

	vaddr = round_page(vaddr);

	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);

#ifdef WITNESS
	witness_size = round_page(witness_startup_count());
	new_end -= witness_size;
	mapped = pmap_map(&vaddr, new_end, new_end + witness_size,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, witness_size);
	witness_startup((void *)mapped);
#endif
#if MINIDUMP_PAGE_TRACKING
	/*
	 * Allocate a bitmap to indicate which physical pages need to be
	 * included in a minidump.
	 */
	new_end -= vm_page_dump_size;
#endif
#if defined(__aarch64__) || defined(__amd64__) || \
    defined(__riscv) || defined(__powerpc64__)
	/*
	 * Include the UMA bootstrap pages, vm_page_dump and vm_page_array
	 * in a crash dump.
	 */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	/*
	 * Compute the start and end of the message buffer and include it
	 * in the crash dump.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#ifdef VM_PHYSSEG_SPARSE
	size = 0;
	for (i = 0; i < vm_phys_nsegs; i++)
		size += vm_phys_segs[i].end - vm_phys_segs[i].start;
#elif defined(VM_PHYSSEG_DENSE)
	size = high_avail - low_avail;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

#ifdef PMAP_HAS_PAGE_ARRAY
	pmap_page_array_startup(size / PAGE_SIZE);
#else
#ifdef VM_PHYSSEG_DENSE
	/*
	 * The page-structure overhead can be accounted for only if
	 * vm_page_array is allocated from the last physical memory chunk.
	 */
	if (new_end != high_avail)
		page_range = size / PAGE_SIZE;
	else
#endif
	{
		page_range = size / (PAGE_SIZE + sizeof(struct vm_page));

		/*
		 * If the remainder is large enough for a bare page (without
		 * a corresponding struct vm_page), compensate by dropping
		 * one page from new_end.
		 */
		if (size % (PAGE_SIZE + sizeof(struct vm_page)) >= PAGE_SIZE) {
			if (new_end == high_avail)
				high_avail -= PAGE_SIZE;
			new_end -= PAGE_SIZE;
		}
	}
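/*
 * The arithmetic above: when vm_page_array is carved out of the managed
 * range itself, each managed page costs PAGE_SIZE bytes plus one
 * struct vm_page, hence page_range = size / (PAGE_SIZE +
 * sizeof(struct vm_page)).  E.g., assuming 4 KB pages and a 104-byte
 * struct vm_page (the size varies by architecture), 1 GB of memory
 * yields roughly (1 << 30) / 4200, or about 255,000, managed pages.
 */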
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate physical memory for the reservation management system's
	 * data structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end);
#endif
#if defined(__aarch64__) || defined(__amd64__) || \
    defined(__riscv) || defined(__powerpc64__)
	/* Include vm_page_array and vm_reserv_array in a crash dump. */
	for (pa = new_end; pa < end; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
#if defined(__i386__) && defined(VM_PHYSSEG_DENSE)

	/*
	 * Initialize the page structures and add every available page to the
	 * physical memory allocator's free lists.
	 */
		for (m = seg->first_page, pa = seg->start; pa < seg->end;
		    m++, pa += PAGE_SIZE)
			vm_page_init_page(m, pa, segind);

			pagecount = (u_long)atop(endp - startp);

			vm_cnt.v_page_count += (u_int)pagecount;
	/*
	 * Remove blacklisted pages from the physical memory allocator.
	 */
	TAILQ_INIT(&blacklist_head);
	vm_page_blacklist_load(&list, &listend);
	vm_page_blacklist_check(list, listend);

	list = kern_getenv("vm.blacklist");
	vm_page_blacklist_check(list, NULL);
	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}
	obj = atomic_load_ptr(&m->object);

	KASSERT(m->object == obj || m->object == NULL,
	    ("vm_page_busy_acquire: page %p does not belong to %p", m, obj));

		if (atomic_fcmpset_rel_int(&m->busy_lock, &x,
		    VPB_SHARERS_WORD(1)))
			break;

		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_busy_tryupgrade: invalid lock state"));
		if (!atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    ce | (x & VPB_BIT_WAITERS)))
			continue;

		KASSERT(x != VPB_FREED,
		    ("vm_page_sunbusy: Unlocking freed page."));
		if (VPB_SHARERS(x) > 1) {
			if (atomic_fcmpset_int(&m->busy_lock, &x,
			    x - VPB_ONE_SHARER))
				break;
			continue;
		}
		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_SHARERS_WORD(1),
		    ("vm_page_sunbusy: invalid lock state"));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
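/*
 * busy_lock is a single word: it holds either an exclusive ("xbusy")
 * owner, a count of shared ("sbusy") holders encoded by
 * VPB_SHARERS_WORD(), or VPB_UNBUSIED/VPB_FREED, with VPB_BIT_WAITERS
 * flagging sleeping waiters.  All of the transitions above are lockless
 * atomic_fcmpset loops that retry when the word changes underneath.
 */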
bool
vm_page_busy_sleep(vm_page_t m, const char *wmesg, int allocflags)

static bool
_vm_page_busy_sleep(vm_object_t obj, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags, bool locked)

		    m->object != obj || m->pindex != pindex) {

	} while (!atomic_fcmpset_int(&m->busy_lock, &x, x | VPB_BIT_WAITERS));

	sleepq_add(m, NULL, wmesg, 0, 0);
	sleepq_wait(m, PVM);
		if (atomic_fcmpset_acq_int(&m->busy_lock, &x,
		    x + VPB_ONE_SHARER))
			break;

	/* Clear the lock and wake up any waiters. */
	atomic_thread_fence_rel();
	x = atomic_swap_int(&m->busy_lock, VPB_FREED);
	if ((x & VPB_BIT_WAITERS) != 0)
		wakeup(m);

	for (; count != 0; count--) {
		vm_page_unwire(*ma, PQ_ACTIVE);
		ma++;
	}

#ifdef VM_PHYSSEG_SPARSE
	return (vm_phys_paddr_to_vm_page(pa));
#elif defined(VM_PHYSSEG_DENSE)
	return (&vm_page_array[atop(pa) - first_page]);
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	m->phys_addr = paddr;

	pmap_page_set_memattr(m, memattr);
	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
	m->flags &= ~PG_ZERO;

	KASSERT(!pmap_page_is_mapped(m), ("page %p is mapped", m));
	KASSERT(m->object != NULL, ("page %p has no object", m));

	m->dirty = VM_PAGE_BITS_ALL;
	KASSERT(m->object == NULL,
	    ("vm_page_insert_after: page already inserted"));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_after: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_page_insert_after: mpred doesn't precede pindex"));
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL)
		KASSERT(msucc->pindex > pindex,
		    ("vm_page_insert_after: msucc doesn't succeed pindex"));
	m->ref_count &= ~VPRC_OBJREF;

	KASSERT(object != NULL && m->object == object,
	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("vm_page_insert_radixdone: page %p is missing object ref", m));
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_page_insert_radixdone: object doesn't contain mpred"));
		KASSERT(mpred->pindex < m->pindex,
		    ("vm_page_insert_radixdone: mpred doesn't precede pindex"));
	}

	if (mpred != NULL)
		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
	else
		TAILQ_INSERT_HEAD(&object->memq, m, listq);

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);

	KASSERT((m->ref_count & VPRC_OBJREF) != 0,
	    ("page %p is missing its object ref", m));

	mrem = vm_radix_remove(&object->rtree, m->pindex);
	KASSERT(mrem == m, ("removed page %p, expected page %p", mrem, m));

	/*
	 * Now remove from the object's list of backed pages.
	 */
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	KASSERT(m != NULL && vm_page_busied(m) &&
	    m->object == object && m->pindex == pindex,
	    ("vm_page_relookup: Invalid page %p", m));

		if (atomic_fcmpset_int(&m->busy_lock, &x,
		    x - VPB_ONE_SHARER))
			break;

		KASSERT((x & ~VPB_BIT_WAITERS) == VPB_CURTHREAD_EXCLUSIVE,
		    ("vm_page_busy_release: %p xbusy not owned.", m));
		if (!atomic_fcmpset_rel_int(&m->busy_lock, &x, VPB_UNBUSIED))
			continue;
	if ((next = TAILQ_NEXT(m, listq)) != NULL) {
		MPASS(next->object == m->object);
		if (next->pindex != m->pindex + 1)
			next = NULL;
	}

	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL) {
		MPASS(prev->object == m->object);
		if (prev->pindex != m->pindex - 1)
			prev = NULL;
	}
	KASSERT(mnew->object == NULL && (mnew->ref_count & VPRC_OBJREF) == 0,
	    ("vm_page_replace: page %p already in object", mnew));

	mret = vm_radix_replace(&object->rtree, mnew);
	KASSERT(mret == mold,
	    ("invalid page replacement, mold=%p, mret=%p", mold, mret));
	KASSERT((mold->oflags & VPO_UNMANAGED) ==
	    (mnew->oflags & VPO_UNMANAGED),
	    ("vm_page_replace: mismatched VPO_UNMANAGED"));

	/* Keep the resident page list in sorted order. */
	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
	TAILQ_REMOVE(&object->memq, mold, listq);
	mold->object = NULL;

	/*
	 * The object's resident_page_count does not change because we have
	 * swapped one page for another.
	 */
	if (pmap_page_is_write_mapped(mnew))
		vm_object_set_writeable_dirty(object);
	KASSERT(m->ref_count != 0, ("vm_page_rename: page %p has no refs", m));
	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
	    ("vm_page_rename: pindex already renamed"));

	m->pindex = new_pindex;

	m->pindex = new_pindex;
	m->object = new_object;
vm_page_t
vm_page_alloc_after(vm_object_t object, vm_pindex_t pindex,
    int req, vm_page_t mpred)

	u_int limit, old, new;

	old = vmd->vmd_free_count;
	do {
		if (old < limit + npages)
			return (0);
		new = old - npages;
	} while (atomic_fcmpset_int(&vmd->vmd_free_count, &old, new) == 0);
vm_page_t
vm_page_alloc_domain_after(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, vm_page_t mpred)
{

#define	VPA_FLAGS	(VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL |	\
			 VM_ALLOC_NOWAIT | VM_ALLOC_NOBUSY |		\
			 VM_ALLOC_SBUSY | VM_ALLOC_WIRED |		\
			 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | VM_ALLOC_COUNT_MASK)

	KASSERT((req & ~VPA_FLAGS) == 0,
	    ("invalid request %#x", req));
	KASSERT((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY),
	    ("invalid request %#x", req));
	KASSERT(mpred == NULL || mpred->pindex < pindex,
	    ("mpred %p doesn't precede pindex 0x%jx", mpred,
	    (uintmax_t)pindex));
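/*
 * VPA_FLAGS is the complete set of request bits vm_page_alloc() honors.
 * The KASSERTs above reject any stray flag, as well as the mutually
 * exclusive VM_ALLOC_NOBUSY/VM_ALLOC_SBUSY combination, before any
 * allocation work begins.
 */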
#if VM_NRESERVLEVEL > 0
	/*
	 * Can we allocate the page from a reservation?
	 */
	if (vm_object_reserv(object) &&
	    (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
	    NULL) {
		goto found;
	}
#endif

#if VM_NRESERVLEVEL > 0
	if (vm_reserv_reclaim_inactive(domain))
		goto again;
#endif

	KASSERT(m->object == NULL, ("page %p has object", m));

	if (object->memattr != VM_MEMATTR_DEFAULT &&
	    (object->flags & OBJ_FICTITIOUS) == 0)
		pmap_page_set_memattr(m, object->memattr);
vm_page_t
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)

		m = vm_page_alloc_contig_domain(object, pindex, domain, req,
		    npages, low, high, alignment, boundary, memattr);

static vm_page_t
vm_page_find_contig_domain(int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary)

		m_ret = vm_phys_alloc_contig(domain, npages, low, high,
		    alignment, boundary);
#if VM_NRESERVLEVEL > 0
		/*
		 * Try to break a reservation to allocate the run.
		 */
		m_ret = vm_reserv_reclaim_contig(domain, npages, low,
		    high, alignment, boundary);
#endif

vm_page_t
vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex, int domain,
    int req, u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)
{
	vm_page_t m, m_ret, mpred;
	u_int busy_lock, flags, oflags;

#define	VPAC_FLAGS	(VPA_FLAGS | VM_ALLOC_NORECLAIM)
	KASSERT((req & ~VPAC_FLAGS) == 0,
	    ("invalid request %#x", req));
	KASSERT((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY),
	    ("invalid request %#x", req));
	KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
	    (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
	    ("invalid request %#x", req));
	KASSERT((object->flags & OBJ_FICTITIOUS) == 0,
	    ("vm_page_alloc_contig: object %p has fictitious pages",
	    object));
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));

	KASSERT(mpred == NULL || mpred->pindex != pindex,
	    ("vm_page_alloc_contig: pindex already allocated"));
#if VM_NRESERVLEVEL > 0
	/*
	 * Can we allocate the pages from a reservation?
	 */
	if (vm_object_reserv(object) &&
	    (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
	    mpred, npages, low, high, alignment, boundary)) != NULL) {
		goto found;
	}
#endif
	if ((m_ret = vm_page_find_contig_domain(domain, req, npages,
	    low, high, alignment, boundary)) != NULL)
		goto found;

	for (m = m_ret; m < &m_ret[npages]; m++) {
		vm_page_dequeue(m);
		vm_page_alloc_check(m);
	}

	if ((req & VM_ALLOC_WIRED) != 0)
		vm_wire_add(npages);
	if (object->memattr != VM_MEMATTR_DEFAULT &&
	    memattr == VM_MEMATTR_DEFAULT)
		memattr = object->memattr;
	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->a.flags = 0;
		m->flags = (m->flags | PG_NODUMP) & flags;
		m->busy_lock = busy_lock;
	}

			if ((req & VM_ALLOC_WIRED) != 0)
				vm_wire_sub(npages);
			KASSERT(m->object == NULL,
			    ("page %p has object", m));

	for (m = m_ret; m < &m_ret[npages]; m++) {
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
	}
static __always_inline vm_page_t
_vm_page_alloc_noobj_domain(int domain, const int freelist, int req)
{

#define	VPAN_FLAGS	(VM_ALLOC_CLASS_MASK | VM_ALLOC_WAITFAIL |	\
			 VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK |		\
			 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED |		\
			 VM_ALLOC_NODUMP | VM_ALLOC_ZERO | VM_ALLOC_COUNT_MASK)

	KASSERT((req & ~VPAN_FLAGS) == 0,
	    ("invalid request %#x", req));

	vmd = VM_DOMAIN(domain);
	if (freelist == VM_NFREELIST &&
	    vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone != NULL) {
		m = uma_zalloc(vmd->vmd_pgcache[VM_FREEPOOL_DIRECT].zone,
		    M_NOWAIT | M_NOVM);
		if (m != NULL)
			goto found;
	}

		if (freelist == VM_NFREELIST)
			m = vm_phys_alloc_pages(domain,
			    VM_FREEPOOL_DIRECT, 0);
		else
			m = vm_phys_alloc_freelist_pages(domain, freelist,
			    VM_FREEPOOL_DIRECT, 0);

#if VM_NRESERVLEVEL > 0
	if (freelist == VM_NFREELIST &&
	    vm_reserv_reclaim_inactive(domain))
		goto again;
#endif

	m->pindex = 0xdeadc0dedeadc0de;
	m->flags = (m->flags & PG_ZERO) | flags;

vm_page_t
vm_page_alloc_freelist_domain(int domain, int freelist, int req)
{

	KASSERT(freelist >= 0 && freelist < VM_NFREELIST,
	    ("%s: invalid freelist %d", __func__, freelist));
vm_page_t
vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)

		m = vm_page_alloc_noobj_contig_domain(domain, req, npages, low,
		    high, alignment, boundary, memattr);

vm_page_t
vm_page_alloc_noobj_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{

#define	VPANC_FLAGS	(VPAN_FLAGS | VM_ALLOC_NORECLAIM)
	KASSERT((req & ~VPANC_FLAGS) == 0,
	    ("invalid request %#x", req));
	KASSERT((req & (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM)) !=
	    (VM_ALLOC_WAITOK | VM_ALLOC_NORECLAIM),
	    ("invalid request %#x", req));
	KASSERT((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY),
	    ("invalid request %#x", req));
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));

	while ((m_ret = vm_page_find_contig_domain(domain, req, npages,
	    low, high, alignment, boundary)) == NULL) {
		if (!vm_domain_alloc_fail(VM_DOMAIN(domain), NULL, req))
			return (NULL);
	}

	if ((req & VM_ALLOC_WIRED) != 0)
		vm_wire_add(npages);
	for (m = m_ret; m < &m_ret[npages]; m++) {
		vm_page_dequeue(m);
		vm_page_alloc_check(m);
		m->pindex = 0xdeadc0dedeadc0de;
		m->flags = (m->flags | PG_NODUMP) & flags;
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
	}
static void
vm_page_alloc_check(vm_page_t m)
{

	KASSERT(m->object == NULL, ("page %p has object", m));
	KASSERT(m->a.queue == PQ_NONE &&
	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
	    ("page %p has unexpected queue %d, flags %#x",
	    m, m->a.queue, (m->a.flags & PGA_QUEUE_STATE_MASK)));
	KASSERT(m->ref_count == 0, ("page %p has references", m));
	KASSERT(m->dirty == 0, ("page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("page %p has unexpected memattr %d",
	    m, pmap_page_get_memattr(m)));
	KASSERT(m->valid == 0, ("free page %p is valid", m));
	pmap_vm_page_alloc_check(m);
}

	n = vm_phys_alloc_npages(domain, pgcache->pool, cnt,
	    (vm_page_t *)store);

	for (i = 0; i < cnt; i++) {
		m = (vm_page_t)store[i];
		vm_phys_free_pages(m, 0);
	}
#define	VPSC_ANY	0	/* No restrictions. */
#define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
#define	VPSC_NOSUPER	2	/* Skip superpages. */

vm_page_t
vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
    u_long alignment, vm_paddr_t boundary, int options)
{
	vm_object_t object;
	vm_paddr_t pa;
	vm_page_t m, m_run;
#if VM_NRESERVLEVEL > 0
	int level;
#endif
	int m_inc, order, run_ext, run_len;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
		KASSERT((m->flags & PG_MARKER) == 0,
		    ("page %p is PG_MARKER", m));
		KASSERT((m->flags & PG_FICTITIOUS) == 0 || m->ref_count >= 1,
		    ("fictitious page %p has invalid ref count", m));

		/*
		 * If the current page would be the start of a run, check its
		 * physical address against the end, alignment, and boundary
		 * conditions for the run.
		 */
		if (run_len == 0) {
			KASSERT(m_run == NULL, ("m_run != NULL"));
			if (m + npages > m_end)
				break;
			pa = VM_PAGE_TO_PHYS(m);
			if (!vm_addr_align_ok(pa, alignment)) {
				m_inc = atop(roundup2(pa, alignment) - pa);
				continue;
			}
			if (!vm_addr_bound_ok(pa, ptoa(npages), boundary)) {
				m_inc = atop(roundup2(pa, boundary) - pa);
				continue;
			}
		} else
			KASSERT(m_run != NULL, ("m_run == NULL"));

#if VM_NRESERVLEVEL > 0
		else if ((level = vm_reserv_level(m)) >= 0 &&
		    (options & VPSC_NORESERV) != 0) {
			/* Advance to the end of the reservation. */
			run_ext = 0;
			pa = VM_PAGE_TO_PHYS(m);
			m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
			    pa);
		}
#endif
		else if ((object = atomic_load_ptr(&m->object)) != NULL) {
			/*
			 * The page is considered eligible for relocation if
			 * and only if it could be laundered or reclaimed by
			 * the page daemon.
			 */
			VM_OBJECT_RLOCK(object);
			if (object != m->object) {
				VM_OBJECT_RUNLOCK(object);
				goto retry;
			}
#if VM_NRESERVLEVEL > 0
			} else if ((options & VPSC_NOSUPER) != 0 &&
			    (level = vm_reserv_level_iffullpop(m)) >= 0) {
				/* Advance to the end of the superpage. */
				run_ext = 0;
				pa = VM_PAGE_TO_PHYS(m);
				m_inc = atop(roundup2(pa + 1,
				    vm_reserv_size(level)) - pa);
			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
			    vm_page_queue(m) != PQ_NONE &&
			    !vm_page_busied(m)) {
				KASSERT(pmap_page_get_memattr(m) ==
				    VM_MEMATTR_DEFAULT,
				    ("page %p has an unexpected memattr", m));
				KASSERT((m->oflags & (VPO_SWAPINPROG |
				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
				    ("page %p has unexpected oflags", m));
				run_ext = 1;
			}
#endif
#if VM_NRESERVLEVEL > 0
		} else if (level >= 0) {
			/* The page is reserved but still free; extend run. */
			run_ext = 1;
#endif
		} else if ((order = m->order) < VM_NFREEORDER) {
			/*
			 * The page is the first in a power-of-two-sized run
			 * of contiguous free pages; add them all to the run
			 * and jump ahead.
			 */
			run_ext = 1 << order;
			m_inc = 1 << order;
		}

		if (run_len >= npages)
			break;
	struct spglist free;
	struct vm_domain *vmd;
	vm_object_t object;
	vm_paddr_t pa;
	vm_page_t m, m_end, m_new;
	int error, order, req;

	KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
	    ("req_class is not an allocation class"));
	SLIST_INIT(&free);
	error = 0;
	m = m_run;
	m_end = m_run + npages;
	req = req_class | VM_ALLOC_WIRED;
	for (; error == 0 && m < m_end; m++) {
		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));

		if (vm_page_wired(m))
			error = EBUSY;
		else if ((object = atomic_load_ptr(&m->object)) != NULL) {
			VM_OBJECT_WLOCK(object);
			if (m->object != object ||
			    ((object->flags & OBJ_SWAP) == 0 &&
			    object->type != OBJT_VNODE))
				error = EINVAL;
			else if (object->memattr != VM_MEMATTR_DEFAULT)
				error = EINVAL;
			else if (vm_page_queue(m) != PQ_NONE &&
			    vm_page_tryxbusy(m) != 0) {
				KASSERT(pmap_page_get_memattr(m) ==
				    VM_MEMATTR_DEFAULT,
				    ("page %p has an unexpected memattr", m));
				KASSERT(m->oflags == 0,
				    ("page %p has unexpected oflags", m));
				/*
				 * Allocate a replacement page: first above
				 * "high", then below the run, finally above
				 * the run.
				 */
				m_new = NULL;
				if (trunc_page(high) !=
				    ~(vm_paddr_t)PAGE_MASK) {
					m_new = vm_page_alloc_noobj_contig(
					    req, 1, round_page(high),
					    ~(vm_paddr_t)0, PAGE_SIZE,
					    0, VM_MEMATTR_DEFAULT);
				}
				if (m_new == NULL) {
					pa = VM_PAGE_TO_PHYS(m_run);
					m_new = vm_page_alloc_noobj_contig(
					    req, 1, 0, pa - 1, PAGE_SIZE, 0,
					    VM_MEMATTR_DEFAULT);
				}
				if (m_new == NULL) {
					pa += ptoa(npages);
					m_new = vm_page_alloc_noobj_contig(
					    req, 1, pa, high, PAGE_SIZE,
					    0, VM_MEMATTR_DEFAULT);
				}
				if (m_new == NULL) {
					vm_page_xunbusy(m);
					error = ENOMEM;
					goto unlock;
				}

				/*
				 * Unmap the page and check for new wirings
				 * that may have been acquired through a pmap
				 * lookup.
				 */
				if (object->ref_count != 0 &&
				    !vm_page_try_remove_all(m)) {
					vm_page_xunbusy(m);
					vm_page_free(m_new);
					error = EBUSY;
					goto unlock;
				}

				/* Replace "m" with the new page. */
				m_new->a.flags = m->a.flags &
				    ~PGA_QUEUE_STATE_MASK;
				KASSERT(m_new->oflags == VPO_UNMANAGED,
				    ("page %p is managed", m_new));
				pmap_copy_page(m, m_new);
				m_new->valid = m->valid;
				m_new->dirty = m->dirty;
				m->flags &= ~PG_ZERO;
				vm_page_dequeue(m);
				if (vm_page_replace_hold(m_new, object,
				    m->pindex, m) && vm_page_free_prep(m))
					SLIST_INSERT_HEAD(&free, m,
					    plinks.s.ss);
			} else
				error = EBUSY;
unlock:
			VM_OBJECT_WUNLOCK(object);
		} else {
			m->flags &= ~PG_ZERO;
			vm_page_dequeue(m);
			if (vm_page_free_prep(m))
				SLIST_INSERT_HEAD(&free, m,
				    plinks.s.ss);
			KASSERT(m->dirty == 0,
			    ("page %p is dirty", m));

			order = m->order;
			if (order < VM_NFREEORDER) {
				/*
				 * The page is in a power-of-two-sized free
				 * run; skip over it.
				 */
				m += (1 << order) - 1;
			}
#if VM_NRESERVLEVEL > 0
			else if (vm_reserv_is_page_free(m))
				order = 0;
#endif
			if (order == VM_NFREEORDER)
				error = EINVAL;
		}
	}
	if ((m = SLIST_FIRST(&free)) != NULL) {
		int cnt;

		vmd = VM_DOMAIN(domain);
		cnt = 0;
		vm_domain_free_lock(vmd);
		do {
			MPASS(vm_page_domain(m) == domain);
			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
			vm_phys_free_pages(m, 0);
			cnt++;
		} while ((m = SLIST_FIRST(&free)) != NULL);
		vm_domain_free_unlock(vmd);
		vm_domain_freecnt_inc(vmd, cnt);
	}
CTASSERT(powerof2(NRUNS));

#define	RUN_INDEX(count)	((count) & (NRUNS - 1))

#define	MIN_RECLAIM	8

bool
vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	struct vm_domain *vmd;
	vm_paddr_t curr_low;
	vm_page_t m_run, m_runs[NRUNS];
	u_long count, minalign, reclaimed;
	int error, i, options, req_class;

	KASSERT(npages > 0, ("npages is 0"));
	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));

	/*
	 * The caller will attempt an allocation after some runs have been
	 * reclaimed and added to the vm_phys buddy lists.  Due to limitations
	 * of the allocator, arrange for the buddy lists to hold a run of at
	 * least minalign pages.
	 */
	minalign = 1ul << imin(flsl(npages - 1), VM_NFREEORDER - 1);
	npages = roundup2(npages, minalign);
	if (alignment < ptoa(minalign))
		alignment = ptoa(minalign);

	vmd = VM_DOMAIN(domain);
	count = vmd->vmd_free_count;

		m_run = vm_phys_scan_contig(domain, npages, curr_low,
		    high, alignment, boundary, options);

	for (i = 0; count > 0 && i < NRUNS; i++) {
		count--;
		m_run = m_runs[RUN_INDEX(count)];
		error = vm_page_reclaim_run(req_class, domain, npages,
		    m_run, high);
		if (error == 0)
			reclaimed += npages;
	}

	return (reclaimed != 0);
}

bool
vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_domainset_iter di;
	bool ret;
	int domain;

	vm_domainset_iter_page_init(&di, NULL, 0, &domain, &req);
	do {
		ret = vm_page_reclaim_contig_domain(domain, req, npages, low,
		    high, alignment, boundary);
		if (ret)
			break;
	} while (vm_domainset_iter_page(&di, NULL, &domain) == 0);

	return (ret);
}
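/*
 * Worked example for the minalign computation above: a request for
 * npages = 33 gives flsl(32) = 6, so (assuming VM_NFREEORDER > 6)
 * minalign = 1 << 6 = 64; npages is rounded up to 64 and alignment is
 * raised to ptoa(64), matching the smallest buddy order that can
 * satisfy the allocation.
 */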
	while (vm_page_count_min()) {
		vm_min_waiters++;
		msleep(&vm_min_domains, &vm_domainset_lock, PVM, "vmwait", 0);
	}

	while (vm_page_count_severe()) {
		vm_severe_waiters++;
		msleep(&vm_severe_domains, &vm_domainset_lock, PVM,
		    "vmwait", 0);
	}

		vm_pageproc_waiters++;
		error = msleep(&vm_pageproc_waiters, &vm_domainset_lock,
		    PVM | PDROP | mflags, "pageprocwait", 1);

	if (vm_page_count_min_set(wdoms)) {
		vm_min_waiters++;
		error = msleep(&vm_min_domains, &vm_domainset_lock,
		    PVM | PDROP | mflags, "vmwait", 0);
	}

		if (pageproc == NULL)
			panic("vm_wait in early boot");
		DOMAINSET_ZERO(&wdom);
		DOMAINSET_SET(vmd->vmd_domain, &wdom);
		vm_wait_doms(&wdom, 0);

static int
vm_wait_flags(vm_object_t obj, int mflags)
{
	struct domainset *d;

	d = NULL;
	/*
	 * Carefully fetch pointers only once: the struct domainset itself
	 * is immutable but the pointer might change.
	 */
	if (obj != NULL)
		d = obj->domain.dr_policy;
	if (d == NULL)
		d = curthread->td_domain.dr_policy;

	return (vm_wait_doms(&d->ds_mask, mflags));
}
	if (vm_page_count_min_set(&dset->ds_mask)) {
		vm_min_waiters++;
		msleep(&vm_min_domains, &vm_domainset_lock, PUSER | PDROP,
		    "pfault", timo);
	} else
		mtx_unlock(&vm_domainset_lock);

static struct vm_pagequeue *
vm_page_pagequeue(vm_page_t m)
{

	return (&vm_pagequeue_domain(m)->vmd_pagequeues[m->a.queue]);
}

static __always_inline bool
vm_page_pqstate_fcmpset(vm_page_t m, vm_page_astate_t *old,
    vm_page_astate_t new)
{

	if (__predict_true(vm_page_astate_fcmpset(m, old, new)))
		return (true);
	counter_u64_add(pqstate_commit_retries, 1);
	return (false);
}

	KASSERT(vm_page_pagequeue(m) == pq,
	    ("%s: queue %p does not match page %p", __func__, pq, m));
	KASSERT(old->queue != PQ_NONE && new.queue == old->queue,
	    ("%s: invalid queue indices %d %d",
	    __func__, old->queue, new.queue));
	/*
	 * The queue field and PGA_ENQUEUED flag are stable only so long as
	 * the corresponding page queue lock is held.
	 */
	if ((old->flags & PGA_ENQUEUED) != 0) {
		new.flags &= ~PGA_ENQUEUED;
		next = TAILQ_NEXT(m, plinks.q);
		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
	}
	if (vm_page_pqstate_fcmpset(m, old, new)) {
		if (next == NULL)
			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
		else
			TAILQ_INSERT_BEFORE(next, m, plinks.q);
	}

	if (__predict_false(as._bits != old->_bits)) {
		*old = as;
		return (false);
	}

	KASSERT(old->queue != PQ_NONE && new.queue == PQ_NONE,
	    ("%s: invalid queue indices %d %d",
	    __func__, old->queue, new.queue));

	if ((old->flags & PGA_ENQUEUED) != 0)
		TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);

	KASSERT((m->a.flags & PGA_ENQUEUED) == 0,
	    ("%s: invalid page queue for page %p", __func__, m));
	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
	KASSERT(old->queue == new.queue || new.queue != PQ_NONE,
	    ("%s: invalid state, queue %d flags %x",
	    __func__, new.queue, new.flags));

	if (old->_bits != new._bits &&
	    !vm_page_pqstate_fcmpset(m, old, new))
		return (false);

	if (old->_bits == new._bits)
		return (true);

	CRITICAL_ASSERT(curthread);
	vm_pagequeue_assert_locked(pq);
	KASSERT(queue < PQ_COUNT,
	    ("%s: invalid queue index %d", __func__, queue));
	KASSERT(pq == _vm_page_pagequeue(m, queue),
	    ("%s: page %p does not belong to queue %p", __func__, m, pq));

	if (__predict_false(old.queue != queue ||
	    (old.flags & PGA_QUEUE_OP_MASK) == 0)) {
		counter_u64_add(queue_nops, 1);
		return;
	}

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("%s: page %p is unmanaged", __func__, m));

	new = old;
	new.flags &= ~PGA_QUEUE_OP_MASK;

	counter_u64_add(queue_ops, 1);

	counter_u64_add(queue_ops, 1);
	for (i = 0; i < bq->bq_cnt; i++)
		vm_pqbatch_process_page(pq, bq->bq_pa[i], queue);

	KASSERT(queue < PQ_COUNT, ("invalid queue %d", queue));

	bq = DPCPU_PTR(pqbatch[domain][queue]);
	if (vm_batchqueue_insert(bq, m)) {
		critical_exit();
		return;
	}
	critical_exit();

	pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
	vm_pagequeue_lock(pq);
	critical_enter();
	bq = DPCPU_PTR(pqbatch[domain][queue]);
	vm_pqbatch_process(pq, bq, queue);

	int cpu, domain, queue;

	CPU_FOREACH(cpu) {
		thread_lock(td);
		sched_bind(td, cpu);
		thread_unlock(td);

		for (domain = 0; domain < vm_ndomains; domain++) {
			for (queue = 0; queue < PQ_COUNT; queue++) {
				pq = &VM_DOMAIN(domain)->vmd_pagequeues[queue];
				vm_pagequeue_lock(pq);
				critical_enter();
				vm_pqbatch_process(pq,
				    DPCPU_PTR(pqbatch[domain][queue]), queue);
				critical_exit();
				vm_pagequeue_unlock(pq);
			}
		}
	}
	KASSERT(old.queue != PQ_NONE ||
	    (old.flags & PGA_QUEUE_STATE_MASK) == 0,
	    ("%s: page %p has unexpected queue state",
	    __func__, m));

	KASSERT(old.queue != PQ_NONE ||
	    (old.flags & PGA_QUEUE_STATE_MASK) == 0,
	    ("%s: page %p has unexpected queue state",
	    __func__, m));

	new.flags &= ~PGA_QUEUE_OP_MASK;

	KASSERT(m->a.queue == PQ_NONE &&
	    (m->a.flags & PGA_QUEUE_STATE_MASK) == 0,
	    ("%s: page %p is already enqueued", __func__, m));
	KASSERT(m->ref_count > 0,
	    ("%s: page %p does not carry any references", __func__, m));
	atomic_thread_fence_acq();

#if defined(DIAGNOSTIC) && defined(PHYS_TO_DMAP)
	if (PMAP_HAS_DMAP && (m->flags & PG_ZERO) != 0) {
		uint64_t *p;
		int i;

		p = (uint64_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
		for (i = 0; i < PAGE_SIZE / sizeof(uint64_t); i++, p++)
			KASSERT(*p == 0, ("vm_page_free_prep %p PG_ZERO %d %jx",
			    m, i, (uintmax_t)*p));
	}
#endif
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		KASSERT(!pmap_page_is_mapped(m),
		    ("vm_page_free_prep: freeing mapped page %p", m));
		KASSERT((m->a.flags & (PGA_EXECUTABLE | PGA_WRITEABLE)) == 0,
		    ("vm_page_free_prep: mapping flags set in page %p", m));
	} else {
		KASSERT(m->a.queue == PQ_NONE,
		    ("vm_page_free_prep: unmanaged page %p is queued", m));
	}
	VM_CNT_INC(v_tfree);

	if (m->object != NULL) {
		KASSERT(((m->oflags & VPO_UNMANAGED) != 0) ==
		    ((m->object->flags & OBJ_UNMANAGED) != 0),
		    ("vm_page_free_prep: managed flag mismatch for page %p",
		    m));
		vm_page_assert_xbusied(m);

		KASSERT(m->ref_count == VPRC_OBJREF,
		    ("vm_page_free_prep: page %p has unexpected ref_count %u",
		    m, m->ref_count));
	}

	if ((m->flags & PG_FICTITIOUS) != 0) {
		KASSERT(m->ref_count == 1,
		    ("fictitious page %p is referenced", m));
		KASSERT(m->a.queue == PQ_NONE,
		    ("fictitious page %p is queued", m));
		return (false);
	}

	if (m->ref_count != 0)
		panic("vm_page_free_prep: page %p has references", m);

	/*
	 * Restore the default memory attribute to the page.
	 */
	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

#if VM_NRESERVLEVEL > 0
	/*
	 * A page allocated from a per-CPU cache cannot belong to a
	 * reservation, so avoid the lookup in that case.
	 */
	if ((m->flags & PG_PCPU_CACHE) == 0 && vm_reserv_free_page(m))
		return (false);
#endif
	if (SLIST_EMPTY(free))
		return;

	count = 0;
	while ((m = SLIST_FIRST(free)) != NULL) {
		count++;
		SLIST_REMOVE_HEAD(free, plinks.s.ss);
		vm_page_free_toq(m);
	}

	if (update_wire_count)
		vm_wire_sub(count);

	KASSERT((m->flags & PG_FICTITIOUS) == 0 ||
	    VPRC_WIRE_COUNT(m->ref_count) >= 1,
	    ("vm_page_wire: fictitious page %p has zero wirings", m));

	old = atomic_fetchadd_int(&m->ref_count, 1);
	KASSERT(VPRC_WIRE_COUNT(old) != VPRC_WIRE_COUNT_MAX,
	    ("vm_page_wire: counter overflow for page %p", m));
		KASSERT(old > 0,
		    ("vm_page_wire_mapped: wiring unreferenced page %p", m));
		if ((old & VPRC_BLOCKED) != 0)
			return (false);
	} while (!atomic_fcmpset_int(&m->ref_count, &old, old + 1));

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("%s: page %p is unmanaged", __func__, m));

		KASSERT(VPRC_WIRE_COUNT(old) > 0,
		    ("vm_page_unwire: wire count underflow for page %p", m));

		} else if (old == 1) {

	} while (!atomic_fcmpset_rel_int(&m->ref_count, &old, old - 1));

	KASSERT(nqueue < PQ_COUNT,
	    ("vm_page_unwire: invalid queue %u request for page %p",
	    nqueue, m));

	KASSERT(VPRC_WIRE_COUNT(old) > 0,
	    ("%s: counter underflow for page %p", __func__, m));
	KASSERT((m->flags & PG_FICTITIOUS) == 0 || old == VPRC_OBJREF + 1,
	    ("%s: missing ref on fictitious page %p", __func__, m));
static __always_inline void
vm_page_mvqueue(vm_page_t m, const uint8_t nqueue, const uint16_t nflag)
{
	vm_page_astate_t old, new;

	KASSERT(m->ref_count > 0,
	    ("%s: page %p does not carry any references", __func__, m));
	KASSERT(nflag == PGA_REQUEUE || nflag == PGA_REQUEUE_HEAD,
	    ("%s: invalid flags %x", __func__, nflag));

		new = old;
		new.flags &= ~PGA_QUEUE_OP_MASK;

		if (old.queue == nqueue) {

	KASSERT(m->a.queue != PQ_UNSWAPPABLE,
	    ("page %p already unswappable", m));
	/*
	 * Use a check of the valid bits to determine whether we should
	 * accelerate reclamation of the page.
	 */
	if (noreuse || m->valid == 0) {
		nqueue = PQ_INACTIVE;
		nflag = PGA_REQUEUE_HEAD;
	} else
		nflag = PGA_REQUEUE;

		new = old;
		new.flags &= ~PGA_QUEUE_OP_MASK;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("vm_page_release: page %p is unmanaged", m));

	object = atomic_load_ptr(&m->object);

		if (object == m->object) {

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("vm_page_release_locked: page %p is unmanaged", m));

		if ((flags & VPR_TRYFREE) != 0 &&
		    (m->object->ref_count == 0 || !pmap_page_is_mapped(m)) &&
		    m->dirty == 0 && vm_page_tryxbusy(m)) {

	KASSERT(m->object != NULL && (m->oflags & VPO_UNMANAGED) == 0,
	    ("vm_page_try_blocked_op: page %p has no object", m));
	KASSERT(vm_page_busied(m),
	    ("vm_page_try_blocked_op: page %p is not busy", m));
	KASSERT(m->ref_count != 0,
	    ("vm_page_try_blocked_op: page %p has no references", m));
	} while (!atomic_fcmpset_int(&m->ref_count, &old, old | VPRC_BLOCKED));

	KASSERT(old == (VPRC_BLOCKED | VPRC_OBJREF),
	    ("vm_page_try_blocked_op: unexpected refcount value %u for %p",
	    old, m));

	if (advice == MADV_FREE)
		/*
		 * Mark the page clean.  This will allow the page to be freed
		 * without first paging it out.
		 */
		vm_page_undirty(m);
	else if (advice != MADV_DONTNEED) {
		if (advice == MADV_WILLNEED)
			vm_page_activate(m);
		return;
	}
static bool
vm_page_grab_sleep(vm_object_t object, vm_page_t m, vm_pindex_t pindex,
    const char *wmesg, int allocflags, bool locked)

	KASSERT((allocflags & (VM_ALLOC_NOBUSY | VM_ALLOC_WIRED)) != 0,
	    ("vm_page_grab*: the pages must be busied or wired"));
	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab*: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));

	pflags = allocflags &
	    ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL |
	    VM_ALLOC_IGN_SBUSY);

static bool
vm_page_acquire_unlocked(vm_object_t object, vm_pindex_t pindex,
    vm_page_t prev, vm_page_t *mp, int allocflags)

	if (prev == NULL || (m = TAILQ_NEXT(prev, listq)) == NULL ||
	    QMD_IS_TRASHED(m) || m->pindex != pindex ||
	    atomic_load_ptr(&m->object) != object) {

		if (m->object == object && m->pindex == pindex)
	vm_page_t ma[VM_INITIAL_PAGEIN];
	int after, i, pflags, rv;

	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab_valid: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
	KASSERT((allocflags &
	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
	    ("vm_page_grab_valid: Invalid flags 0x%X", allocflags));

	} else if ((m = vm_page_alloc(object, pindex, pflags)) == NULL) {
		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
			return (VM_PAGER_FAIL);

		after = MIN(after, VM_INITIAL_PAGEIN);
		after = MAX(after, 1);
		ma[0] = m;
		for (i = 1; i < after; i++) {
			if ((ma[i] = vm_page_next(ma[i - 1])) != NULL) {
				if (ma[i]->valid || !vm_page_tryxbusy(ma[i]))
					break;
			} else {
				ma[i] = vm_page_alloc(object, m->pindex + i,
				    VM_ALLOC_NORMAL);
				if (ma[i] == NULL)
					break;
			}
		}
		after = i;
		rv = vm_pager_get_pages(object, ma, after, NULL, NULL);
		/* The pager may have replaced a page. */
		m = ma[0];
		if (rv != VM_PAGER_OK) {
			for (i = 0; i < after; i++) {
				if (!vm_page_wired(ma[i]))
					vm_page_free(ma[i]);
				else
					vm_page_xunbusy(ma[i]);
			}
			*mp = NULL;
			return (rv);
		}
		for (i = 1; i < after; i++)
			vm_page_readahead_finish(ma[i]);

int
vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags)
{

	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
	    ("vm_page_grab_valid_unlocked: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY "
	    "mismatch"));
	KASSERT((allocflags &
	    (VM_ALLOC_NOWAIT | VM_ALLOC_WAITFAIL | VM_ALLOC_ZERO)) == 0,
	    ("vm_page_grab_valid_unlocked: Invalid flags 0x%X", allocflags));

	if ((m = *mp) != NULL) {
4774 if ((m = *mp) != NULL) {
4822 vm_page_t *ma,
int count)
4830 (
"vm_page_grap_pages: VM_ALLOC_COUNT() is not allowed"));
4832 (
"vm_page_grab_pages: invalid page count %d", count));
4839 if (m == NULL || m->pindex != pindex + i) {
4843 mpred = TAILQ_PREV(m, pglist, listq);
4844 for (; i < count; i++) {
4848 "grbmaw", allocflags,
true))
4866 if ((m->flags &
PG_ZERO) == 0)
4883 int allocflags, vm_page_t *ma,
int count)
4890 (
"vm_page_grab_pages_unlocked: invalid page count %d", count));
4897 flags = allocflags & ~VM_ALLOC_NOBUSY;
4899 for (i = 0; i < count; i++, pindex++) {
4905 if ((m->flags &
PG_ZERO) == 0)
	KASSERT(
	    base + size <= PAGE_SIZE,
	    ("vm_page_bits: illegal base/size %d/%d", base, size)
	);

	if (size == 0)		/* handle degenerate case */
		return (0);

	first_bit = base >> DEV_BSHIFT;
	last_bit = (base + size - 1) >> DEV_BSHIFT;

	return (((vm_page_bits_t)2 << last_bit) -
	    ((vm_page_bits_t)1 << first_bit));
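/*
 * Example: with DEV_BSIZE = 512 (DEV_BSHIFT = 9), vm_page_bits(512, 1024)
 * gives first_bit = 1, last_bit = (512 + 1024 - 1) >> 9 = 2, so the
 * result is (2 << 2) - (1 << 1) = 6, i.e. block bits 1 and 2 set.
 */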
#if PAGE_SIZE == 32768
	atomic_set_64((uint64_t *)bits, set);
#elif PAGE_SIZE == 16384
	atomic_set_32((uint32_t *)bits, set);
#elif (PAGE_SIZE == 8192) && defined(atomic_set_16)
	atomic_set_16((uint16_t *)bits, set);
#elif (PAGE_SIZE == 4096) && defined(atomic_set_8)
	atomic_set_8((uint8_t *)bits, set);
#else		/* PAGE_SIZE <= 8192 */
	/*
	 * Use a trick to perform a 32-bit atomic on the containing aligned
	 * word, to not depend on the existence of atomic_{set, clear}_{8, 16}.
	 */
	addr = (uintptr_t)bits;
	shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
#else
	shift *= NBBY;
#endif
	addr &= ~(sizeof(uint32_t) - 1);
	atomic_set_32((uint32_t *)addr, set << shift);
#endif		/* PAGE_SIZE */
#if PAGE_SIZE == 32768
	atomic_clear_64((uint64_t *)bits, clear);
#elif PAGE_SIZE == 16384
	atomic_clear_32((uint32_t *)bits, clear);
#elif (PAGE_SIZE == 8192) && defined(atomic_clear_16)
	atomic_clear_16((uint16_t *)bits, clear);
#elif (PAGE_SIZE == 4096) && defined(atomic_clear_8)
	atomic_clear_8((uint8_t *)bits, clear);
#else		/* PAGE_SIZE <= 8192 */
	addr = (uintptr_t)bits;
	shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
#else
	shift *= NBBY;
#endif
	addr &= ~(sizeof(uint32_t) - 1);
	atomic_clear_32((uint32_t *)addr, clear << shift);
#endif		/* PAGE_SIZE */
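/*
 * Shift example for the sub-word path above: with PAGE_SIZE 4096,
 * vm_page_bits_t is one byte wide.  A bits pointer at byte offset 3
 * within its aligned 32-bit word uses shift = 3 * NBBY = 24 on
 * little-endian machines and shift = (4 - 1 - 3) * NBBY = 0 on
 * big-endian ones, so the one-byte mask lands on the same byte either
 * way.
 */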
static inline vm_page_bits_t
vm_page_bits_swap(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t newbits)
{
#if PAGE_SIZE == 32768
	uint64_t old;

	old = *bits;
	while (atomic_fcmpset_64(bits, &old, newbits) == 0);
	return (old);
#elif PAGE_SIZE == 16384
	uint32_t old;

	old = *bits;
	while (atomic_fcmpset_32(bits, &old, newbits) == 0);
	return (old);
#elif (PAGE_SIZE == 8192) && defined(atomic_fcmpset_16)
	uint16_t old;

	old = *bits;
	while (atomic_fcmpset_16(bits, &old, newbits) == 0);
	return (old);
#elif (PAGE_SIZE == 4096) && defined(atomic_fcmpset_8)
	uint8_t old;

	old = *bits;
	while (atomic_fcmpset_8(bits, &old, newbits) == 0);
	return (old);
#else		/* PAGE_SIZE <= 4096 */
	uintptr_t addr;
	uint32_t old, new, mask;
	int shift;

	addr = (uintptr_t)bits;
	shift = addr & (sizeof(uint32_t) - 1);
#if BYTE_ORDER == BIG_ENDIAN
	shift = (sizeof(uint32_t) - sizeof(vm_page_bits_t) - shift) * NBBY;
#else
	shift *= NBBY;
#endif
	addr &= ~(sizeof(uint32_t) - 1);
	mask = VM_PAGE_BITS_ALL << shift;

	old = *(uint32_t *)addr;
	do {
		new = old & ~mask;
		new |= newbits << shift;
	} while (atomic_fcmpset_32((uint32_t *)addr, &old, new) == 0);
	return (old >> shift);
#endif		/* PAGE_SIZE */
}
	vm_page_bits_t pagebits;

	/*
	 * If the base is not DEV_BSIZE aligned and the valid bit is clear,
	 * we have to zero out a portion of the first block.
	 */
	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	/*
	 * If the ending offset is not DEV_BSIZE aligned and the valid bit
	 * is clear, we have to zero out a portion of the last block.
	 */
	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
	    ("vm_page_set_valid_range: page %p is dirty", m));

	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;

	m->dirty = VM_PAGE_BITS_ALL;

	m->dirty &= ~pagebits;
	vm_page_bits_t oldvalid, pagebits;

	if ((frag = rounddown2(base, DEV_BSIZE)) != base &&
	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, frag, base - frag);

	endoff = base + size;
	if ((frag = rounddown2(endoff, DEV_BSIZE)) != endoff &&
	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
		pmap_zero_page_area(m, endoff,
		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));

	/*
	 * Set valid, clear dirty bits.  If validating the entire page we
	 * can safely clear the pmap modify bit as well.
	 */
	oldvalid = m->valid;
	pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;

	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
		frag = DEV_BSIZE - frag;

	if (base == 0 && size == PAGE_SIZE) {
		/*
		 * The page can only be modified within the pmap if it is
		 * mapped, and it can only be mapped if it was previously
		 * fully valid.
		 */
		if (oldvalid == VM_PAGE_BITS_ALL)
			pmap_clear_modify(m);

	m->dirty &= ~pagebits;
	vm_page_bits_t bits;

		bits = VM_PAGE_BITS_ALL;

	KASSERT((bits == 0 && vm_page_all_valid(m)) ||
	    !pmap_page_is_mapped(m),
	    ("vm_page_set_invalid: page %p is mapped", m));

	MPASS(!pmap_page_is_mapped(m));

	/*
	 * Scan the valid bits looking for invalid sections that must be
	 * zeroed.  Invalid sub-DEV_BSIZE'd areas (where the valid bit may
	 * be set) have already been zeroed by vm_page_set_validclean().
	 */
	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
		if (i == (PAGE_SIZE / DEV_BSIZE) ||
		    (m->valid & ((vm_page_bits_t)1 << i))) {
			if (i > b)
				pmap_zero_page_area(m,
				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
			b = i + 1;
		}
	}
	vm_page_bits_t bits;

	bits = vm_page_bits(base, size);
	return (m->valid != 0 && (m->valid & bits) == bits);

	if (skip_m != NULL && skip_m->object != object)
		return (false);

	npages = atop(pagesizes[m->psind]);

	/*
	 * The physically contiguous pages that make up a superpage, i.e., a
	 * page with a page size index ("psind") greater than zero, are
	 * virtually contiguous too.
	 */
	for (i = 0; i < npages; i++) {
		if (m[i].object != object)
			return (false);
		if (&m[i] == skip_m)
			continue;
		if ((flags & PS_ALL_DIRTY) != 0) {
			if (m[i].dirty != VM_PAGE_BITS_ALL)
				return (false);
		}
		if ((flags & PS_ALL_VALID) != 0 &&
		    m[i].valid != VM_PAGE_BITS_ALL)
			return (false);
	}

	m->valid = VM_PAGE_BITS_ALL;
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void
vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
{

	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
}

void
vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
{

	mtx_assert_(vm_page_lockptr(m), a, file, line);
}

void
vm_page_object_busy_assert(vm_page_t m)
{

	/*
	 * Certain of the page's fields may only be modified by the
	 * holder of a page or object busy.
	 */
	if (m->object != NULL && !vm_page_busied(m))
		VM_OBJECT_ASSERT_BUSY(m->object);
}

void
vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits)
{

	if ((bits & PGA_WRITEABLE) == 0)
		return;

	/*
	 * The PGA_WRITEABLE flag can only be set on managed pages.
	 */
	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("PGA_WRITEABLE on unmanaged page"));
#ifdef DDB
#include <sys/kernel.h>

#include <ddb/ddb.h>

DB_SHOW_COMMAND(page, vm_page_print_page_info)
{

	db_printf("vm_cnt.v_free_count: %d\n", vm_free_count());
	db_printf("vm_cnt.v_inactive_count: %d\n", vm_inactive_count());
	db_printf("vm_cnt.v_active_count: %d\n", vm_active_count());
	db_printf("vm_cnt.v_laundry_count: %d\n", vm_laundry_count());
	db_printf("vm_cnt.v_wire_count: %d\n", vm_wire_count());
	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
}

DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
{
	int dom;

	db_printf("pq_free %d\n", vm_free_count());
	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf(
    "dom %d page_cnt %d free %d pq_act %d pq_inact %d pq_laund %d pq_unsw %d\n",
		    dom,
		    vm_dom[dom].vmd_page_count,
		    vm_dom[dom].vmd_free_count,
		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_LAUNDRY].pq_cnt,
		    vm_dom[dom].vmd_pagequeues[PQ_UNSWAPPABLE].pq_cnt);
	}
}

DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
{
	vm_page_t m;
	boolean_t phys, virt;

	if (!have_addr) {
		db_printf("show pginfo addr\n");
		return;
	}
	phys = strchr(modif, 'p') != NULL;
	virt = strchr(modif, 'v') != NULL;
	if (virt)
		m = PHYS_TO_VM_PAGE(pmap_kextract(addr));
	else if (phys)
		m = PHYS_TO_VM_PAGE(addr);
	else
		m = (vm_page_t)addr;
	db_printf(
    "page %p obj %p pidx 0x%jx phys 0x%jx q %d ref 0x%x\n"
    "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
	    m->a.queue, m->ref_count, m->a.flags, m->oflags,
	    m->flags, m->a.act_count, m->busy_lock, m->valid, m->dirty);
}
#endif /* DDB */
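/*
 * Usage from the in-kernel debugger, e.g.:
 *   db> show pginfo/p 0x12345000    (treat the address as physical)
 *   db> show pginfo/v 0xfffff800deadb000    (resolve a kernel VA first)
 * With no modifier the address is taken to be a struct vm_page pointer.
 */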