#include <sys/blockcount.h>
#include <sys/cpuset.h>
#include <sys/limits.h>
#include <sys/kernel.h>
#include <sys/pctrie.h>
#include <sys/sysctl.h>
#include <sys/refcount.h>
#include <sys/socket.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
    "Use old (insecure) msync behavior");

static int vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
    int pagerflags, int flags, boolean_t *allclean, boolean_t *eio);
static boolean_t vm_object_page_remove_write(vm_page_t p, int flags,
    boolean_t *allclean);

static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "VM object stats");

SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapses, CTLFLAG_RD,
    &object_collapses,
    "VM object collapses");
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, bypasses, CTLFLAG_RD,
    &object_bypasses,
    "VM object bypasses");
SYSCTL_COUNTER_U64(_vm_stats_object, OID_AUTO, collapse_waits, CTLFLAG_RD,
    &object_collapse_waits,
    "Number of sleeps for collapse");
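/*
 * vm_object_zdtor() is the UMA destructor for the vm object zone.  With
 * INVARIANTS enabled it verifies that a freed object no longer holds
 * resident pages, reservations, references, shadows, or busy state.
 */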
static void vm_object_zdtor(void *mem, int size, void *arg);

static void
vm_object_zdtor(void *mem, int size, void *arg)
{
    vm_object_t object;

    object = (vm_object_t)mem;
    KASSERT(object->ref_count == 0,
        ("object %p ref_count = %d", object, object->ref_count));
    KASSERT(TAILQ_EMPTY(&object->memq),
        ("object %p has resident pages in its memq", object));
    KASSERT(vm_radix_is_empty(&object->rtree),
        ("object %p has resident pages in its trie", object));
#if VM_NRESERVLEVEL > 0
    KASSERT(LIST_EMPTY(&object->rvq),
        ("object %p has reservations", object));
#endif
    KASSERT(!vm_object_busied(object),
        ("object %p busy = %d", object, blockcount_read(&object->busy)));
    KASSERT(object->resident_page_count == 0,
        ("object %p resident_page_count = %d",
        object, object->resident_page_count));
    KASSERT(atomic_load_int(&object->shadow_count) == 0,
        ("object %p shadow_count = %d",
        object, atomic_load_int(&object->shadow_count)));
    KASSERT(object->type == OBJT_DEAD,
        ("object %p has non-dead type %d",
        object, object->type));
}
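/*
 * Object creation: vm_object_zinit() is the UMA init hook that sets up the
 * object lock and the "freed" state, and _vm_object_allocate() fills in the
 * type, flags, page queue, shadow list, and default attributes of a new
 * object.  vm_object_init() then sets up kernel_object at boot.
 */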
    rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW);

    blockcount_init(&object->busy);
    object->resident_page_count = 0;

    TAILQ_INIT(&object->memq);
    LIST_INIT(&object->shadow_head);

    object->flags = flags;

    atomic_thread_fence_rel();

    object->pg_color = 0;

    object->domain.dr_policy = NULL;
    object->generation = 1;
    object->cleangeneration = 1;

    object->memattr = VM_MEMATTR_DEFAULT;

    object->backing_object = NULL;
    object->backing_object_offset = (vm_ooffset_t) 0;
#if VM_NRESERVLEVEL > 0
    LIST_INIT(&object->rvq);
#endif

#if VM_NRESERVLEVEL > 0
    kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif

    object->flags &= ~bits;
    if (!TAILQ_EMPTY(&object->memq))

    (void)blockcount_sleep(&object->paging_in_progress, &object->lock,
        waitid, PVM | PDROP);

        panic("vm_object_allocate: can't create OBJT_DEAD");

        panic("vm_object_allocate: type %d is undefined or dynamic",
            type);

    if (!refcount_acquire_if_gt(&object->ref_count, 0)) {

        old = refcount_acquire(&object->ref_count);

    KASSERT((object->flags & OBJ_DEAD) == 0,
        ("vm_object_reference: Referenced dead object."));

    old = refcount_acquire(&object->ref_count);

    KASSERT((object->flags & OBJ_DEAD) == 0,
        ("vm_object_reference: Referenced dead object."));
    struct vnode *vp = (struct vnode *)object->handle;

    KASSERT(object->type == OBJT_VNODE,
        ("vm_object_deallocate_vnode: not a vnode object"));
    KASSERT(vp != NULL, ("vm_object_deallocate_vnode: missing vp"));

    last = refcount_release(&object->ref_count);
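/*
 * vm_object_deallocate_anon() locks the single remaining shadow of an
 * anonymous backing object so that the reference release can continue with
 * that shadow and a collapse can be attempted.
 */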
    object = LIST_FIRST(&backing_object->shadow_head);
    KASSERT(object != NULL &&
        atomic_load_int(&backing_object->shadow_count) == 1,
        ("vm_object_anon_deallocate: ref_count: %d, shadow_count: %d",
        backing_object->ref_count,
        atomic_load_int(&backing_object->shadow_count)));
    KASSERT((object->flags & OBJ_ANON) != 0,
        ("invalid shadow object %p", object));

        !refcount_acquire_if_not_zero(&object->ref_count)) {

    if (backing_object != NULL && (backing_object->flags & OBJ_ANON) != 0)

    while (object != NULL) {

        released = refcount_release_if_gt(&object->ref_count, 1);

        released = refcount_release_if_gt(&object->ref_count, 2);
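/*
 * vm_object_deallocate() drops a reference and, for anonymous objects, may
 * terminate the object or trigger a collapse.  The helpers that follow
 * (vm_object_backing_remove/insert/transfer and the collapse wait routines)
 * keep the shadow list and backing_object pointers consistent while a
 * collapse is in progress.
 */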
        ("vm_object_deallocate: object deallocated too many times: %d",

    if (!refcount_release(&object->ref_count)) {

        ("shadowed tmpfs v_object 2 %p", object));

        ("vm_object_deallocate: Terminating dead object."));

    if (object->cred != NULL) {

        crfree(object->cred);

        ("object %p sub_shadow count zero", object));

        ("vm_object_backing_remove: Removing collapsing object."));

        LIST_REMOVE(object, shadow_list);
        object->flags &= ~OBJ_SHADOWLIST;

    object->backing_object = NULL;

    object->backing_object = NULL;

    LIST_INSERT_HEAD(&backing_object->shadow_head, object,
        shadow_list);

    object->backing_object = backing_object;

    object->backing_object = backing_object;

        ("shadowing dead anonymous object"));

    object->backing_object = backing_object;

    if (new_backing_object == NULL)

    object->backing_object = new_backing_object;

    counter_u64_add(object_collapse_waits, 1);

    if (backing_object == NULL ||

    counter_u64_add(object_collapse_waits, 1);

    return (backing_object);
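/*
 * vm_object_terminate_pages() frees every resident page of a dying object,
 * and vm_object_terminate() completes the destruction once the last
 * reference and all paging activity are gone.  vm_object_page_clean() is
 * the msync(2) back end: it writes dirty pages in the given range back
 * through the pager.
 */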
    TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) {
        KASSERT(p->object == object &&

            ("vm_object_terminate_pages: page %p is inconsistent", p));

    TAILQ_INIT(&object->memq);
    object->resident_page_count = 0;

        ("terminating non-dead obj %p", object));

        ("terminating collapsing obj %p", object));

        ("terminating shadow obj %p", object));

        ("vm_object_terminate: object with references, ref_count=%d",

#if VM_NRESERVLEVEL > 0
    if (__predict_false(!LIST_EMPTY(&object->rvq)))
        vm_reserv_break_all(object);
#endif

        ("%s: non-swap obj %p has cred", __func__, object));

    return (p->dirty != 0);

    vm_pindex_t pi, tend, tstart;
    int curgeneration, n, pagerflags;
    boolean_t eio, res, allclean;

    allclean = tstart == 0 && tend >= object->size;

    curgeneration = object->generation;

        np = TAILQ_NEXT(p, listq);

        n = vm_object_page_collect_flush(object, p, pagerflags,
            flags, &allclean, &eio);

    object->cleangeneration = curgeneration;
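/*
 * vm_object_page_collect_flush() gathers the run of dirty pages around "p"
 * and hands it to vm_pageout_flush().  vm_object_sync() implements the
 * object-level part of msync(): for vnode-backed objects it cleans the
 * range and may VOP_FSYNC() the vnode; with invalidate set it also removes
 * the cleaned pages.
 */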
static int
vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags,
    int flags, boolean_t *allclean, boolean_t *eio)
{
    int count, i, mreq, runlen;

    for (tp = p_first, i = 0; i < count; tp = TAILQ_NEXT(tp, listq), i++)

boolean_t
vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size,
    boolean_t syncio, boolean_t invalidate)
{
    int error, flags, fsync_after;

        offset += object->backing_object_offset;

        object = backing_object;

        ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) {

        (void) vn_start_write(vp, &mp, V_WAIT);
        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        if (syncio && !invalidate && offset == 0 &&
            atop(size) == object->size) {

        flags = (syncio || invalidate) ? OBJPC_SYNC : 0;

        fsync_after = FALSE;

        error = VOP_FSYNC(vp, MNT_WAIT, curthread);

        vn_finished_write(mp);

            OFF_TO_IDX(offset + size + PAGE_MASK), flags);

    if (advice != MADV_FREE)

    if (advice == MADV_FREE)
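/*
 * vm_object_madvise() applies madvise(2) advice to a pindex range, walking
 * the shadow chain to find each resident page; MADV_WILLNEED activates
 * pages, while MADV_FREE and MADV_DONTNEED deactivate them and may release
 * backing swap space.
 */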
    vm_pindex_t tpindex;

        if (m == NULL || pindex < m->pindex) {

            tpindex = (m != NULL && m->pindex < end) ?

                pindex, tpindex - pindex);
            if ((pindex = tpindex) == end)

        if (backing_object == NULL)

        if (tobject != object)

        tobject = backing_object;

            m = TAILQ_NEXT(m, listq);

            ("vm_object_madvise: page %p is fictitious", tm));

            ("vm_object_madvise: page %p is not managed", tm));

        if (object != tobject)

        if (advice == MADV_WILLNEED) {

        if (tobject != object)
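/*
 * vm_object_shadow() interposes a new anonymous object in front of the
 * source object for copy-on-write; when the source is anonymous and has
 * only one reference, the source itself can be reused instead.
 */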
void
vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length,
    struct ucred *cred, bool shared)
{

    if (source != NULL && source->ref_count == 1 &&

    if (shared || source != NULL) {

    if (source != NULL) {

#if VM_NRESERVLEVEL > 0

        OFF_TO_IDX(*offset)) & ((1 << (VM_NFREEORDER -
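/*
 * vm_object_split() is used when a single map entry maps part of an
 * anonymous object that nothing else references: the pages backing that
 * entry are moved into a new object so the two ranges can be managed
 * independently.
 */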
    vm_page_t m, m_busy, m_next;
    vm_object_t orig_object, new_object, backing_object;
    vm_pindex_t idx, offidxstart;

        ("vm_object_split: Splitting object with multiple mappings."));

    size = atop(entry->end - entry->start);

        orig_object->cred, ptoa(size));

    if (backing_object != NULL) {

    if (orig_object->cred != NULL) {
        crhold(orig_object->cred);
        KASSERT(orig_object->charge >= ptoa(size),
            ("orig_object->charge < 0"));
        orig_object->charge -= ptoa(size);

    KASSERT(m == NULL || idx <= m->pindex - offidxstart,
        ("%s: object %p was repopulated", __func__, orig_object));
    for (; m != NULL && (idx = m->pindex - offidxstart) < size;

        m_next = TAILQ_NEXT(m, listq);

#if VM_NRESERVLEVEL > 0

        vm_reserv_rename(m, new_object, orig_object, offidxstart);

        else if (m_busy == NULL)

    TAILQ_FOREACH_FROM(m_busy, &new_object->memq, listq)

    KASSERT(p == NULL || p->object == object || p->object == backing_object,
        ("invalid ownership %p %p %p", p, object, backing_object));

    } else if (p->object == object) {

    return (TAILQ_FIRST(&backing_object->memq));
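/*
 * vm_object_scan_all_shadowed() checks whether every page of the backing
 * object is already shadowed by the object, which makes a bypass safe.
 * vm_object_collapse_scan() moves or frees the backing object's pages while
 * a full collapse is performed.
 */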
    vm_pindex_t backing_offset_index, new_pindex, pi, ps;

        if (p != NULL && p->pindex < pi)
            p = TAILQ_NEXT(p, listq);

        if (p == NULL && ps >= backing_object->size)

        pi = MIN(p->pindex, ps);

        new_pindex = pi - backing_offset_index;
        if (new_pindex >= object->size)

    vm_page_t next, p, pp;
    vm_pindex_t backing_offset_index, new_pindex;

    for (p = TAILQ_FIRST(&backing_object->memq); p != NULL; p = next) {
        next = TAILQ_NEXT(p, listq);
        new_pindex = p->pindex - backing_offset_index;

            ("vm_object_collapse_scan: backing object mismatch %p != %p",

        KASSERT(p->object == backing_object,
            ("vm_object_collapse_scan: object mismatch %p != %p",
            p->object, backing_object));

        if (p->pindex < backing_offset_index ||
            new_pindex >= object->size) {

            KASSERT(!pmap_page_is_mapped(p),
                ("freeing mapped page %p", p));

            KASSERT(!pmap_page_is_mapped(p),
                ("freeing mapped page %p", p));

            KASSERT(!pmap_page_is_mapped(p),
                ("freeing mapped page %p", p));

            backing_offset_index, 1);

#if VM_NRESERVLEVEL > 0
        vm_reserv_rename(p, object, backing_object,
            backing_offset_index);
#endif
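/*
 * vm_object_collapse() shortens a shadow chain: if the backing object has
 * no other references it is absorbed into the object, otherwise it may be
 * bypassed; the object_collapses and object_bypasses counters record which
 * case was taken.
 */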
        ("collapsing invalid object"));

        if (backing_object == NULL)

            ("collapse with invalid ref %d or shadow %d count.",

        KASSERT((backing_object->flags &

            ("vm_object_collapse: Backing object already collapsing."));

            ("vm_object_collapse: object is already collapsing."));

            ("vm_object_collapse: shadow_count: %d",

#if VM_NRESERVLEVEL > 0
            if (__predict_false(!LIST_EMPTY(&backing_object->rvq)))
                vm_reserv_break_all(backing_object);
#endif

            object->backing_object_offset +=
                backing_object->backing_object_offset;

            KASSERT(backing_object->ref_count == 1,
                ("backing_object %p was somehow re-referenced during collapse!",
                backing_object));
            (void)refcount_release(&backing_object->ref_count);

            counter_u64_add(object_collapses, 1);

            if (new_backing_object != NULL) {

                    new_backing_object);
                object->backing_object_offset +=
                    backing_object->backing_object_offset;

            (void)refcount_release(&backing_object->ref_count);
            KASSERT(backing_object->ref_count >= 1,
                ("backing_object %p was somehow dereferenced during collapse!",
                backing_object));

            counter_u64_add(object_bypasses, 1);
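/*
 * vm_object_page_remove() frees a range of pages from an object,
 * vm_object_page_noreuse() moves a range to the head of the inactive queue
 * so it is reclaimed first, and vm_object_populate() brings every page in a
 * range into the object and reports whether that succeeded.
 */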
        ("vm_object_page_remove: illegal options for object %p", object));

    for (; p != NULL && (p->pindex < end || end == 0); p = next) {
        next = TAILQ_NEXT(p, listq);

            ("vm_object_page_remove: page %p is fictitious", p));

        if (p->dirty != 0) {

        ("vm_object_page_noreuse: illegal object %p", object));

    for (; p != NULL && (p->pindex < end || end == 0); p = next) {
        next = TAILQ_NEXT(p, listq);

    for (pindex = start; pindex < end; pindex++) {

    if (pindex > start) {

        while (m != NULL && m->pindex < pindex) {

            m = TAILQ_NEXT(m, listq);

    return (pindex == end);
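/*
 * vm_object_coalesce() tries to extend prev_object so that a neighboring
 * allocation can share it instead of creating a new object, adjusting the
 * swap accounting charge when the extension succeeds.
 */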
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
    vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
{
    vm_pindex_t next_pindex;

    if (prev_object == NULL)

    prev_size >>= PAGE_SHIFT;
    next_size >>= PAGE_SHIFT;
    next_pindex = OFF_TO_IDX(prev_offset) + prev_size;

        prev_object->size != next_pindex &&

    if (prev_object->cred != NULL) {

            prev_object->cred)) {

        prev_object->charge += ptoa(next_size);

    if (next_pindex < prev_object->size) {

        if (prev_object->cred != NULL) {
            KASSERT(prev_object->charge >=
                ptoa(prev_object->size - next_pindex),
                ("object %p overcharged 1 %jx %jx", prev_object,
                (uintmax_t)next_pindex, (uintmax_t)next_size));
            prev_object->charge -= ptoa(prev_object->size -
                next_pindex);

    if (next_pindex + next_size > prev_object->size)
        prev_object->size = next_pindex + next_size;
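/*
 * vm_object_unwire() walks the shadow chain for each page in the range and
 * unwires the page that is actually resident.  The vm_object_busy() family
 * maintains a blockcount-based busy state on the object that other code can
 * wait on with vm_object_busy_wait().
 */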
    vm_pindex_t end_pindex, pindex, tpindex;
    int depth, locked_depth;

    KASSERT((offset & PAGE_MASK) == 0,
        ("vm_object_unwire: offset is not page aligned"));
    KASSERT((length & PAGE_MASK) == 0,
        ("vm_object_unwire: length is not a multiple of PAGE_SIZE"));

    end_pindex = pindex + atop(length);

    while (pindex < end_pindex) {
        if (m == NULL || pindex < m->pindex) {

        KASSERT(tobject != NULL,
            ("vm_object_unwire: missing page"));

        if (depth == locked_depth) {

        m = TAILQ_NEXT(m, listq);

    for (tobject = object; locked_depth >= 1;

        if (tm->object != tobject)

        tobject = tm->object;

    for (tobject = object; locked_depth >= 1; locked_depth--) {

    blockcount_acquire(&obj->busy, 1);

    atomic_thread_fence_acq_rel();

    blockcount_release(&obj->busy, 1);

    (void)blockcount_sleep(&obj->busy, NULL, wmesg, PVM);
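/*
 * vm_object_list_handler() walks vm_object_list and emits one
 * kinfo_vmobject record per object; it backs the vm.objects sysctl and its
 * swap-only variant.
 */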
    struct kinfo_vmobject *kvo;
    char *fullpath, *freepath;

    if (req->oldptr == NULL) {

        return (SYSCTL_OUT(req, NULL, sizeof(struct kinfo_vmobject) *

    kvo = malloc(sizeof(*kvo), M_TEMP, M_WAITOK);

        kvo->kvo_size = ptoa(obj->size);
        kvo->kvo_resident = obj->resident_page_count;
        kvo->kvo_ref_count = obj->ref_count;
        kvo->kvo_shadow_count = atomic_load_int(&obj->shadow_count);
        kvo->kvo_memattr = obj->memattr;
        kvo->kvo_active = 0;
        kvo->kvo_inactive = 0;

        TAILQ_FOREACH(m, &obj->memq, listq) {

                kvo->kvo_inactive++;

        kvo->kvo_vn_fileid = 0;
        kvo->kvo_vn_fsid = 0;
        kvo->kvo_vn_fsid_freebsd11 = 0;

        } else if ((obj->flags & OBJ_ANON) != 0) {
            MPASS(kvo->kvo_type == KVME_TYPE_DEFAULT ||
                kvo->kvo_type == KVME_TYPE_SWAP);
            kvo->kvo_me = (uintptr_t)obj;

            kvo->kvo_backing_obj = (uintptr_t)obj->backing_object;

            kvo->kvo_swapped = sp > UINT32_MAX ? UINT32_MAX : sp;

            vn_fullpath(vp, &fullpath, &freepath);
            vn_lock(vp, LK_SHARED | LK_RETRY);
            if (VOP_GETATTR(vp, &va, curthread->td_ucred) == 0) {
                kvo->kvo_vn_fileid = va.va_fileid;
                kvo->kvo_vn_fsid = va.va_fsid;
                kvo->kvo_vn_fsid_freebsd11 = va.va_fsid;

        strlcpy(kvo->kvo_path, fullpath, sizeof(kvo->kvo_path));
        if (freepath != NULL)
            free(freepath, M_TEMP);

        kvo->kvo_structsize = offsetof(struct kinfo_vmobject, kvo_path)
            + strlen(kvo->kvo_path) + 1;
        kvo->kvo_structsize = roundup(kvo->kvo_structsize,

        error = SYSCTL_OUT(req, kvo, kvo->kvo_structsize);

SYSCTL_PROC(_vm, OID_AUTO, objects, CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP |
    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_object_list, "S,kinfo_vmobject",
    "List of VM objects");

    CTLTYPE_STRUCT | CTLFLAG_RW | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_object_list_swap, "S,kinfo_vmobject",
    "List of swap VM objects");
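/*
 * The remaining routines are DDB commands: "show vmochk" cross-checks
 * internal objects against the kernel and process maps, "show object"
 * prints a single object, and "show vmopag" dumps the resident pages of
 * every object.
 */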
#include <sys/kernel.h>
#include <sys/cons.h>

        if (_vm_object_in_map(map, object, tmpe)) {

            if (_vm_object_in_map(tmpm, object, tmpe)) {

    if (obj == object) {

    FOREACH_PROC_IN_SYSTEM(p) {

        if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) {

    if (_vm_object_in_map(kernel_map, object, 0))

DB_SHOW_COMMAND(vmochk, vm_object_check)
{

            db_printf(
                "vmochk: internal obj has zero ref count: %ld\n",
                (long)object->size);

        if (!vm_object_in_map(object)) {
            db_printf(
                "vmochk: internal obj is not in a map: "
                "ref: %d, size: %lu: 0x%lx, backing_object: %p\n",

                (u_long)object->size,
DB_SHOW_COMMAND(object, vm_object_print_static)
{
    boolean_t full = have_addr;

#define count was_count

    db_iprintf(
        "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x ruid %d charge %jx\n",
        object, (int)object->type, (uintmax_t)object->size,
        object->resident_page_count, object->ref_count, object->flags,
        object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge);
    db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",

    TAILQ_FOREACH(p, &object->memq, listq) {

            db_iprintf("memory:=");
        else if (count == 6) {

        db_printf("(off=0x%jx,page=0x%jx)",

    boolean_t have_addr,

    vm_object_print_static(addr, have_addr, count, modif);

DB_SHOW_COMMAND(vmopag, vm_object_print_pages)
{
    vm_page_t m, prev_m;

        db_printf("new object: %p\n", (void *)object);

        TAILQ_FOREACH(m, &object->memq, listq) {
            if (m->pindex > 128)

            if ((prev_m = TAILQ_PREV(m, pglist, listq)) != NULL &&
                prev_m->pindex + 1 != m->pindex) {

                db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
                    (long)fidx, rcount, (long)pa);

            db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
                (long)fidx, rcount, (long)pa);

        db_printf(" index(%ld)run(%d)pa(0x%lx)\n",
            (long)fidx, rcount, (long)pa);