#include <sys/kernel.h>
#include <sys/vmmeter.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
static void vmspace_zdtor(void *mem, int size, void *arg);
static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
    vm_offset_t failed_addr);
#define	ENTRY_CHARGED(e) ((e)->cred != NULL || \
	((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
	!((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
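/*
 * Illustrative use (a sketch, not code from this file): a caller that
 * must decide whether swap accounting already covers an entry could
 * test
 *
 *	if (!ENTRY_CHARGED(entry))
 *		(charge curthread->td_ucred for the entry's range)
 *
 * An entry counts as charged when it holds a credential itself, or when
 * its backing object is charged and no COW copy is still pending.
 */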
#define	PROC_VMSPACE_LOCK(p) do { } while (0)
#define	PROC_VMSPACE_UNLOCK(p) do { } while (0)
#define	VM_MAP_RANGE_CHECK(map, start, end)		\
	{						\
	if (start < vm_map_min(map))			\
		start = vm_map_min(map);		\
	if (end > vm_map_max(map))			\
		end = vm_map_max(map);			\
	if (start > end)				\
		start = end;				\
	}
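/*
 * Usage sketch with hypothetical numbers: for a map spanning
 * [0x10000, 0x7fffffff], VM_MAP_RANGE_CHECK(map, start, end) clamps a
 * request of start = 0, end = 0xffffffff to that span before the range
 * is operated on.
 */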
#ifndef UMA_MD_SMALL_ALLOC
		panic("%s: kernel map is exhausted", __func__);
		panic("%s: vm_map_insert() failed: %d", __func__, error);
	    M_USE_RESERVE | (wait & M_ZERO));
	return ((void *)addr);
	addr = (vm_offset_t)item;
	    ("%s: vm_map_remove failed: %d", __func__, error));
#define	KMAPENT_RESERVE	1
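/*
 * Rationale, as suggested by the fragments above: keeping one entry in
 * reserve lets an allocation for the kernel map itself complete without
 * recursing for a map entry; kmapent_alloc() leans on M_USE_RESERVE for
 * the same reason.
 */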
	mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
#ifndef UMA_MD_SMALL_ALLOC
	memset(map, 0, sizeof(*map));
	mtx_init(&map->system_mtx, "vm map (system)", NULL,
	    MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
vmspace_zdtor(void *mem, int size, void *arg)
	    ("vmspace %p size == %ju on free", vm, (uintmax_t)vm->vm_map.size));
	KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
	CTR1(KTR_VM, "vmspace_alloc: %p", vm);
vmspace_container_reset(struct proc *p)
	racct_set(p, RACCT_DATA, 0);
	racct_set(p, RACCT_STACK, 0);
	racct_set(p, RACCT_RSS, 0);
	racct_set(p, RACCT_MEMLOCK, 0);
	racct_set(p, RACCT_VMEM, 0);
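/*
 * Zeroing every racct resource detaches the process from accounting
 * for its old address space; racct_set() normally expects the process
 * to be locked by the caller.
 */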
	CTR1(KTR_VM, "vmspace_free: %p", vm);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "vmspace_free() called");
	KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
	refcount_acquire(&vmspace0.vm_refcnt);
	if (!(released = refcount_release_if_last(&vm->vm_refcnt))) {
		if (p->p_vmspace != &vmspace0) {
			p->p_vmspace = &vmspace0;
		released = refcount_release(&vm->vm_refcnt);
	if (p->p_vmspace != vm) {
		p->p_vmspace = &vmspace0;
	vmspace_container_reset(p);
	if (vm == NULL || !refcount_acquire_if_not_zero(&vm->vm_refcnt)) {
	if (vm != p->p_vmspace) {
	KASSERT(refcount_load(&newvm->vm_refcnt) > 0,
	    ("vmspace_switch_aio: newvm unreferenced"));
	oldvm = curproc->p_vmspace;
	curproc->p_vmspace = newvm;
	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_xlock_(&map->lock, file, line);
	    ("Submap with execs"));
	KASSERT(object != NULL, ("No object for text, entry %p", entry));
		object = object->handle;
	    ("non-anon object %p shadows", object));
	KASSERT(object != NULL, ("No content object for text, entry %p obj %p",
		VOP_SET_TEXT_CHECKED(vp);
		vn_lock(vp, LK_SHARED | LK_RETRY);
		VOP_UNSET_TEXT_CHECKED(vp);
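/*
 * Executable mappings of a vnode are tracked so the filesystem can
 * refuse writes to busy text (ETXTBSY): adding a mapping calls
 * VOP_SET_TEXT_CHECKED(), and removal locks the vnode shared before
 * calling VOP_UNSET_TEXT_CHECKED(), as above.
 */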
#define	defer_next	right
	entry = td->td_map_def_user;
	td->td_map_def_user = NULL;
	while (entry != NULL) {
		next = entry->defer_next;
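/*
 * Deferred entries are chained through their otherwise unused "right"
 * pointer, aliased to defer_next above.  The per-thread list is
 * detached from td_map_def_user in one step so it can be walked and
 * freed after the map lock has been dropped.
 */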
	    ("Submap with writecount"));
	KASSERT(object != NULL, ("No object for writecount"));
_vm_map_assert_locked(vm_map_t map, const char *file, int line)
	if (map->system_map)
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
	else
		sx_assert_(&map->lock, SA_XLOCKED, file, line);

#define	VM_MAP_ASSERT_LOCKED(map) \
	_vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
enum { VMMAP_CHECK_NONE, VMMAP_CHECK_UNLOCK, VMMAP_CHECK_ALL };
#ifdef DIAGNOSTIC
static int enable_vmmap_check = VMMAP_CHECK_UNLOCK;
#else
static int enable_vmmap_check = VMMAP_CHECK_NONE;
#endif
SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
    &enable_vmmap_check, 0, "Enable vm map consistency checking");
static void _vm_map_assert_consistent(vm_map_t map, int check);

#define	VM_MAP_ASSERT_CONSISTENT(map) \
	_vm_map_assert_consistent(map, VMMAP_CHECK_ALL)
#ifdef DIAGNOSTIC
#define	VM_MAP_UNLOCK_CONSISTENT(map) do {				\
	if (map->nupdates > map->nentries) {				\
		_vm_map_assert_consistent(map, VMMAP_CHECK_UNLOCK);	\
		map->nupdates = 0;					\
	}								\
} while (0)
#else
#define	VM_MAP_UNLOCK_CONSISTENT(map)
#endif
#else
#define	VM_MAP_ASSERT_LOCKED(map)
#define	VM_MAP_ASSERT_CONSISTENT(map)
#define	VM_MAP_UNLOCK_CONSISTENT(map)
#endif
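/*
 * Example (a sketch): on a kernel built with INVARIANTS, full tree
 * verification can be requested at run time with something like
 *
 *	sysctl debug.vmmap_check=2
 *
 * where 2 corresponds to VMMAP_CHECK_ALL in the enum above.
 */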
#ifndef UMA_MD_SMALL_ALLOC
		map->flags &= ~MAP_REPLENISH;
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	} else {
		sx_xunlock_(&map->lock, file, line);
	if (map->system_map)
		mtx_lock_flags_(&map->system_mtx, 0, file, line);
	else
		sx_slock_(&map->lock, file, line);
		    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
	} else {
		sx_sunlock_(&map->lock, file, line);
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_xlock_(&map->lock, file, line);
	    !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
	    !sx_try_slock_(&map->lock, file, line);
	unsigned int last_timestamp;
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
		if (!sx_try_upgrade_(&map->lock, file, line)) {
			sx_sunlock_(&map->lock, file, line);
			sx_xlock_(&map->lock, file, line);
				sx_xunlock_(&map->lock, file, line);
	    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
		sx_downgrade_(&map->lock, file, line);
	return (sx_xlocked(&map->lock));
	    ("%s: MAP_REPLENISH leaked", __func__));
		mtx_unlock_flags_(&map->system_mtx, 0, file, line);
		sx_xunlock_(&map->lock, file, line);
	KASSERT(map->busy, ("vm_map_unbusy: not busy"));
		sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
	mtx_init(&map->system_mtx, "vm map (system)", NULL,
	    MTX_DEF | MTX_DUPOK);
	sx_init(&map->lock, "vm map (user)");
#ifndef UMA_MD_SMALL_ALLOC
	if (new_entry == NULL) {
		    M_NOWAIT | M_NOVM | M_USE_RESERVE);
	KASSERT(new_entry != NULL,
	    ("vm_map_entry_create: kernel resources exhausted"));
	entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
static inline vm_size_t
	return (root->left != left_ancestor ?
static inline vm_size_t
	return (root->right != right_ancestor ?
	prior = entry->left;
			prior = prior->right;
		while (prior->right != entry);
static inline vm_size_t
vm_size_max(vm_size_t a, vm_size_t b)
{
	return (a > b ? a : b);
}
#define	SPLAY_LEFT_STEP(root, y, llist, rlist, test) do {		\
	vm_size_t max_free;						\
	max_free = root->max_free;					\
	KASSERT(max_free == vm_size_max(				\
	    vm_map_entry_max_free_left(root, llist),			\
	    vm_map_entry_max_free_right(root, rlist)),			\
	    ("%s: max_free invariant fails", __func__));		\
	if (max_free - 1 < vm_map_entry_max_free_left(root, llist))	\
		max_free = vm_map_entry_max_free_right(root, rlist);	\
	if (y != llist && (test)) {					\
		if (max_free < y->max_free)				\
			root->max_free = max_free =			\
			    vm_size_max(max_free, z->max_free);		\
	} else if (max_free < y->max_free)				\
		root->max_free = max_free =				\
		    vm_size_max(max_free, root->start - y->end);	\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_right(root, rlist),	\
	    ("%s: max_free not copied from right", __func__));		\
	root->left = rlist;						\
	root = y != llist ? y : NULL;					\
#define	SPLAY_RIGHT_STEP(root, y, llist, rlist, test) do {		\
	vm_size_t max_free;						\
	max_free = root->max_free;					\
	KASSERT(max_free == vm_size_max(				\
	    vm_map_entry_max_free_left(root, llist),			\
	    vm_map_entry_max_free_right(root, rlist)),			\
	    ("%s: max_free invariant fails", __func__));		\
	if (max_free - 1 < vm_map_entry_max_free_right(root, rlist))	\
		max_free = vm_map_entry_max_free_left(root, llist);	\
	if (y != rlist && (test)) {					\
		if (max_free < y->max_free)				\
			root->max_free = max_free =			\
			    vm_size_max(max_free, z->max_free);		\
	} else if (max_free < y->max_free)				\
		root->max_free = max_free =				\
		    vm_size_max(max_free, y->start - root->end);	\
	root->max_free = max_free;					\
	KASSERT(max_free == vm_map_entry_max_free_left(root, llist),	\
	    ("%s: max_free not copied from left", __func__));		\
	root->right = llist;						\
	root = y != rlist ? y : NULL;					\
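/*
 * Both splay steps maintain the max_free augmentation: every entry
 * caches the largest free gap anywhere in its subtree, so a lookup can
 * dismiss an entire subtree with one comparison (root->max_free >=
 * length, as in the search loop below).  The KASSERTs in the macros
 * recheck the invariant
 *
 *	max_free == vm_size_max(max_free_left, max_free_right)
 *
 * on every rotation.
 */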
	left = right = &map->header;
	while (root != NULL && root->max_free >= length) {
		KASSERT(left->end <= root->start &&
		    ("%s: root not within tree bounds", __func__));
		if (addr < root->start) {
			    y->max_free >= length && addr < y->start);
		} else if (addr >= root->end) {
static __always_inline void
	hi = root->right == right ? NULL : root->right;
static __always_inline void
	lo = root->left == left ? NULL : root->left;
	} while (llist != header);
static inline vm_size_t
	max_free = root->start - llist->end;
	if (llist != header) {
		    root, max_free, llist);
		root->left = header;
		header->right = root;
static inline vm_size_t
	if (llist != header) {
		    root->left == llist ? root : root->left,
	} while (rlist != header);
static inline vm_size_t
	max_free = rlist->start - root->end;
	if (rlist != header) {
		    root, max_free, rlist);
		root->right = header;
		header->left = root;
static inline vm_size_t
	if (rlist != header) {
	vm_size_t max_free_left, max_free_right;
	} else if (llist != header) {
		llist = root->right;
	} else if (rlist != header) {
	vm_size_t max_free_left, max_free_right;
	CTR3(KTR_VM,
	    "vm_map_entry_link: map %p, nentries %d, entry %p", map,
	KASSERT(entry->end < root->end,
	    ("%s: clip_start not within entry", __func__));
	KASSERT(entry->end == root->end,
	    ("%s: clip_start not within entry", __func__));
	vm_size_t max_free_left, max_free_right;
	KASSERT(root != NULL,
	    ("vm_map_entry_unlink: unlink object not mapped"));
	if (llist != header) {
		llist = root->right;
	} else if (rlist != header) {
	CTR3(KTR_VM,
	    "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
	KASSERT(root != NULL, ("%s: resize object not mapped", __func__));
	entry->end += grow_amount;
	CTR4(KTR_VM, "%s: map %p, nentries %d, entry %p",
	    __func__, map, map->nentries, entry);
    vm_offset_t address,
	if (address >= cur->start && cur->end > address) {
		    sx_try_upgrade(&map->lock)) {
			sx_downgrade(&map->lock);
		if (address < cur->start) {
		return (address < cur->end);
	lbound = ubound = header;
		if (address < cur->start) {
		} else if (cur->end <= address) {
	    ("vm_map_insert: kernel object and COW"));
	KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0 ||
	    ("vm_map_insert: paradoxical MAP_NOFAULT request, obj %p cow %#x",
	KASSERT((prot & ~max) == 0,
	    ("prot %#x is not subset of max_prot %#x", prot, max));
	if (next_entry->start < end)
	if (bidx >= MAXPAGESIZES)
	bdry = pagesizes[bidx] - 1;
	if ((start & bdry) != 0 || (end & bdry) != 0)
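/*
 * Worked example with hypothetical numbers: for a 2 MB large-page
 * boundary, bdry = 0x1fffff, so start passes the check above only when
 * (start & bdry) == 0, i.e. start is 2 MB aligned; end is tested the
 * same way.
 */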
	KASSERT(object == NULL ||
	    object->cred == NULL,
	    ("overcommit: vm_map_insert o %p", object));
	cred = curthread->td_ucred;
	if (object != NULL) {
	    prev_entry->end == start && (prev_entry->cred == cred ||
		    (vm_size_t)(prev_entry->end - prev_entry->start),
		    (vm_size_t)(end - prev_entry->end), cred != NULL &&
		    0, ("prev_entry %p has incoherent wiring",
			map->size += end - prev_entry->end;
			    end - prev_entry->end);
		offset = prev_entry->offset +
		    (prev_entry->end - prev_entry->start);
	if (cred != NULL && object != NULL && object->cred != NULL &&
	new_entry->start = start;
	new_entry->end = end;
	new_entry->cred = NULL;
	new_entry->eflags = protoeflags;
	new_entry->offset = offset;
	    ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
	new_entry->cred = cred;
	vm_size_t left_length, max_free_left, max_free_right;
	vm_offset_t gap_end;
	if (map->root == NULL)
		gap_end = rlist->start;
		if (root->right != rlist)
	} else if (rlist != header) {
		llist = root->right;
	if (length <= gap_end - start)
	llist = rlist = header;
	for (left_length = 0;;
		if (length <= left_length)
			llist = root->right;
	if (rlist == header) {
    vm_offset_t start, vm_size_t length, vm_prot_t prot,
	end = start + length;
	    ("vm_map_fixed: non-NULL backing object for stack"));
    "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");
    "Number of aslr failures");
    vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
    vm_offset_t alignment)
	vm_offset_t aligned_addr, free_addr;
	    ("caller failed to provide space %#jx at address %p",
	    (uintmax_t)length, (void *)free_addr));
	*addr = roundup2(*addr, alignment);
	aligned_addr = *addr;
	if (aligned_addr == free_addr) {
	if (aligned_addr < free_addr)
	    (max_addr != 0 && *addr + length > max_addr))
	if (free_addr == aligned_addr) {
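/*
 * roundup2() rounds up to a power-of-two boundary; with an assumed
 * alignment of 0x10000, an address of 0x12345 becomes 0x20000 before
 * the fit against free_addr and max_addr is rechecked.
 */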
    vm_offset_t max_addr, vm_offset_t alignment)
	    (max_addr != 0 && *addr + length > max_addr))
    vm_size_t length, vm_offset_t max_addr, int find_space,
	vm_offset_t alignment, curr_min_addr, min_addr;
	int gap, pidx, rv, try;
	bool cluster, en_aslr, update_anon;
	    ("vm_map_find: non-NULL backing object for stack"));
	if (find_space >> 8 != 0) {
		KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
		alignment = (vm_offset_t)1 << (find_space >> 8);
	curr_min_addr = min_addr = *addr;
	if (en_aslr && min_addr == 0 && !cluster &&
	if (curr_min_addr == 0)
	    alignment != 0, ("unexpected VMFS flag"));
	if (try == 1 && en_aslr && !cluster) {
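/*
 * The VMFS_* encoding packs a log2 alignment into the high bits of
 * find_space.  For example (a sketch), a caller wanting 64 KB alignment
 * passes a value with (find_space >> 8) == 16, which the code above
 * turns into alignment = (vm_offset_t)1 << 16.
 */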
		pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
		gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
		    (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
		    length + gap * pagesizes[pidx]);
		if (*addr + length + gap * pagesizes[pidx] >
		*addr += (arc4random() % gap) * pagesizes[pidx];
		if (max_addr != 0 && *addr + length > max_addr)
	    (max_addr != 0 && *addr + length > max_addr)) {
		curr_min_addr = min_addr;
		cluster = update_anon;
	rv = vm_map_insert(map, object, offset, *addr, *addr + length,
    vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
	rv = vm_map_find(map, object, offset, addr, length, max_addr,
	    find_space, prot, max, cow);
	*addr = hint = min_addr;
#define	MAP_ENTRY_NOMERGE_MASK	(MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
	MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
	    ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
	return (prev->end == entry->start &&
	if (entry->cred != NULL)
		crfree(entry->cred);
	    ("map entry %p has backing object", entry));
	    ("map entry %p is a submap", entry));
	    ("map entry %p is a submap", entry));
	    entry->cred != NULL) {
		    ("OVERCOMMIT: %s: both cred e %p", __func__, entry));
	*new_entry = *entry;
	if (new_entry->cred != NULL)
		crhold(entry->cred);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "%s: map %p entry %p start 0x%jx", __func__, map, entry,
	    (uintmax_t)startaddr);
	if (startaddr <= entry->start)
	KASSERT(entry->end > startaddr && entry->start < startaddr,
	    ("%s: invalid clip of entry %p", __func__, entry));
	if (bdry_idx != 0) {
		if ((startaddr & (pagesizes[bdry_idx] - 1)) != 0)
	new_entry->end = startaddr;
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "%s: map %p start 0x%jx prev %p", __func__, map,
	    (uintmax_t)start, prev_entry);
	entry = *prev_entry;
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "%s: map %p entry %p end 0x%jx", __func__, map, entry,
	    (uintmax_t)endaddr);
	if (endaddr >= entry->end)
	KASSERT(entry->start < endaddr && entry->end > endaddr,
	    ("%s: invalid clip of entry %p", __func__, entry));
	if (bdry_idx != 0) {
		if ((endaddr & (pagesizes[bdry_idx] - 1)) != 0)
	new_entry->start = endaddr;
	submap->flags &= ~MAP_IS_SUB_MAP;
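/*
 * Clipping splits a map entry at an address so that later operations
 * apply to an exact [start, end) range: vm_map_clip_start() carves off
 * the portion below startaddr, vm_map_clip_end() the portion above
 * endaddr, and both refuse a split that is not aligned to the entry's
 * split boundary (bdry_idx), as the checks above show.
 */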
#define	MAX_INIT_PT	96
    vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
	vm_page_t p, p_start;
	vm_pindex_t mask, psize, threshold, tmpidx;
	if (psize + pindex > object->size) {
		if (pindex >= object->size) {
		psize = object->size - pindex;
	    p != NULL && (tmpidx = p->pindex - pindex) < psize;
	    p = TAILQ_NEXT(p, listq)) {
		    vm_page_count_severe()) ||
		    tmpidx >= threshold)) {
			if (p_start == NULL) {
				start = addr + ptoa(tmpidx);
		if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
		    (pagesizes[p->psind] - 1)) == 0) {
			mask = atop(pagesizes[p->psind]) - 1;
			if (tmpidx + mask < psize &&
		} else if (p_start != NULL) {
			    ptoa(tmpidx), p_start, prot);
		if (p_start != NULL)
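/*
 * The prefault loop above enters a superpage-sized run only when the
 * run is aligned for p->psind both within the object (tmpidx + mask <
 * psize) and in virtual space (addr + ptoa(tmpidx) is a multiple of
 * pagesizes[p->psind]); otherwise pages accumulate into a [p_start, p)
 * run that is entered with base-page mappings.
 */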
	    (new_prot & new_maxprot) != new_prot)
	for (entry = first_entry; entry->start < end;
	if (in_tran != NULL) {
	for (entry = first_entry; entry->start < end;
	cred = curthread->td_ucred;
			KASSERT(obj->charge == 0,
			    ("vm_map_protect: object %p overcharged (entry %p)",
#define	MASK(entry)	(((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
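/*
 * MASK(entry) withholds VM_PROT_WRITE from the pmap for copy-on-write
 * entries: the hardware mapping stays read-only until a write fault
 * copies the page, even when the entry's protection includes write.
 */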
	case MADV_SEQUENTIAL:
	for (; entry->start < end; prev_entry = entry,
		case MADV_SEQUENTIAL:
			entry->eflags &= ~MAP_ENTRY_NOSYNC;
			entry->eflags &= ~MAP_ENTRY_NOCOREDUMP;
	vm_pindex_t pstart, pend;
	for (; entry->start < end;
		vm_offset_t useEnd, useStart;
		if (behav == MADV_FREE &&
		pend = pstart + atop(entry->end - entry->start);
		useStart = entry->start;
		useEnd = entry->end;
		if (entry->start < start) {
			pstart += atop(start - entry->start);
		if (entry->end > end) {
			pend -= atop(entry->end - end);
		if (behav == MADV_DONTNEED || behav == MADV_FREE)
		if (behav == MADV_WILLNEED &&
			    ptoa(pend - pstart),
	switch (new_inheritance) {
	for (entry = start_entry; entry->start < end;
	for (entry = start_entry; entry->start < end; prev_entry = entry,
		KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx",
		    entry, (uintmax_t)entry->end, (uintmax_t)end));
	u_int last_timestamp;
	    ("not in-transition map entry %p", in_entry));
	start = MAX(in_start, in_entry->start);
	if (last_timestamp + 1 == map->timestamp)
	bool holes_ok, need_wakeup, user_unwire;
	for (entry = first_entry; entry->start < end; entry = next_entry) {
		    &end, holes_ok, entry);
		if (next_entry == NULL) {
			if (entry == first_entry) {
		first_entry = (entry == first_entry) ?
		    ("owned map entry %p", entry));
		    entry->end < end && next_entry->start > entry->end) {
	need_wakeup = false;
	if (first_entry == NULL &&
		KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
		prev_entry = first_entry;
		entry = first_entry;
	for (; entry->start < end;
		    entry->wiring_thread != curthread) {
			    ("vm_map_unwire: !HOLESOK and new/changed entry"));
			if (entry->wired_count == 1)
			entry->wired_count--;
			entry->eflags &= ~MAP_ENTRY_USER_WIRED;
		    ("vm_map_unwire: in-transition flag missing %p", entry));
		KASSERT(entry->wiring_thread == curthread,
		    ("vm_map_unwire: alien wire %p", entry));
		entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
		entry->wiring_thread = NULL;
			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
    vm_offset_t failed_addr)
	    ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
	KASSERT(failed_addr < entry->end,
	    ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
	if (failed_addr > entry->start) {
	vm_offset_t faddr, saved_end, saved_start;
	u_long incr, npages;
	u_int bidx, last_timestamp;
	bool holes_ok, need_wakeup, user_wire;
	for (entry = first_entry; entry->start < end; entry = next_entry) {
		    &end, holes_ok, entry);
		if (next_entry == NULL) {
			if (entry == first_entry)
		first_entry = (entry == first_entry) ?
		    ("owned map entry %p", entry));
		npages = atop(entry->end - entry->start);
		saved_start = entry->start;
		saved_end = entry->end;
			incr = pagesizes[bidx];
		for (faddr = saved_start; faddr < saved_end;
			if (last_timestamp + 1 != map->timestamp) {
				    ("vm_map_wire: lookup failed"));
				first_entry = (entry == first_entry) ?
				for (entry = next_entry; entry->end < saved_end;
		} else if (!user_wire ||
		    entry->end < end && next_entry->start > entry->end) {
	need_wakeup = false;
	if (first_entry == NULL &&
		KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
		prev_entry = first_entry;
		entry = first_entry;
	for (; entry->start < end;
			    ("vm_map_wire: !HOLESOK and new/changed entry"));
		} else if (!user_wire ||
		    ("vm_map_wire: in-transition flag missing %p", entry));
		    ("vm_map_wire: alien wire %p", entry));
		entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
    boolean_t invalidate)
	vm_ooffset_t offset;
	unsigned int last_timestamp;
	} else if (start == end) {
		start = first_entry->start;
		end = first_entry->end;
	for (entry = first_entry; entry->start < end; entry = next_entry) {
		bdry_idx = (entry->eflags &
		if (bdry_idx != 0 &&
		    ((start & (pagesizes[bdry_idx] - 1)) != 0 ||
		    (end & (pagesizes[bdry_idx] - 1)) != 0)) {
		if (end > entry->end &&
	for (entry = first_entry; entry->start < end;) {
		size = (end <= entry->end ? end : entry->end) - start;
			tsize = tentry->end - offset;
			offset = tentry->offset + (offset - tentry->start);
	    ("vm_map_entry_unwire: entry %p isn't wired", entry));
	vm_pindex_t offidxstart, offidxend, size1;
	MPASS(entry->cred == NULL);
	MPASS(object == NULL);
	if (entry->cred != NULL) {
		crfree(entry->cred);
	KASSERT(entry->cred == NULL || object->cred == NULL ||
	    ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
	offidxend = offidxstart + atop(size);
	if (offidxend >= object->size &&
	    offidxstart < object->size) {
		size1 = object->size;
		object->size = offidxstart;
		if (object->cred != NULL) {
			size1 -= object->size;
			KASSERT(object->charge >= ptoa(size1),
			    ("object %p charge < 0", object));
			object->charge -= ptoa(size1);
	entry->defer_next = curthread->td_map_def_user;
	curthread->td_map_def_user = entry;
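/*
 * Rather than freeing in place, the dead entry is pushed onto the
 * current thread's td_map_def_user list (the defer_next chain seen
 * earlier); vm_map_process_deferred() releases the object and
 * credential once the map lock is dropped, since those releases may
 * sleep.
 */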
	for (; entry->start < end; entry = next_entry) {
		unsigned int last_timestamp;
		vm_offset_t saved_start;
		saved_start = entry->start;
		if (last_timestamp + 1 != map->timestamp) {
			    &next_entry, &scratch_entry);
	while (start < end) {
		if (start < entry->start)
		if ((entry->protection & protection) != protection)
    vm_offset_t size, vm_ooffset_t *fork_charge)
	if (src_entry->cred != NULL &&
		KASSERT(src_object->cred == NULL,
		    ("OVERCOMMIT: vm_map_copy_anon_entry: cred %p",
		src_object->cred = src_entry->cred;
		src_object->charge = size;
		cred = curthread->td_ucred;
		dst_entry->cred = cred;
		*fork_charge += size;
		src_entry->cred = cred;
		*fork_charge += size;
    vm_ooffset_t *fork_charge)
	size = src_entry->end - src_entry->start;
			src_entry->eflags &= ~MAP_ENTRY_WRITECNT;
			fake_entry->end = src_entry->end;
			fake_entry->defer_next =
			    curthread->td_map_def_user;
			curthread->td_map_def_user = fake_entry;
	if (src_entry->cred != NULL) {
		dst_entry->cred = curthread->td_ucred;
		crhold(dst_entry->cred);
		*fork_charge += size;
	vm_size_t entrysize;
	entrysize = entry->end - entry->start;
	newend = MIN(entry->end,
	newend = MIN(entry->end,
	KASSERT(locked, ("vmspace_fork: lock failed"));
	error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
		sx_xunlock(&old_map->lock);
		sx_xunlock(&new_map->lock);
			panic("vm_map_fork: encountered a submap");
			if (object == NULL) {
				old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
				old_entry->cred = NULL;
			if (old_entry->cred != NULL) {
				KASSERT(object->cred == NULL,
				    ("vmspace_fork both cred"));
				object->cred = old_entry->cred;
				object->charge = old_entry->end -
				old_entry->cred = NULL;
				KASSERT(((struct vnode *)object->
				    handle)->v_writecount > 0,
				    ("vmspace_fork: v_writecount %p",
				    ("vmspace_fork: vnp.writecount %p",
			*new_entry = *old_entry;
			    (old_entry->end - old_entry->start),
			*new_entry = *old_entry;
			new_entry->cred = NULL;
			    new_entry, fork_charge);
			memset(new_entry, 0, sizeof(*new_entry));
			new_entry->end = old_entry->end;
			new_entry->cred = curthread->td_ucred;
			crhold(new_entry->cred);
			*fork_charge += (new_entry->end - new_entry->start);
	sx_xunlock(&old_map->lock);
	sx_xunlock(&new_map->lock);
	vm_size_t growsize, init_ssize;
	init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
	if (map->size + init_ssize > vmemlim) {
    "Specifies the number of guard pages for a stack that grows");
	vm_offset_t bot, gap_bot, gap_top, top;
	vm_size_t init_ssize, sgp;
	KASSERT(orient != 0, ("No stack grow direction"));
	if (max_ssize == 0 ||
	sgp = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
	    (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
	if (sgp >= max_ssize)
	init_ssize = growsize;
	if (max_ssize < init_ssize + sgp)
		init_ssize = max_ssize - sgp;
		bot = addrbos + max_ssize - init_ssize;
		top = bot + init_ssize;
		top = bot + init_ssize;
		gap_top = addrbos + max_ssize;
	KASSERT(new_entry->end == top || new_entry->start == bot,
	    ("Bad entry start/end for new stack entry"));
	    ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
	    ("new entry lacks MAP_ENTRY_GROWS_UP"));
	if (gap_bot == gap_top)
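/*
 * Layout sketch for a downward-growing stack of max_ssize bytes at
 * addrbos (assuming stack gaps are not disabled): the top init_ssize
 * bytes [bot, top) are mapped immediately, and the remainder
 * [addrbos, bot) becomes the gap entry that vm_map_growstack() later
 * converts into stack pages on demand, always preserving "guard"
 * bytes of it.
 */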
	vm_offset_t gap_end, gap_start, grow_start;
	vm_size_t grow_amount, guard, max_grow;
	rlim_t lmemlim, stacklim, vmemlim;
	bool gap_deleted, grow_down, is_procstack;
	if (p != initproc && (map != &p->p_vmspace->vm_map ||
	    p->p_textvp == NULL))
	lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
	stacklim = lim_cur(curthread, RLIMIT_STACK);
	vmemlim = lim_cur(curthread, RLIMIT_VMEM);
	    stack_entry->start != gap_entry->end)
	grow_amount = round_page(stack_entry->start - addr);
	    stack_entry->end != gap_entry->start)
	grow_amount = round_page(addr + 1 - stack_entry->end);
	guard = ((curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ||
	    (curproc->p_fctl0 & NT_FREEBSD_FCTL_STKGAP_DISABLE) != 0) ? 0 :
	max_grow = gap_entry->end - gap_entry->start;
	if (guard > max_grow)
	if (grow_amount > max_grow)
	is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
	if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
	if (is_procstack && racct_set(p, RACCT_STACK,
	    ctob(vm->vm_ssize) + grow_amount)) {
		grow_amount = roundup(grow_amount, sgrowsiz);
		if (grow_amount > max_grow)
			grow_amount = max_grow;
		if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
			grow_amount = trunc_page((vm_size_t)stacklim) -
		limit = racct_get_available(p, RACCT_STACK);
		if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
			grow_amount = limit - ctob(vm->vm_ssize);
		if (racct_set(p, RACCT_MEMLOCK,
	if (map->size + grow_amount > vmemlim) {
		if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
	grow_start = gap_entry->end - grow_amount;
	if (gap_entry->start + grow_amount == gap_entry->end) {
		gap_start = gap_entry->start;
		gap_end = gap_entry->end;
		MPASS(gap_entry->start < gap_entry->end - grow_amount);
		gap_deleted = false;
	    grow_start + grow_amount,
		grow_start = stack_entry->end;
		cred = stack_entry->cred;
		    (vm_size_t)(stack_entry->end - stack_entry->start),
		    grow_amount, cred != NULL)) {
			if (gap_entry->start + grow_amount == gap_entry->end) {
				gap_entry->start += grow_amount;
				stack_entry->end += grow_amount;
			map->size += grow_amount;
	    grow_start + grow_amount,
		error = racct_set(p, RACCT_VMEM, map->size);
		KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
		error = racct_set(p, RACCT_MEMLOCK,
		KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
		error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
		KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
	struct vmspace *oldvmspace = p->p_vmspace;
	KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
	    ("vmspace_exec recursed"));
	if (newvmspace == NULL)
	p->p_vmspace = newvmspace;
	if (p == curthread->td_proc)
	curthread->td_pflags |= TDP_EXECVMSPC;
	struct vmspace *oldvmspace = p->p_vmspace;
	vm_ooffset_t fork_charge;
	if (refcount_load(&oldvmspace->vm_refcnt) == 1)
	if (newvmspace == NULL)
	p->p_vmspace = newvmspace;
	if (p == curthread->td_proc)
    vm_pindex_t *pindex,
	fault_typea &= ~VM_PROT_FAULT_LOOKUP;
		goto RetryLookupLocked;
	if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
	    ("entry %p flags %x", entry, entry->eflags));
		if (entry->cred == NULL) {
			cred = curthread->td_ucred;
			crfree(entry->cred);
			entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
		prot &= ~VM_PROT_WRITE;
		    NULL, entry->cred, entry->cred != NULL ? size : 0);
    vm_pindex_t *pindex,
	if ((fault_type & prot) != fault_type)
		prot &= ~VM_PROT_WRITE;
_vm_map_assert_consistent(vm_map_t map, int check)
	vm_size_t max_left, max_right;
	if (enable_vmmap_check != check)
	header = prev = &map->header;
		    ("map %p prev->end = %jx, start = %jx", map,
		    (uintmax_t)prev->end, (uintmax_t)entry->start));
		    ("map %p start = %jx, end = %jx", map,
		    (uintmax_t)entry->start, (uintmax_t)entry->end));
		KASSERT(entry->left == header ||
		    ("map %p left->start = %jx, start = %jx", map,
		KASSERT(entry->right == header ||
		    ("map %p start = %jx, right->start = %jx", map,
		lbound = ubound = header;
			KASSERT(cur != lbound,
			    ("map %p cannot find %jx",
			    map, (uintmax_t)entry->start));
			} else if (cur->end <= entry->start) {
				KASSERT(cur != ubound,
				    ("map %p cannot find %jx",
				    map, (uintmax_t)entry->start));
				KASSERT(cur == entry,
				    ("map %p cannot find %jx",
				    map, (uintmax_t)entry->start));
		    ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
		    (uintmax_t)max_left, (uintmax_t)max_right));
		    ("map %p prev->end = %jx, start = %jx", map,
		    (uintmax_t)prev->end, (uintmax_t)entry->start));
#include <sys/kernel.h>
	db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
	    (void *)map->pmap, map->nentries, map->timestamp);
	prev = &map->header;
		db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
		    (void *)entry, (void *)entry->start, (void *)entry->end,
			static const char * const inheritance_name[4] =
			    {"share", "copy", "none", "donate_copy"};
			db_iprintf(" prot=%x/%x/%s",
			    entry->max_protection,
			    inheritance_name[(int)(unsigned char)
			    entry->inheritance]);
			if (entry->wired_count != 0)
				db_printf(", wired");
			db_printf(", share=%p, offset=0x%jx\n",
			    (void *)entry->object.sub_map,
			    (uintmax_t)entry->offset);
			if (prev == &map->header ||
			    entry->object.sub_map) {
				vm_map_print((vm_map_t)entry->object.sub_map);
			if (entry->cred != NULL)
				db_printf(", ruid %d", entry->cred->cr_ruid);
			db_printf(", object=%p, offset=0x%jx",
			    (void *)entry->object.vm_object,
			    (uintmax_t)entry->offset);
			if (entry->object.vm_object && entry->object.vm_object->cred)
				db_printf(", obj ruid %d charge %jx",
				    entry->object.vm_object->cred->cr_ruid,
				    (uintmax_t)entry->object.vm_object->charge);
			db_printf(", copy (%s)",
			if (prev == &map->header ||
			    entry->object.vm_object) {
				entry->object.vm_object,
DB_SHOW_COMMAND(map, map)
		db_printf("usage: show map <addr>\n");
DB_SHOW_COMMAND(procvm, procvm)
	p = db_lookup_proc(addr);
	db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
	    (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
	vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
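/*
 * Example ddb session (a sketch):
 *
 *	show map 0xfffff80002e60000
 *	show procvm 1
 *
 * The first prints a single vm_map and its entries via vm_map_print();
 * the second looks up the process and prints its vmspace, map, and
 * pmap.
 */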