#include <sys/domainset.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
SYSCTL_ULONG(_vm, OID_AUTO, min_kernel_address, CTLFLAG_RD,
    SYSCTL_NULL_ULONG_PTR, VM_MIN_KERNEL_ADDRESS,
    "Min kernel address");

SYSCTL_ULONG(_vm, OID_AUTO, max_kernel_address, CTLFLAG_RD,
#if defined(__arm__)
    &vm_max_kernel_address, 0,
#else
    SYSCTL_NULL_ULONG_PTR, VM_MAX_KERNEL_ADDRESS,
#endif
    "Max kernel address");
#if VM_NRESERVLEVEL > 0
#define	KVA_QUANTUM_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)
#else
/* On non-superpage architectures we want large import sizes. */
#define	KVA_QUANTUM_SHIFT	(8 + PAGE_SHIFT)
#endif
#define	KVA_QUANTUM		(1ul << KVA_QUANTUM_SHIFT)
#define	KVA_NUMA_IMPORT_QUANTUM	(KVA_QUANTUM * 128)
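
/*
 * Worked example (assuming amd64, where VM_LEVEL_0_ORDER is 9 and
 * PAGE_SHIFT is 12): KVA_QUANTUM_SHIFT is 9 + 12 = 21, so KVA_QUANTUM
 * is 1ul << 21 = 2MB, the size of a level-0 superpage reservation, and
 * KVA_NUMA_IMPORT_QUANTUM is 2MB * 128 = 256MB of KVA imported per
 * refill of a per-domain arena.
 */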
static vm_page_t
kmem_alloc_contig_pages(vm_object_t object, vm_pindex_t pindex, int domain,
    int pflags, u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{
	vm_page_t m;
	int tries;
	bool wait, reclaim;

	VM_OBJECT_ASSERT_WLOCKED(object);

	wait = (pflags & VM_ALLOC_WAITOK) != 0;
	reclaim = (pflags & VM_ALLOC_NORECLAIM) == 0;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	pflags |= VM_ALLOC_NOWAIT;
	for (tries = wait ? 3 : 1;; tries--) {
		m = vm_page_alloc_contig_domain(object, pindex, domain, pflags,
		    npages, low, high, alignment, boundary, memattr);
		if (m != NULL || tries == 0 || !reclaim)
			break;

		VM_OBJECT_WUNLOCK(object);
		if (!vm_page_reclaim_contig_domain(domain, pflags, npages,
		    low, high, alignment, boundary) && wait)
			vm_wait_domain(domain);
		VM_OBJECT_WLOCK(object);
	}
	return (m);
}
static vm_offset_t
kmem_alloc_attr_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object;
	vm_offset_t addr, i, offset;
	vm_page_t m;
	vm_size_t asize;
	int pflags;

	object = kernel_object;
	asize = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, asize, M_BESTFIT | flags, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
	VM_OBJECT_WLOCK(object);
	for (i = 0; i < asize; i += PAGE_SIZE) {
		m = kmem_alloc_contig_pages(object, atop(offset + i),
		    domain, pflags, 1, low, high, PAGE_SIZE, 0, memattr);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			kmem_unback(object, addr, i);
			vmem_free(vmem, addr, asize);
			return (0);
		}
		KASSERT(vm_page_domain(m) == domain,
		    ("kmem_alloc_attr_domain: Domain mismatch %d != %d",
		    vm_page_domain(m), domain));
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		vm_page_valid(m);
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_RW,
		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
	}
	VM_OBJECT_WUNLOCK(object);
	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
	return (addr);
}
vm_offset_t
kmem_alloc_attr(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
    vm_memattr_t memattr)
{

	return (kmem_alloc_attr_domainset(DOMAINSET_RR(), size, flags, low,
	    high, memattr));
}
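
/*
 * Usage sketch (illustrative, not taken from this file): wired, zeroed
 * pages below 4GB for a hypothetical device that cannot address 64-bit
 * physical memory:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_attr(8 * PAGE_SIZE, M_WAITOK | M_ZERO, 0,
 *	    0xffffffffUL, VM_MEMATTR_DEFAULT);
 *	...
 *	kmem_free(va, 8 * PAGE_SIZE);
 */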
vm_offset_t
kmem_alloc_attr_domainset(struct domainset *ds, vm_size_t size, int flags,
    vm_paddr_t low, vm_paddr_t high, vm_memattr_t memattr)
{
	struct vm_domainset_iter di;
	vm_offset_t addr;
	int domain;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		addr = kmem_alloc_attr_domain(domain, size, flags, low, high,
		    memattr);
		if (addr != 0)
			break;
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (addr);
}
static vm_offset_t
kmem_alloc_contig_domain(int domain, vm_size_t size, int flags, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	vmem_t *vmem;
	vm_object_t object;
	vm_offset_t addr, offset, tmp;
	vm_page_t end_m, m;
	vm_size_t asize;
	u_long npages;
	int pflags;

	object = kernel_object;
	asize = round_page(size);
	vmem = vm_dom[domain].vmd_kernel_arena;
	if (vmem_alloc(vmem, asize, flags | M_BESTFIT, &addr))
		return (0);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
	npages = atop(asize);
	VM_OBJECT_WLOCK(object);
	m = kmem_alloc_contig_pages(object, atop(offset), domain,
	    pflags, npages, low, high, alignment, boundary, memattr);
	if (m == NULL) {
		VM_OBJECT_WUNLOCK(object);
		vmem_free(vmem, addr, asize);
		return (0);
	}
	KASSERT(vm_page_domain(m) == domain,
	    ("kmem_alloc_contig_domain: Domain mismatch %d != %d",
	    vm_page_domain(m), domain));
	end_m = m + npages;
	tmp = addr;
	for (; m < end_m; m++) {
		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		vm_page_valid(m);
		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
		tmp += PAGE_SIZE;
	}
	VM_OBJECT_WUNLOCK(object);
	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
	return (addr);
}
vm_offset_t
kmem_alloc_contig(vm_size_t size, int flags, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary, vm_memattr_t memattr)
{

	return (kmem_alloc_contig_domainset(DOMAINSET_RR(), size, flags, low,
	    high, alignment, boundary, memattr));
}
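
/*
 * Usage sketch (illustrative, not taken from this file): a 64KB DMA
 * ring that must be physically contiguous and 64KB-aligned:
 *
 *	vm_offset_t ring;
 *
 *	ring = kmem_alloc_contig(65536, M_WAITOK | M_ZERO, 0,
 *	    ~(vm_paddr_t)0, 65536, 0, VM_MEMATTR_DEFAULT);
 */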
vm_offset_t
kmem_alloc_contig_domainset(struct domainset *ds, vm_size_t size, int flags,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr)
{
	struct vm_domainset_iter di;
	vm_offset_t addr;
	int domain;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		addr = kmem_alloc_contig_domain(domain, size, flags, low, high,
		    alignment, boundary, memattr);
		if (addr != 0)
			break;
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (addr);
}
void
kmem_subinit(vm_map_t map, vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
    vm_size_t size, bool superpage_align)
{
	int ret;

	size = round_page(size);

	*min = vm_map_min(parent);
	ret = vm_map_find(parent, NULL, 0, min, size, 0, superpage_align ?
	    VMFS_SUPER_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
	    MAP_ACC_NO_CHARGE);
	if (ret != KERN_SUCCESS)
		panic("kmem_subinit: bad status return of %d", ret);
	*max = *min + size;
	vm_map_init(map, vm_map_pmap(parent), *min, *max);
	if (vm_map_submap(parent, *min, *max, map) != KERN_SUCCESS)
		panic("kmem_subinit: unable to change range to submap");
}
static vm_offset_t
kmem_malloc_domain(int domain, vm_size_t size, int flags)
{
	vmem_t *arena;
	vm_offset_t addr;
	vm_size_t asize;
	int rv;

	if (__predict_true((flags & M_EXEC) == 0))
		arena = vm_dom[domain].vmd_kernel_arena;
	else
		arena = vm_dom[domain].vmd_kernel_rwx_arena;
	asize = round_page(size);
	if (vmem_alloc(arena, asize, flags | M_BESTFIT, &addr))
		return (0);

	rv = kmem_back_domain(domain, kernel_object, addr, asize, flags);
	if (rv != KERN_SUCCESS) {
		vmem_free(arena, addr, asize);
		return (0);
	}
	kasan_mark((void *)addr, size, asize, KASAN_KMEM_REDZONE);
	return (addr);
}
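
/*
 * kmem_malloc() is the page-granularity back end used by malloc(9) and
 * UMA for large requests; direct callers pair it with kmem_free().  A
 * minimal sketch:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(3 * PAGE_SIZE, M_WAITOK | M_ZERO);
 *	...
 *	kmem_free(va, 3 * PAGE_SIZE);
 */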
int
kmem_back_domain(int domain, vm_object_t object, vm_offset_t addr,
    vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_page_t m, mpred;
	vm_prot_t prot;
	int pflags;

	KASSERT(object == kernel_object,
	    ("kmem_back_domain: only supports kernel object."));

	offset = addr - VM_MIN_KERNEL_ADDRESS;
	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
	if (flags & M_WAITOK)
		pflags |= VM_ALLOC_WAITFAIL;
	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;

	i = 0;
	VM_OBJECT_WLOCK(object);
retry:
	mpred = vm_radix_lookup_le(&object->rtree, atop(offset + i));
	for (; i < size; i += PAGE_SIZE, mpred = m) {
		m = vm_page_alloc_domain_after(object, atop(offset + i),
		    domain, pflags, mpred);

		/*
		 * Ran out of space, free everything up and return.  Don't
		 * need to lock page queues here as we know that the pages we
		 * got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0)
				goto retry;
			VM_OBJECT_WUNLOCK(object);
			kmem_unback(object, addr, i);
			return (KERN_NO_SPACE);
		}
		KASSERT(vm_page_domain(m) == domain,
		    ("kmem_back_domain: Domain mismatch %d != %d",
		    vm_page_domain(m), domain));
		if (flags & M_ZERO && (m->flags & PG_ZERO) == 0)
			pmap_zero_page(m);
		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
		    ("kmem_malloc: page %p is managed", m));
		vm_page_valid(m);
		pmap_enter(kernel_pmap, addr + i, m, prot,
		    prot | PMAP_ENTER_WIRED, 0);
		if (__predict_false((prot & VM_PROT_EXECUTE) != 0))
			m->oflags |= VPO_KMEM_EXEC;
	}
	VM_OBJECT_WUNLOCK(object);

	return (KERN_SUCCESS);
}
int
kmem_back(vm_object_t object, vm_offset_t addr, vm_size_t size, int flags)
{
	vm_offset_t end, next, start;
	int domain, rv;

	KASSERT(object == kernel_object,
	    ("kmem_back: only supports kernel object."));

	for (start = addr, end = addr + size; addr < end; addr = next) {
		/*
		 * We must ensure that pages backing a given large virtual page
		 * all come from the same physical domain.
		 */
		if (vm_ndomains > 1) {
			domain = (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains;
			while (VM_DOMAIN_EMPTY(domain))
				domain++;
			next = roundup2(addr + 1, KVA_QUANTUM);
			if (next > end || next < start)
				next = end;
		} else {
			domain = 0;
			next = end;
		}
		rv = kmem_back_domain(domain, object, addr, next - addr,
		    flags);
		if (rv != KERN_SUCCESS) {
			kmem_unback(object, start, addr - start);
			break;
		}
	}
	return (rv);
}
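
/*
 * Worked example: with two populated domains and the amd64 KVA_QUANTUM
 * of 2MB, (addr >> KVA_QUANTUM_SHIFT) % vm_ndomains assigns alternating
 * 2MB KVA chunks to domains 0 and 1, so a large kmem_back() range is
 * interleaved across domains at superpage granularity while each
 * potential large page stays within a single domain.
 */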
static struct vmem *
_kmem_unback(vm_object_t object, vm_offset_t addr, vm_size_t size)
{
	struct vmem *arena;
	vm_page_t m, next;
	vm_offset_t end, offset;
	int domain;

	KASSERT(object == kernel_object,
	    ("kmem_unback: only supports kernel object."));

	if (size == 0)
		return (NULL);
	pmap_remove(kernel_pmap, addr, addr + size);
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	end = offset + size;
	VM_OBJECT_WLOCK(object);
	m = vm_page_lookup(object, atop(offset));
	domain = vm_page_domain(m);
	if (__predict_true((m->oflags & VPO_KMEM_EXEC) == 0))
		arena = vm_dom[domain].vmd_kernel_arena;
	else
		arena = vm_dom[domain].vmd_kernel_rwx_arena;
	for (; offset < end; offset += PAGE_SIZE, m = next) {
		next = vm_page_next(m);
		vm_page_xbusy_claim(m);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	VM_OBJECT_WUNLOCK(object);

	return (arena);
}
void
kmem_free(vm_offset_t addr, vm_size_t size)
{
	struct vmem *arena;

	size = round_page(size);
	kasan_mark((void *)addr, size, size, 0);
	arena = _kmem_unback(kernel_object, addr, size);
	if (arena != NULL)
		vmem_free(arena, addr, size);
}
vm_offset_t
kmap_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		addr = vm_map_findspace(map, vm_map_min(map), size);
		if (addr + size <= vm_map_max(map))
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			swap_release(size);
			return (0);
		}
		map->needs_wakeup = TRUE;
		vm_map_unlock_and_wait(map, 0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_RW, VM_PROT_RW,
	    MAP_ACC_CHARGED);
	vm_map_unlock(map);
	return (addr);
}
void
kmap_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{

	vm_map_lock(map);
	(void)vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	if (map->needs_wakeup) {
		map->needs_wakeup = FALSE;
		vm_map_wakeup(map);
	}
	vm_map_unlock(map);
}
	for (i = 0; i < ZERO_REGION_SIZE; i += PAGE_SIZE)
		pmap_qenter(addr + i, &m, 1);
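
/*
 * The loop above aliases one wired, zero-filled physical page at every
 * page offset of the ZERO_REGION_SIZE virtual range; the region is then
 * write-protected, giving consumers a large read-only source of zeros
 * that costs a single physical page.
 */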
static int
kva_import(void *unused, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	vm_offset_t addr;
	int result;

	KASSERT((size % KVA_QUANTUM) == 0,
	    ("kva_import: Size %jd is not a multiple of %d",
	    (intmax_t)size, (int)KVA_QUANTUM));
	addr = vm_map_min(kernel_map);
	result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0,
	    VMFS_SUPER_SPACE, VM_PROT_RW, VM_PROT_RW, MAP_ACC_NO_CHARGE);
	KASSERT(result == KERN_SUCCESS,
	    ("kva_import: vm_map_find failed: %d", result));
	*addrp = (vmem_addr_t)addr;
	return (0);
}
static int
kva_import_domain(void *arena, vmem_size_t size, int flags, vmem_addr_t *addrp)
{

	KASSERT((size % KVA_QUANTUM) == 0,
	    ("kva_import_domain: Size %jd is not a multiple of %d",
	    (intmax_t)size, (int)KVA_QUANTUM));
	return (vmem_xalloc(arena, size, KVA_QUANTUM, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, flags, addrp));
}
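
/*
 * Arena layering, in summary: the per-domain arenas (vmd_kernel_arena
 * and, where superpages are supported, vmd_kernel_rwx_arena) import
 * KVA_QUANTUM-aligned ranges from kernel_arena through
 * kva_import_domain(), and kernel_arena in turn imports from kernel_map
 * through kva_import(), so all kernel KVA originates in a single map
 * while allocations remain NUMA-aware.
 */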
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_size_t quantum;
	int domain;

	vm_map_init(kernel_map, kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	kernel_map->system_map = 1;
	vm_map_lock(kernel_map);
	(void)vm_map_insert(kernel_map, NULL, 0,
#ifdef __amd64__
	    KERNBASE,
#else
	    VM_MIN_KERNEL_ADDRESS,
#endif
	    start, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	vm_map_unlock(kernel_map);

	/*
	 * Use a large import quantum on NUMA systems.  This helps minimize
	 * interleaving of superpages, reducing internal fragmentation within
	 * the per-domain arenas.
	 */
	if (vm_ndomains > 1 && PMAP_HAS_DMAP)
		quantum = KVA_NUMA_IMPORT_QUANTUM;
	else
		quantum = KVA_QUANTUM;

	/*
	 * Initialize the kernel_arena.  This can grow on demand.
	 */
	vmem_init(kernel_arena, "kernel arena", 0, 0, PAGE_SIZE, 0, 0);
	vmem_set_import(kernel_arena, kva_import, NULL, NULL, quantum);

	for (domain = 0; domain < vm_ndomains; domain++) {
		/*
		 * Initialize the per-domain arenas, which color the KVA space
		 * so that virtual large pages are backed by memory from the
		 * same physical domain.
		 */
		vm_dom[domain].vmd_kernel_arena = vmem_create(
		    "kernel arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
		vmem_set_import(vm_dom[domain].vmd_kernel_arena,
		    kva_import_domain, NULL, kernel_arena, quantum);
#if VM_NRESERVLEVEL > 0
		/*
		 * With superpages, keep a separate arena for allocations with
		 * permissions that differ from the standard read/write kernel
		 * mappings, so as not to inhibit superpage promotion.
		 */
		vm_dom[domain].vmd_kernel_rwx_arena = vmem_create(
		    "kernel rwx arena domain", 0, 0, PAGE_SIZE, 0, M_WAITOK);
		vmem_set_import(vm_dom[domain].vmd_kernel_rwx_arena,
		    kva_import_domain, (vmem_release_t *)vmem_xfree,
		    kernel_arena, quantum);
#endif
	}
}
void
kmem_bootstrap_free(vm_offset_t start, vm_size_t size)
{
#if defined(__i386__) || defined(__amd64__)
	struct vm_domain *vmd;
	vm_offset_t end, va;
	vm_paddr_t pa;
	vm_page_t m;

	end = trunc_page(start + size);
	start = round_page(start);

#ifdef __amd64__
	/*
	 * Preloaded files do not have execute permission by default on amd64.
	 * Restore the default permissions to ensure that the direct map alias
	 * is updated.
	 */
	pmap_change_prot(start, end - start, VM_PROT_RW);
#endif
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_kextract(va);
		m = PHYS_TO_VM_PAGE(pa);

		vmd = vm_pagequeue_domain(m);
		vm_domain_free_lock(vmd);
		vm_phys_free_pages(m, 0);
		vm_domain_free_unlock(vmd);

		vm_domain_freecnt_inc(vmd, 1);
		vm_cnt.v_page_count++;
	}
	pmap_remove(kernel_pmap, start, end);
	(void)vmem_add(kernel_arena, start, end - start, M_WAITOK);
#endif
}
static int
debug_vm_lowmem(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if ((i & ~(VM_LOW_KMEM | VM_LOW_PAGES)) != 0)
		return (EINVAL);
	EVENTHANDLER_INVOKE(vm_lowmem, i);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, vm_lowmem,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0, debug_vm_lowmem, "I",
    "set to trigger vm_lowmem event with given flags");
static int
debug_uma_reclaim(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	i = 0;
	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (i != UMA_RECLAIM_TRIM && i != UMA_RECLAIM_DRAIN &&
	    i != UMA_RECLAIM_DRAIN_CPU)
		return (EINVAL);
	uma_reclaim(i);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, uma_reclaim,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0, debug_uma_reclaim, "I",
    "set to generate request to reclaim uma caches");
static int
debug_uma_reclaim_domain(SYSCTL_HANDLER_ARGS)
{
	int domain, error, request;

	request = 0;
	error = sysctl_handle_int(oidp, &request, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	domain = request >> 4;
	request &= 0xf;
	if (request != UMA_RECLAIM_TRIM && request != UMA_RECLAIM_DRAIN &&
	    request != UMA_RECLAIM_DRAIN_CPU)
		return (EINVAL);
	if (domain >= vm_ndomains)
		return (EINVAL);
	uma_reclaim_domain(request, domain);
	return (0);
}

SYSCTL_PROC(_debug, OID_AUTO, uma_reclaim_domain,
    CTLTYPE_INT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, 0,
    debug_uma_reclaim_domain, "I",
    "set to generate request to reclaim uma caches in a domain");
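
/*
 * Encoding example: the low nibble carries the reclaim request and the
 * remaining bits select the domain, so writing
 * ((1 << 4) | UMA_RECLAIM_DRAIN) to debug.uma_reclaim_domain drains the
 * UMA caches belonging to NUMA domain 1.
 */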