50#include <sys/kernel.h>
52#include <sys/callout.h>
55#include <sys/malloc.h>
58#include <sys/condvar.h>
59#include <sys/sysctl.h>
60#include <sys/taskqueue.h>
62#include <sys/vmmeter.h>
70#include <vm/vm_object.h>
71#include <vm/vm_kern.h>
72#include <vm/vm_extern.h>
73#include <vm/vm_param.h>
74#include <vm/vm_page.h>
75#include <vm/vm_pageout.h>
76#include <vm/vm_phys.h>
77#include <vm/vm_pagequeue.h>
78#include <vm/uma_int.h>
/*
 * Freelist order tuning: orders below VMEM_OPTVALUE map one freelist
 * per exact size (in quanta); larger sizes fall back to power-of-two
 * buckets (see the ORDER2SIZE/SIZE2ORDER pair defined below).
 */
80#define VMEM_OPTORDER 5
81#define VMEM_OPTVALUE (1 << VMEM_OPTORDER)
82#define VMEM_MAXORDER \
83 (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
/* Bounds for the per-arena busy-tag hash table (rehashed as load changes). */
85#define VMEM_HASHSIZE_MIN 16
86#define VMEM_HASHSIZE_MAX 131072
/* Maximum number of per-size UMA quantum caches fronting an arena. */
88#define VMEM_QCACHE_IDX_MAX 16
/* Strategy bits a caller may pass to select the fit policy. */
90#define VMEM_FITMASK (M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
/* Every allocation flag the vmem entry points accept. */
92#define VMEM_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | \
93 M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
/* Subset of flags forwarded when allocating boundary tags themselves. */
95#define BT_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
/* Recover the qcache_t that owns a UMA pool (stored in pr_qcache). */
117#define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache))
/* Maximum arena name length (vm_name is sized VMEM_NAME_MAX + 1 below). */
119#define VMEM_NAME_MAX 16
/*
 * A boundary tag hangs off either the busy hash or a freelist, never
 * both; these shorthands name the two arms of the tag's link union.
 */
128#define bt_hashlist bt_u.u_hashlist
129#define bt_freelist bt_u.u_freelist
140 LIST_ENTRY(
vmem) vm_alllist;
143 struct vmem_seglist vm_seglist;
144 struct vmem_hashlist *vm_hashlist;
145 vmem_size_t vm_hashsize;
148 vmem_size_t vm_qcache_max;
149 vmem_size_t vm_quantum_mask;
150 vmem_size_t vm_import_quantum;
151 int vm_quantum_shift;
157 vmem_size_t vm_inuse;
159 vmem_size_t vm_limit;
163 vmem_import_t *vm_importfn;
164 vmem_release_t *vm_releasefn;
168 vmem_reclaim_t *vm_reclaimfn;
/*
 * Boundary-tag types.  Span tags describe whole imported regions;
 * free/busy tags partition the space inside them; the cursor is a
 * zero-size marker tag used by the M_NEXTFIT strategy.
 * NOTE(review): the two span types must keep the numerically lowest
 * values — BT_ISSPAN_P() relies on the <= comparison below.
 */
174#define BT_TYPE_SPAN 1
175#define BT_TYPE_SPAN_STATIC 2
176#define BT_TYPE_FREE 3
177#define BT_TYPE_BUSY 4
178#define BT_TYPE_CURSOR 5
179#define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
/* Last address covered by a tag (inclusive). */
181#define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1)
183#if defined(DIAGNOSTIC)
184static int enable_vmem_check = 0;
185SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
186 &enable_vmem_check, 0,
"Enable vmem check");
187static void vmem_check(vmem_t *);
196static uma_zone_t vmem_zone;
/* Per-arena condvar: waiters sleep here for tags/space, under vm_lock. */
199#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
200#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
201#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
202#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
/* Per-arena mutex guarding all mutable arena state. */
204#define VMEM_LOCK(vm) mtx_lock(&vm->vm_lock)
205#define VMEM_TRYLOCK(vm) mtx_trylock(&vm->vm_lock)
206#define VMEM_UNLOCK(vm) mtx_unlock(&vm->vm_lock)
207#define VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
208#define VMEM_LOCK_DESTROY(vm) mtx_destroy(&vm->vm_lock)
/*
 * NOTE(review): the trailing ';' in this expansion is harmless where the
 * macro is used as a statement but would break expression contexts.
 */
209#define VMEM_ASSERT_LOCKED(vm) mtx_assert(&vm->vm_lock, MA_OWNED);
/* Round addr up to a power-of-two align (two's-complement identity). */
211#define VMEM_ALIGNUP(addr, align) (-(-(addr) & -(align)))
/* True if addr1 and addr2 lie on opposite sides of a power-of-two boundary. */
213#define VMEM_CROSS_P(addr1, addr2, boundary) \
214 ((((addr1) ^ (addr2)) & -(boundary)) != 0)
/*
 * Inverse pair mapping freelist order <-> size in quanta: exact sizes
 * for small orders (< VMEM_OPTVALUE), power-of-two buckets above that.
 */
216#define ORDER2SIZE(order) ((order) < VMEM_OPTVALUE ? ((order) + 1) : \
217 (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
218#define SIZE2ORDER(size) ((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
219 (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
/* Cap on cached free boundary tags kept per arena by bt_freetrim(). */
232#define BT_MAXFREE (BT_MAXALLOC * 8)
/* UMA zone from which boundary tags are allocated (see bt_alloc/bt_free). */
235static uma_zone_t vmem_bt_zone;
/* Static backing storage for the arenas that must exist before UMA does. */
238static struct vmem kernel_arena_storage;
239static struct vmem buffer_arena_storage;
240static struct vmem transient_arena_storage;
242vmem_t *kernel_arena = &kernel_arena_storage;
/* kmem_arena is a compatibility alias: it shares kernel_arena's storage. */
243vmem_t *kmem_arena = &kernel_arena_storage;
244vmem_t *buffer_arena = &buffer_arena_storage;
245vmem_t *transient_arena = &transient_arena_storage;
/* Arena used by MemGuard — presumably under DEBUG_MEMGUARD; the
 * surrounding conditional is elided in this view (TODO confirm). */
248static struct vmem memguard_arena_storage;
249vmem_t *memguard_arena = &memguard_arena_storage;
281 if (vm != kernel_arena && vm->vm_arg != kernel_arena)
282 flags &= ~M_USE_RESERVE;
291 bt = uma_zalloc(vmem_bt_zone,
292 (
flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
295 bt = uma_zalloc(vmem_bt_zone,
flags);
327 bt = LIST_FIRST(&vm->vm_freetags);
345 LIST_INIT(&freetags);
347 while (vm->vm_nfreetags > freelimit) {
348 bt = LIST_FIRST(&vm->vm_freetags);
354 while ((
bt = LIST_FIRST(&freetags)) != NULL) {
356 uma_zfree(vmem_bt_zone,
bt);
365 MPASS(LIST_FIRST(&vm->vm_freetags) !=
bt);
378 (
"%s: insufficient free tags %d", __func__, vm->vm_nfreetags));
401static struct vmem_freelist *
404 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
407 MPASS(size != 0 && qsize != 0);
408 MPASS((size & vm->vm_quantum_mask) == 0);
412 return &vm->vm_freelist[idx];
423static struct vmem_freelist *
426 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
429 MPASS(size != 0 && qsize != 0);
430 MPASS((size & vm->vm_quantum_mask) == 0);
432 if (strat == M_FIRSTFIT &&
ORDER2SIZE(idx) != qsize) {
439 return &vm->vm_freelist[idx];
444static struct vmem_hashlist *
447 struct vmem_hashlist *list;
450 hash = hash32_buf(&
addr,
sizeof(
addr), 0);
451 list = &vm->vm_hashlist[hash % vm->vm_hashsize];
459 struct vmem_hashlist *list;
465 if (
bt->bt_start ==
addr) {
478 MPASS(vm->vm_nbusytag > 0);
479 vm->vm_inuse -=
bt->bt_size;
487 struct vmem_hashlist *list;
495 vm->vm_inuse +=
bt->bt_size;
505 TAILQ_REMOVE(&vm->vm_seglist,
bt, bt_seglist);
513 TAILQ_INSERT_AFTER(&vm->vm_seglist, prev,
bt, bt_seglist);
520 TAILQ_INSERT_TAIL(&vm->vm_seglist,
bt, bt_seglist);
535 struct vmem_freelist *list;
556 KASSERT((
flags & M_WAITOK) == 0, (
"blocking allocation"));
559 for (i = 0; i < cnt; i++) {
561 VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX,
flags, &
addr) != 0)
563 store[i] = (
void *)
addr;
578 for (i = 0; i < cnt; i++)
590 MPASS((qcache_max & vm->vm_quantum_mask) == 0);
591 qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
593 vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
594 for (i = 0; i < qcache_idx_max; i++) {
595 qc = &vm->vm_qcache[i];
596 size = (i + 1) << vm->vm_quantum_shift;
613 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
614 for (i = 0; i < qcache_idx_max; i++)
615 uma_zdestroy(vm->vm_qcache[i].qc_cache);
624 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
625 for (i = 0; i < qcache_idx_max; i++)
626 uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
629#ifndef UMA_MD_SMALL_ALLOC
665 *pflag = UMA_SLAB_KERNEL;
673 VMEM_ADDR_MIN, VMEM_ADDR_MAX,
674 M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &
addr) == 0) {
675 if (kmem_back_domain(
domain, kernel_object,
addr, bytes,
676 M_NOWAIT | M_USE_RESERVE) == 0) {
678 return ((
void *)
addr);
706 vmem_zone = uma_zcreate(
"vmem",
707 sizeof(
struct vmem), NULL, NULL, NULL, NULL,
709 vmem_bt_zone = uma_zcreate(
"vmem btag",
710 sizeof(
struct vmem_btag), NULL, NULL, NULL, NULL,
711 UMA_ALIGN_PTR, UMA_ZONE_VM);
712#ifndef UMA_MD_SMALL_ALLOC
733 struct vmem_hashlist *newhashlist;
734 struct vmem_hashlist *oldhashlist;
735 vmem_size_t i, oldhashsize;
737 MPASS(newhashsize > 0);
739 newhashlist =
malloc(
sizeof(
struct vmem_hashlist) * newhashsize,
741 if (newhashlist == NULL)
743 for (i = 0; i < newhashsize; i++) {
744 LIST_INIT(&newhashlist[i]);
748 oldhashlist = vm->vm_hashlist;
749 oldhashsize = vm->vm_hashsize;
750 vm->vm_hashlist = newhashlist;
751 vm->vm_hashsize = newhashsize;
752 if (oldhashlist == NULL) {
756 for (i = 0; i < oldhashsize; i++) {
757 while ((
bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
764 if (oldhashlist != vm->vm_hash0)
765 free(oldhashlist, M_VMEM);
785 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
788 if (enable_vmem_check == 1) {
794 desired = 1 << flsl(vm->vm_nbusytag);
797 current = vm->vm_hashsize;
800 if (desired >= current * 2 || desired * 4 <= current)
830 bt_t *btfree, *btprev, *btspan;
834 MPASS((size & vm->vm_quantum_mask) == 0);
836 if (vm->vm_releasefn == NULL) {
844 btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
845 if ((!bt_isbusy(btprev) && !
bt_isfree(btprev)) ||
852 if (btprev == NULL || bt_isbusy(btprev)) {
853 if (btprev == NULL) {
890 MPASS(vm->vm_nbusytag == 0);
892 TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
893 while ((
bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
896 if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
897 free(vm->vm_hashlist, M_VMEM);
903 uma_zfree(vmem_zone, vm);
912 if (vm->vm_importfn == NULL)
919 if (align != vm->vm_quantum_mask + 1)
920 size = (align * 2) + size;
921 size = roundup(size, vm->vm_import_quantum);
923 if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
928 error = (vm->vm_importfn)(vm->vm_arg, size,
flags, &
addr);
947 vmem_size_t
phase, vmem_size_t nocross, vmem_addr_t minaddr,
948 vmem_addr_t maxaddr, vmem_addr_t *addrp)
954 MPASS(
bt->bt_size >= size);
962 if (
start < minaddr) {
972 if (start < bt->bt_start)
975 MPASS(align < nocross);
978 if (start <= end && end - start >= size - 1) {
981 MPASS(minaddr <=
start);
982 MPASS(maxaddr == 0 ||
start + size - 1 <= maxaddr);
1003 MPASS(
bt->bt_size >= size);
1014 TAILQ_PREV(
bt, vmem_seglist, bt_seglist));
1017 if (
bt->bt_size != size &&
bt->bt_size - size > vm->vm_quantum_mask) {
1023 bt->bt_start =
bt->bt_start + size;
1024 bt->bt_size -= size;
1027 TAILQ_PREV(
bt, vmem_seglist, bt_seglist));
1034 MPASS(
bt->bt_size >= size);
1056 if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
1057 avail = vm->vm_size - vm->vm_inuse;
1060 if (vm->vm_qcache_max != 0)
1062 if (vm->vm_reclaimfn != NULL)
1063 vm->vm_reclaimfn(vm,
flags);
1067 if (vm->vm_size - vm->vm_inuse > avail)
1070 if ((
flags & M_NOWAIT) != 0)
1085 if (vm->vm_releasefn == NULL)
1088 prev = TAILQ_PREV(
bt, vmem_seglist, bt_seglist);
1089 MPASS(prev != NULL);
1093 vmem_addr_t spanaddr;
1094 vmem_size_t spansize;
1103 vm->vm_size -= spansize;
1106 vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
1114 const vmem_size_t
phase,
const vmem_size_t nocross,
int flags,
1134 for (cursor = &vm->vm_cursor,
bt = TAILQ_NEXT(cursor, bt_seglist);
1135 bt != cursor;
bt = TAILQ_NEXT(
bt, bt_seglist)) {
1137 bt = TAILQ_FIRST(&vm->vm_seglist);
1140 VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
1151 if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
1152 (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
1163 if (error == ENOMEM && prev->
bt_size >= size &&
1165 VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
1176 TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
1177 for (;
bt != NULL &&
bt->bt_start < *addrp + size;
1178 bt = TAILQ_NEXT(
bt, bt_seglist))
1181 TAILQ_INSERT_BEFORE(
bt, cursor, bt_seglist);
1183 TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
1202 vmem_release_t *releasefn,
void *arg, vmem_size_t import_quantum)
1206 KASSERT(vm->vm_size == 0, (
"%s: arena is non-empty", __func__));
1207 vm->vm_importfn = importfn;
1208 vm->vm_releasefn = releasefn;
1210 vm->vm_import_quantum = import_quantum;
1219 vm->vm_limit = limit;
1228 vm->vm_reclaimfn = reclaimfn;
1237 vmem_size_t quantum, vmem_size_t qcache_max,
int flags)
1242 MPASS((quantum & (quantum - 1)) == 0);
1244 bzero(vm,
sizeof(*vm));
1248 vm->vm_nfreetags = 0;
1249 LIST_INIT(&vm->vm_freetags);
1250 strlcpy(vm->vm_name,
name,
sizeof(vm->vm_name));
1251 vm->vm_quantum_mask = quantum - 1;
1252 vm->vm_quantum_shift = flsl(quantum) - 1;
1253 vm->vm_nbusytag = 0;
1259 TAILQ_INIT(&vm->vm_seglist);
1260 vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
1262 TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
1265 LIST_INIT(&vm->vm_freelist[i]);
1267 memset(&vm->vm_hash0, 0,
sizeof(vm->vm_hash0));
1269 vm->vm_hashlist = vm->vm_hash0;
1279 LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
1290 vmem_size_t quantum, vmem_size_t qcache_max,
int flags)
1295 vm = uma_zalloc(vmem_zone,
flags & (M_WAITOK|M_NOWAIT));
1309 LIST_REMOVE(vm, vm_alllist);
1319 return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
1333 MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
1334 if ((
flags & M_NOWAIT) == 0)
1335 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
"vmem_alloc");
1337 if (size <= vm->vm_qcache_max) {
1343 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1344 *addrp = (vmem_addr_t)uma_zalloc(qc->
qc_cache,
1345 (
flags & ~M_WAITOK) | M_NOWAIT);
1346 if (__predict_true(*addrp != 0))
1350 return (
vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1356 const vmem_size_t
phase,
const vmem_size_t nocross,
1357 const vmem_addr_t minaddr,
const vmem_addr_t maxaddr,
int flags,
1361 struct vmem_freelist *list;
1362 struct vmem_freelist *first;
1363 struct vmem_freelist *end;
1372 MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
1373 MPASS((
flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
1374 if ((
flags & M_NOWAIT) == 0)
1375 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
"vmem_xalloc");
1376 MPASS((align & vm->vm_quantum_mask) == 0);
1377 MPASS((align & (align - 1)) == 0);
1378 MPASS((
phase & vm->vm_quantum_mask) == 0);
1379 MPASS((nocross & vm->vm_quantum_mask) == 0);
1380 MPASS((nocross & (nocross - 1)) == 0);
1381 MPASS((align == 0 &&
phase == 0) ||
phase < align);
1382 MPASS(nocross == 0 || nocross >= size);
1383 MPASS(minaddr <= maxaddr);
1385 if (strat == M_NEXTFIT)
1386 MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);
1389 align = vm->vm_quantum_mask + 1;
1395 if (strat == M_NEXTFIT)
1419 for (list = first; list < end; list++) {
1421 if (
bt->bt_size >= size) {
1423 nocross, minaddr, maxaddr, addrp);
1430 if (strat == M_FIRSTFIT)
1438 if (strat == M_FIRSTFIT) {
1456 if (error != 0 && (
flags & M_NOWAIT) == 0)
1457 panic(
"failed to allocate waiting allocation\n");
1471 if (size <= vm->vm_qcache_max &&
1472 __predict_true(
addr >= VMEM_ADDR_QCACHE_MIN)) {
1473 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1490 MPASS(
bt->bt_start ==
addr);
1498 t = TAILQ_NEXT(
bt, bt_seglist);
1505 t = TAILQ_PREV(
bt, vmem_seglist, bt_seglist);
1551 return vm->vm_inuse;
1553 return vm->vm_size - vm->vm_inuse;
1554 case VMEM_FREE|VMEM_ALLOC:
1559 if (LIST_EMPTY(&vm->vm_freelist[i]))
1563 vm->vm_quantum_shift);
1574#if defined(DDB) || defined(DIAGNOSTIC)
1576static void bt_dump(
const bt_t *,
int (*)(
const char *, ...)
1577 __printflike(1, 2));
1580bt_type_string(
int type)
1591 return "static span";
1601bt_dump(
const bt_t *
bt,
int (*
pr)(
const char *, ...))
1604 (*pr)(
"\t%p: %jx %jx, %d(%s)\n",
1605 bt, (intmax_t)
bt->bt_start, (intmax_t)
bt->bt_size,
1606 bt->bt_type, bt_type_string(
bt->bt_type));
1610vmem_dump(
const vmem_t *vm ,
int (*
pr)(
const char *, ...) __printflike(1, 2))
1615 (*pr)(
"vmem %p '%s'\n", vm, vm->vm_name);
1616 TAILQ_FOREACH(
bt, &vm->vm_seglist, bt_seglist) {
1621 const struct vmem_freelist *fl = &vm->vm_freelist[i];
1623 if (LIST_EMPTY(fl)) {
1627 (*pr)(
"freelist[%d]\n", i);
1640vmem_whatis_lookup(vmem_t *vm, vmem_addr_t
addr)
1644 TAILQ_FOREACH(
bt, &vm->vm_seglist, bt_seglist) {
1657vmem_whatis(vmem_addr_t
addr,
int (*
pr)(
const char *, ...))
1661 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1664 bt = vmem_whatis_lookup(vm,
addr);
1668 (*pr)(
"%p is %p+%zu in VMEM '%s' (%s)\n",
1669 (
void *)
addr, (
void *)
bt->bt_start,
1670 (vmem_size_t)(
addr -
bt->bt_start), vm->vm_name,
1676vmem_printall(
const char *modif,
int (*
pr)(
const char *, ...))
1680 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1686vmem_print(vmem_addr_t
addr,
const char *modif,
int (*
pr)(
const char *, ...))
1688 const vmem_t *vm = (
const void *)
addr;
1693DB_SHOW_COMMAND(vmemdump, vmemdump)
1697 db_printf(
"usage: show vmemdump <addr>\n");
1701 vmem_dump((
const vmem_t *)
addr, db_printf);
1704DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
1708 LIST_FOREACH(vm, &vmem_list, vm_alllist)
1709 vmem_dump(vm, db_printf);
1712DB_SHOW_COMMAND(
vmem, vmem_summ)
1714 const vmem_t *vm = (
const void *)
addr;
1721 db_printf(
"usage: show vmem <addr>\n");
1725 db_printf(
"vmem %p '%s'\n", vm, vm->vm_name);
1726 db_printf(
"\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
1727 db_printf(
"\tsize:\t%zu\n", vm->vm_size);
1728 db_printf(
"\tinuse:\t%zu\n", vm->vm_inuse);
1729 db_printf(
"\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
1730 db_printf(
"\tbusy tags:\t%d\n", vm->vm_nbusytag);
1731 db_printf(
"\tfree tags:\t%d\n", vm->vm_nfreetags);
1733 memset(&ft, 0,
sizeof(ft));
1734 memset(&ut, 0,
sizeof(ut));
1735 memset(&fs, 0,
sizeof(fs));
1736 memset(&us, 0,
sizeof(us));
1737 TAILQ_FOREACH(
bt, &vm->vm_seglist, bt_seglist) {
1741 us[ord] +=
bt->bt_size;
1744 fs[ord] +=
bt->bt_size;
1747 db_printf(
"\t\t\tinuse\tsize\t\tfree\tsize\n");
1749 if (ut[ord] == 0 && ft[ord] == 0)
1751 db_printf(
"\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
1753 ut[ord], us[ord], ft[ord], fs[ord]);
1757DB_SHOW_ALL_COMMAND(
vmem, vmem_summall)
1761 LIST_FOREACH(vm, &vmem_list, vm_alllist)
1762 vmem_summ((db_expr_t)vm, TRUE,
count, modif);
1766#define vmem_printf printf
1768#if defined(DIAGNOSTIC)
1771vmem_check_sanity(vmem_t *vm)
1777 TAILQ_FOREACH(
bt, &vm->vm_seglist, bt_seglist) {
1779 printf(
"corrupted tag\n");
1784 TAILQ_FOREACH(
bt, &vm->vm_seglist, bt_seglist) {
1786 if (
bt->bt_start != 0 ||
bt->bt_size != 0) {
1787 printf(
"corrupted cursor\n");
1792 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1804 printf(
"overwrapped tags\n");
1816vmem_check(vmem_t *vm)
1819 if (!vmem_check_sanity(vm)) {
1820 panic(
"insanity vmem %p", vm);
device_property_type_t type
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN, &__elfN(aslr_enabled), 0, ": enable address map randomization")
static struct bt_table bt
void *() malloc(size_t size, struct malloc_type *mtp, int flags)
void free(void *addr, struct malloc_type *mtp)
static struct pollrec pr[POLL_LIST_LEN]
void panic(const char *fmt,...)
void callout_init(struct callout *c, int mpsafe)
struct iommu_domain ** domain
char qc_name[QC_NAME_MAX]
char vm_name[VMEM_NAME_MAX+1]
struct mtx_padalign vm_lock
int printf(const char *fmt,...)
int snprintf(char *str, size_t size, const char *format,...)
int taskqueue_enqueue(struct taskqueue *queue, struct task *task)
static int vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
static int qc_import(void *arg, void **store, int cnt, int domain, int flags)
static void qc_release(void *arg, void **store, int cnt)
static struct callout vmem_periodic_ch
#define VMEM_LOCK_DESTROY(vm)
static void bt_free(vmem_t *vm, bt_t *bt)
static void bt_save(vmem_t *vm)
int vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
void vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
LIST_HEAD(vmem_freelist, vmem_btag)
#define VMEM_CROSS_P(addr1, addr2, boundary)
#define VMEM_CONDVAR_WAIT(vm)
void vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
static void bt_insfree(vmem_t *vm, bt_t *bt)
void vmem_destroy(vmem_t *vm)
int vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align, const vmem_size_t phase, const vmem_size_t nocross, const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags, vmem_addr_t *addrp)
static void qc_destroy(vmem_t *vm)
static void bt_insseg_tail(vmem_t *vm, bt_t *bt)
static int bt_fill(vmem_t *vm, int flags)
void vmem_set_limit(vmem_t *vm, vmem_size_t limit)
#define VMEM_CONDVAR_INIT(vm, wchan)
static void * vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag, int wait)
static struct vmem_freelist * bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
static int vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align, vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
#define BT_TYPE_SPAN_STATIC
static struct vmem_hashlist * bt_hashhead(vmem_t *vm, vmem_addr_t addr)
static bt_t * bt_alloc(vmem_t *vm)
vmem_size_t vmem_roundup_size(vmem_t *vm, vmem_size_t size)
static void vmem_destroy1(vmem_t *vm)
#define VMEM_LOCK_INIT(vm, name)
static struct mtx_padalign __exclusive_cache_line vmem_list_lock
static void qc_drain(vmem_t *vm)
static void bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
static struct task vmem_periodic_wk
static void bt_restore(vmem_t *vm)
static void qc_init(vmem_t *vm, vmem_size_t qcache_max)
#define VMEM_HASHSIZE_MAX
static __noinline int _bt_fill(vmem_t *vm, int flags)
#define VMEM_CONDVAR_BROADCAST(vm)
static struct vmem_freelist * bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
static bool bt_isfree(bt_t *bt)
static int vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
#define VMEM_ASSERT_LOCKED(vm)
static void vmem_periodic(void *unused, int pending)
void vmem_set_import(vmem_t *vm, vmem_import_t *importfn, vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures")
vmem_t * vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size, vmem_size_t quantum, vmem_size_t qcache_max, int flags)
#define ORDER2SIZE(order)
static int vmem_periodic_interval
static void vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
vmem_size_t vmem_size(vmem_t *vm, int typemask)
static void bt_remfree(vmem_t *vm __unused, bt_t *bt)
static void vmem_start_callout(void *unused)
int vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
static bt_t * bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
#define VMEM_HASHSIZE_MIN
static void bt_insbusy(vmem_t *vm, bt_t *bt)
TAILQ_HEAD(vmem_seglist, vmem_btag)
#define VMEM_CONDVAR_DESTROY(vm)
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL)
static void bt_remseg(vmem_t *vm, bt_t *bt)
static void bt_rembusy(vmem_t *vm, bt_t *bt)
static void vmem_periodic_kick(void *dummy)
static int vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
static int vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align, const vmem_size_t phase, const vmem_size_t nocross, int flags, vmem_addr_t *addrp)
#define VMEM_ALIGNUP(addr, align)
vmem_t * vmem_create(const char *name, vmem_addr_t base, vmem_size_t size, vmem_size_t quantum, vmem_size_t qcache_max, int flags)
static void bt_freetrim(vmem_t *vm, int freelimit)
static int vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
static void vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
void vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size __unused)
#define VMEM_QCACHE_IDX_MAX
static struct mtx_padalign __exclusive_cache_line vmem_bt_lock