FreeBSD kernel kern code
subr_vmem.c
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
5 * Copyright (c) 2013 EMC Corp.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30/*
31 * From:
32 * $NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
33 * $NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
34 */
35
36/*
37 * reference:
38 * - Magazines and Vmem: Extending the Slab Allocator
39 * to Many CPUs and Arbitrary Resources
40 * http://www.usenix.org/event/usenix01/bonwick.html
41 */
42
43#include <sys/cdefs.h>
44__FBSDID("$FreeBSD$");
45
46#include "opt_ddb.h"
47
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/kernel.h>
51#include <sys/queue.h>
52#include <sys/callout.h>
53#include <sys/hash.h>
54#include <sys/lock.h>
55#include <sys/malloc.h>
56#include <sys/mutex.h>
57#include <sys/smp.h>
58#include <sys/condvar.h>
59#include <sys/sysctl.h>
60#include <sys/taskqueue.h>
61#include <sys/vmem.h>
62#include <sys/vmmeter.h>
63
64#include "opt_vm.h"
65
66#include <vm/uma.h>
67#include <vm/vm.h>
68#include <vm/pmap.h>
69#include <vm/vm_map.h>
70#include <vm/vm_object.h>
71#include <vm/vm_kern.h>
72#include <vm/vm_extern.h>
73#include <vm/vm_param.h>
74#include <vm/vm_page.h>
75#include <vm/vm_pageout.h>
76#include <vm/vm_phys.h>
77#include <vm/vm_pagequeue.h>
78#include <vm/uma_int.h>
79
80#define VMEM_OPTORDER 5
81#define VMEM_OPTVALUE (1 << VMEM_OPTORDER)
82#define VMEM_MAXORDER \
83 (VMEM_OPTVALUE - 1 + sizeof(vmem_size_t) * NBBY - VMEM_OPTORDER)
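/*
 * For example, with a 64-bit vmem_size_t (sizeof(vmem_size_t) * NBBY == 64)
 * and VMEM_OPTORDER == 5, VMEM_MAXORDER works out to 31 + 64 - 5 == 90
 * freelists.
 */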
84
85#define VMEM_HASHSIZE_MIN 16
86#define VMEM_HASHSIZE_MAX 131072
87
88#define VMEM_QCACHE_IDX_MAX 16
89
90#define VMEM_FITMASK (M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
91
92#define VMEM_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | \
93 M_BESTFIT | M_FIRSTFIT | M_NEXTFIT)
94
95#define BT_FLAGS (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)
96
97#define QC_NAME_MAX 16
98
99/*
100 * Data structures private to vmem.
101 */
102MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");
103
104typedef struct vmem_btag bt_t;
105
106TAILQ_HEAD(vmem_seglist, vmem_btag);
107LIST_HEAD(vmem_freelist, vmem_btag);
108LIST_HEAD(vmem_hashlist, vmem_btag);
109
110struct qcache {
111 uma_zone_t qc_cache;
112 vmem_t *qc_vmem;
113 vmem_size_t qc_size;
114 char qc_name[QC_NAME_MAX];
115};
116typedef struct qcache qcache_t;
117#define QC_POOL_TO_QCACHE(pool) ((qcache_t *)(pool->pr_qcache))
118
119#define VMEM_NAME_MAX 16
120
121/* boundary tag */
122struct vmem_btag {
124 union {
125 LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
126 LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
128#define bt_hashlist bt_u.u_hashlist
129#define bt_freelist bt_u.u_freelist
130 vmem_addr_t bt_start;
131 vmem_size_t bt_size;
133};
134
135/* vmem arena */
136struct vmem {
137 struct mtx_padalign vm_lock;
138 struct cv vm_cv;
139 char vm_name[VMEM_NAME_MAX+1];
140 LIST_ENTRY(vmem) vm_alllist;
141 struct vmem_hashlist vm_hash0[VMEM_HASHSIZE_MIN];
142 struct vmem_freelist vm_freelist[VMEM_MAXORDER];
143 struct vmem_seglist vm_seglist;
144 struct vmem_hashlist *vm_hashlist;
145 vmem_size_t vm_hashsize;
146
147 /* Constant after init */
148 vmem_size_t vm_qcache_max;
149 vmem_size_t vm_quantum_mask;
150 vmem_size_t vm_import_quantum;
151 int vm_quantum_shift;
152
153 /* Written on alloc/free */
154 LIST_HEAD(, vmem_btag) vm_freetags;
155 int vm_nfreetags;
156 int vm_nbusytag;
157 vmem_size_t vm_inuse;
158 vmem_size_t vm_size;
159 vmem_size_t vm_limit;
160 struct vmem_btag vm_cursor;
161
162 /* Used on import. */
163 vmem_import_t *vm_importfn;
164 vmem_release_t *vm_releasefn;
165 void *vm_arg;
166
167 /* Space exhaustion callback. */
168 vmem_reclaim_t *vm_reclaimfn;
169
170 /* quantum cache */
171 qcache_t vm_qcache[VMEM_QCACHE_IDX_MAX];
172};
173
174#define BT_TYPE_SPAN 1 /* Allocated from importfn */
175#define BT_TYPE_SPAN_STATIC 2 /* vmem_add() or create. */
176#define BT_TYPE_FREE 3 /* Available space. */
177#define BT_TYPE_BUSY 4 /* Used space. */
178#define BT_TYPE_CURSOR 5 /* Cursor for nextfit allocations. */
179#define BT_ISSPAN_P(bt) ((bt)->bt_type <= BT_TYPE_SPAN_STATIC)
180
181#define BT_END(bt) ((bt)->bt_start + (bt)->bt_size - 1)
182
183#if defined(DIAGNOSTIC)
184static int enable_vmem_check = 0;
185SYSCTL_INT(_debug, OID_AUTO, vmem_check, CTLFLAG_RWTUN,
186 &enable_vmem_check, 0, "Enable vmem check");
187static void vmem_check(vmem_t *);
188#endif
189
190static struct callout vmem_periodic_ch;
191static int vmem_periodic_interval;
192static struct task vmem_periodic_wk;
193
194static struct mtx_padalign __exclusive_cache_line vmem_list_lock;
195static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
196static uma_zone_t vmem_zone;
197
198/* ---- misc */
199#define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
200#define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
201#define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
202#define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
203
204#define VMEM_LOCK(vm) mtx_lock(&vm->vm_lock)
205#define VMEM_TRYLOCK(vm) mtx_trylock(&vm->vm_lock)
206#define VMEM_UNLOCK(vm) mtx_unlock(&vm->vm_lock)
207#define VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
208#define VMEM_LOCK_DESTROY(vm) mtx_destroy(&vm->vm_lock)
209#define VMEM_ASSERT_LOCKED(vm) mtx_assert(&vm->vm_lock, MA_OWNED);
210
211#define VMEM_ALIGNUP(addr, align) (-(-(addr) & -(align)))
212
213#define VMEM_CROSS_P(addr1, addr2, boundary) \
214 ((((addr1) ^ (addr2)) & -(boundary)) != 0)
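/*
 * Worked examples of the two's-complement tricks above (values are
 * illustrative, not from this file):
 *
 *	VMEM_ALIGNUP(0x1234, 0x1000) == 0x2000, since
 *	-(-0x1234 & -0x1000) == -(-0x2000).
 *
 *	VMEM_CROSS_P(0x1f00, 0x2100, 0x1000) is true, i.e. the range crosses
 *	a 4KB boundary: (0x1f00 ^ 0x2100) & -0x1000 == 0x3000 != 0.
 */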
215
216#define ORDER2SIZE(order) ((order) < VMEM_OPTVALUE ? ((order) + 1) : \
217 (vmem_size_t)1 << ((order) - (VMEM_OPTVALUE - VMEM_OPTORDER - 1)))
218#define SIZE2ORDER(size) ((size) <= VMEM_OPTVALUE ? ((size) - 1) : \
219 (flsl(size) + (VMEM_OPTVALUE - VMEM_OPTORDER - 2)))
220
221/*
222 * Maximum number of boundary tags that may be required to satisfy an
223 * allocation. Two may be required to import. Another two may be
224 * required to clip edges.
225 */
226#define BT_MAXALLOC 4
227
228/*
229 * Max free limits the number of locally cached boundary tags. We
230 * just want to avoid hitting the zone allocator for every call.
231 */
232#define BT_MAXFREE (BT_MAXALLOC * 8)
233
234/* Allocator for boundary tags. */
235static uma_zone_t vmem_bt_zone;
236
237/* boot time arena storage. */
238static struct vmem kernel_arena_storage;
239static struct vmem buffer_arena_storage;
240static struct vmem transient_arena_storage;
241/* kernel and kmem arenas are aliased for backwards KPI compat. */
242vmem_t *kernel_arena = &kernel_arena_storage;
243vmem_t *kmem_arena = &kernel_arena_storage;
244vmem_t *buffer_arena = &buffer_arena_storage;
245vmem_t *transient_arena = &transient_arena_storage;
246
247#ifdef DEBUG_MEMGUARD
248static struct vmem memguard_arena_storage;
249vmem_t *memguard_arena = &memguard_arena_storage;
250#endif
251
252static bool
253bt_isbusy(bt_t *bt)
254{
255 return (bt->bt_type == BT_TYPE_BUSY);
256}
257
258static bool
259bt_isfree(bt_t *bt)
260{
261 return (bt->bt_type == BT_TYPE_FREE);
262}
263
264/*
265 * Fill the vmem's boundary tag cache. We guarantee that boundary tag
266 * allocation will not fail once bt_fill() passes. To do so we cache
267 * at least the maximum possible tag allocations in the arena.
268 */
269static __noinline int
270_bt_fill(vmem_t *vm, int flags)
271{
272 bt_t *bt;
273
273
274 VMEM_ASSERT_LOCKED(vm);
275
276 /*
277 * Only allow the kernel arena and arenas derived from kernel arena to
278 * dip into reserve tags. They are where new tags come from.
279 */
280 flags &= BT_FLAGS;
281 if (vm != kernel_arena && vm->vm_arg != kernel_arena)
282 flags &= ~M_USE_RESERVE;
283
284 /*
285 * Loop until we meet the reserve. To minimize the lock shuffle
286 * and prevent simultaneous fills we first try a NOWAIT regardless
287 * of the caller's flags. Specify M_NOVM so we don't recurse while
288 * holding a vmem lock.
289 */
290 while (vm->vm_nfreetags < BT_MAXALLOC) {
291 bt = uma_zalloc(vmem_bt_zone,
292 (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
293 if (bt == NULL) {
294 VMEM_UNLOCK(vm);
295 bt = uma_zalloc(vmem_bt_zone, flags);
296 VMEM_LOCK(vm);
297 if (bt == NULL)
298 break;
299 }
300 LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
301 vm->vm_nfreetags++;
302 }
303
304 if (vm->vm_nfreetags < BT_MAXALLOC)
305 return ENOMEM;
306
307 return 0;
308}
309
310static inline int
311bt_fill(vmem_t *vm, int flags)
312{
313 if (vm->vm_nfreetags >= BT_MAXALLOC)
314 return (0);
315 return (_bt_fill(vm, flags));
316}
317
318/*
319 * Pop a tag off of the freetag stack.
320 */
321static bt_t *
322bt_alloc(vmem_t *vm)
323{
324 bt_t *bt;
325
326 VMEM_ASSERT_LOCKED(vm);
327 bt = LIST_FIRST(&vm->vm_freetags);
328 MPASS(bt != NULL);
329 LIST_REMOVE(bt, bt_freelist);
330 vm->vm_nfreetags--;
331
332 return bt;
333}
334
335/*
336 * Trim the per-vmem free list. Returns with the lock released to
337 * avoid allocator recursions.
338 */
339static void
340bt_freetrim(vmem_t *vm, int freelimit)
341{
342 LIST_HEAD(, vmem_btag) freetags;
343 bt_t *bt;
344
345 LIST_INIT(&freetags);
346 VMEM_ASSERT_LOCKED(vm);
347 while (vm->vm_nfreetags > freelimit) {
348 bt = LIST_FIRST(&vm->vm_freetags);
349 LIST_REMOVE(bt, bt_freelist);
350 vm->vm_nfreetags--;
351 LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
352 }
353 VMEM_UNLOCK(vm);
354 while ((bt = LIST_FIRST(&freetags)) != NULL) {
355 LIST_REMOVE(bt, bt_freelist);
356 uma_zfree(vmem_bt_zone, bt);
357 }
358}
359
360static inline void
361bt_free(vmem_t *vm, bt_t *bt)
362{
363
364 VMEM_ASSERT_LOCKED(vm);
365 MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
366 LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
367 vm->vm_nfreetags++;
368}
369
370/*
371 * Hide MAXALLOC tags before dropping the arena lock to ensure that a
372 * concurrent allocation attempt does not grab them.
373 */
374static void
375bt_save(vmem_t *vm)
376{
377 KASSERT(vm->vm_nfreetags >= BT_MAXALLOC,
378 ("%s: insufficient free tags %d", __func__, vm->vm_nfreetags));
379 vm->vm_nfreetags -= BT_MAXALLOC;
380}
381
382static void
383bt_restore(vmem_t *vm)
384{
385 vm->vm_nfreetags += BT_MAXALLOC;
386}
387
388/*
389 * freelist[0] ... [1, 1]
390 * freelist[1] ... [2, 2]
391 * :
392 * freelist[29] ... [30, 30]
393 * freelist[30] ... [31, 31]
394 * freelist[31] ... [32, 63]
395 * freelist[32] ... [64, 127]
396 * :
397 * freelist[n] ... [(1 << (n - 26)), (1 << (n - 25)) - 1]
398 * :
399 */
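
/*
 * For illustration, with VMEM_OPTORDER == 5 (VMEM_OPTVALUE == 32) and sizes
 * expressed in quantum units:
 *
 *	SIZE2ORDER(1)  == 0,  ORDER2SIZE(0)  == 1	(exact bucket)
 *	SIZE2ORDER(32) == 31, ORDER2SIZE(31) == 32	(last exact bucket)
 *	SIZE2ORDER(33) == 31			(33..63 share freelist[31])
 *	SIZE2ORDER(64) == 32, ORDER2SIZE(32) == 64	(64..127 share freelist[32])
 *
 * e.g. flsl(33) == 6, so 6 + (VMEM_OPTVALUE - VMEM_OPTORDER - 2) == 31.
 */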
400
401static struct vmem_freelist *
402bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
403{
404 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
405 const int idx = SIZE2ORDER(qsize);
406
407 MPASS(size != 0 && qsize != 0);
408 MPASS((size & vm->vm_quantum_mask) == 0);
409 MPASS(idx >= 0);
410 MPASS(idx < VMEM_MAXORDER);
411
412 return &vm->vm_freelist[idx];
413}
414
415/*
416 * bt_freehead_toalloc: return the freelist for the given size and allocation
417 * strategy.
418 *
419 * For M_FIRSTFIT, return the list in which any blocks are large enough
420 * for the requested size. Otherwise, return the list which can have blocks
421 * large enough for the requested size.
422 */
423static struct vmem_freelist *
424bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
425{
426 const vmem_size_t qsize = size >> vm->vm_quantum_shift;
427 int idx = SIZE2ORDER(qsize);
428
429 MPASS(size != 0 && qsize != 0);
430 MPASS((size & vm->vm_quantum_mask) == 0);
431
432 if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
433 idx++;
434 /* check too large request? */
435 }
436 MPASS(idx >= 0);
437 MPASS(idx < VMEM_MAXORDER);
438
439 return &vm->vm_freelist[idx];
440}
441
442/* ---- boundary tag hash */
443
444static struct vmem_hashlist *
445bt_hashhead(vmem_t *vm, vmem_addr_t addr)
446{
447 struct vmem_hashlist *list;
448 unsigned int hash;
449
450 hash = hash32_buf(&addr, sizeof(addr), 0);
451 list = &vm->vm_hashlist[hash % vm->vm_hashsize];
452
453 return list;
454}
455
456static bt_t *
457bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
458{
459 struct vmem_hashlist *list;
460 bt_t *bt;
461
462 VMEM_ASSERT_LOCKED(vm);
463 list = bt_hashhead(vm, addr);
464 LIST_FOREACH(bt, list, bt_hashlist) {
465 if (bt->bt_start == addr) {
466 break;
467 }
468 }
469
470 return bt;
471}
472
473static void
474bt_rembusy(vmem_t *vm, bt_t *bt)
475{
476
477 VMEM_ASSERT_LOCKED(vm);
478 MPASS(vm->vm_nbusytag > 0);
479 vm->vm_inuse -= bt->bt_size;
480 vm->vm_nbusytag--;
481 LIST_REMOVE(bt, bt_hashlist);
482}
483
484static void
485bt_insbusy(vmem_t *vm, bt_t *bt)
486{
487 struct vmem_hashlist *list;
488
489 VMEM_ASSERT_LOCKED(vm);
490 MPASS(bt->bt_type == BT_TYPE_BUSY);
491
492 list = bt_hashhead(vm, bt->bt_start);
493 LIST_INSERT_HEAD(list, bt, bt_hashlist);
494 vm->vm_nbusytag++;
495 vm->vm_inuse += bt->bt_size;
496}
497
498/* ---- boundary tag list */
499
500static void
501bt_remseg(vmem_t *vm, bt_t *bt)
502{
503
504 MPASS(bt->bt_type != BT_TYPE_CURSOR);
505 TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
506 bt_free(vm, bt);
507}
508
509static void
510bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
511{
512
513 TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
514}
515
516static void
517bt_insseg_tail(vmem_t *vm, bt_t *bt)
518{
519
520 TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
521}
522
523static void
524bt_remfree(vmem_t *vm __unused, bt_t *bt)
525{
526
527 MPASS(bt->bt_type == BT_TYPE_FREE);
528
529 LIST_REMOVE(bt, bt_freelist);
530}
531
532static void
533bt_insfree(vmem_t *vm, bt_t *bt)
534{
535 struct vmem_freelist *list;
536
537 list = bt_freehead_tofree(vm, bt->bt_size);
538 LIST_INSERT_HEAD(list, bt, bt_freelist);
539}
540
541/* ---- vmem internal functions */
542
543/*
544 * Import from the arena into the quantum cache in UMA.
545 *
546 * We use VMEM_ADDR_QCACHE_MIN instead of 0: uma_zalloc() returns 0 to indicate
547 * failure, so UMA can't be used to cache a resource with value 0.
548 */
549static int
550qc_import(void *arg, void **store, int cnt, int domain, int flags)
551{
552 qcache_t *qc;
553 vmem_addr_t addr;
554 int i;
555
556 KASSERT((flags & M_WAITOK) == 0, ("blocking allocation"));
557
558 qc = arg;
559 for (i = 0; i < cnt; i++) {
560 if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
561 VMEM_ADDR_QCACHE_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
562 break;
563 store[i] = (void *)addr;
564 }
565 return (i);
566}
567
568/*
569 * Release memory from the UMA cache to the arena.
570 */
571static void
572qc_release(void *arg, void **store, int cnt)
573{
574 qcache_t *qc;
575 int i;
576
577 qc = arg;
578 for (i = 0; i < cnt; i++)
579 vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
580}
581
582static void
583qc_init(vmem_t *vm, vmem_size_t qcache_max)
584{
585 qcache_t *qc;
586 vmem_size_t size;
587 int qcache_idx_max;
588 int i;
589
590 MPASS((qcache_max & vm->vm_quantum_mask) == 0);
591 qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
592 VMEM_QCACHE_IDX_MAX);
593 vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
594 for (i = 0; i < qcache_idx_max; i++) {
595 qc = &vm->vm_qcache[i];
596 size = (i + 1) << vm->vm_quantum_shift;
597 snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
598 vm->vm_name, size);
599 qc->qc_vmem = vm;
600 qc->qc_size = size;
601 qc->qc_cache = uma_zcache_create(qc->qc_name, size,
602 NULL, NULL, NULL, NULL, qc_import, qc_release, qc, 0);
603 MPASS(qc->qc_cache);
604 }
605}
606
607static void
608qc_destroy(vmem_t *vm)
609{
610 int qcache_idx_max;
611 int i;
612
613 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
614 for (i = 0; i < qcache_idx_max; i++)
615 uma_zdestroy(vm->vm_qcache[i].qc_cache);
616}
617
618static void
619qc_drain(vmem_t *vm)
620{
621 int qcache_idx_max;
622 int i;
623
624 qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
625 for (i = 0; i < qcache_idx_max; i++)
626 uma_zone_reclaim(vm->vm_qcache[i].qc_cache, UMA_RECLAIM_DRAIN);
627}
628
629#ifndef UMA_MD_SMALL_ALLOC
630
631static struct mtx_padalign __exclusive_cache_line vmem_bt_lock;
632
633/*
634 * vmem_bt_alloc: Allocate a new page of boundary tags.
635 *
636 * On architectures with uma_small_alloc there is no recursion; no address
637 * space need be allocated to allocate boundary tags. For the others, we
638 * must handle recursion. Boundary tags are necessary to allocate new
639 * boundary tags.
640 *
641 * UMA guarantees that enough tags are held in reserve to allocate a new
642 * page of kva. We dip into this reserve by specifying M_USE_RESERVE only
643 * when allocating the page to hold new boundary tags. In this way the
644 * reserve is automatically filled by the allocation that uses the reserve.
645 *
646 * We still have to guarantee that the new tags are allocated atomically since
647 * many threads may try concurrently. The bt_lock provides this guarantee.
648 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
649 * on failure. It's ok to return NULL for a WAITOK allocation as UMA will
650 * loop again after checking to see if we lost the race to allocate.
651 *
652 * There is a small race between vmem_bt_alloc() returning the page and the
653 * zone lock being acquired to add the page to the zone. For WAITOK
654 * allocations we just pause briefly. NOWAIT may experience a transient
655 * failure. To alleviate this we permit a small number of simultaneous
656 * fills to proceed concurrently so NOWAIT is less likely to fail unless
657 * we are really out of KVA.
658 */
659static void *
660vmem_bt_alloc(uma_zone_t zone, vm_size_t bytes, int domain, uint8_t *pflag,
661 int wait)
662{
663 vmem_addr_t addr;
664
665 *pflag = UMA_SLAB_KERNEL;
666
667 /*
668 * Single thread boundary tag allocation so that the address space
669 * and memory are added in one atomic operation.
670 */
671 mtx_lock(&vmem_bt_lock);
672 if (vmem_xalloc(vm_dom[domain].vmd_kernel_arena, bytes, 0, 0, 0,
673 VMEM_ADDR_MIN, VMEM_ADDR_MAX,
674 M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT, &addr) == 0) {
675 if (kmem_back_domain(domain, kernel_object, addr, bytes,
676 M_NOWAIT | M_USE_RESERVE) == 0) {
677 mtx_unlock(&vmem_bt_lock);
678 return ((void *)addr);
679 }
680 vmem_xfree(vm_dom[domain].vmd_kernel_arena, addr, bytes);
681 mtx_unlock(&vmem_bt_lock);
682 /*
683 * Out of memory, not address space. This may not even be
684 * possible due to M_USE_RESERVE page allocation.
685 */
686 if (wait & M_WAITOK)
687 vm_wait_domain(domain);
688 return (NULL);
689 }
690 mtx_unlock(&vmem_bt_lock);
691 /*
692 * We're either out of address space or lost a fill race.
693 */
694 if (wait & M_WAITOK)
695 pause("btalloc", 1);
696
697 return (NULL);
698}
699#endif
700
701void
702vmem_startup(void)
703{
704
705 mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
706 vmem_zone = uma_zcreate("vmem",
707 sizeof(struct vmem), NULL, NULL, NULL, NULL,
708 UMA_ALIGN_PTR, 0);
709 vmem_bt_zone = uma_zcreate("vmem btag",
710 sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
711 UMA_ALIGN_PTR, UMA_ZONE_VM);
712#ifndef UMA_MD_SMALL_ALLOC
713 mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
714 uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
715 /*
716 * Reserve enough tags to allocate new tags. We allow multiple
717 * CPUs to attempt to allocate new tags concurrently to limit
718 * false restarts in UMA. vmem_bt_alloc() allocates from a per-domain
719 * arena, which may involve importing a range from the kernel arena,
720 * so we need to keep at least 2 * BT_MAXALLOC tags reserved.
721 */
722 uma_zone_reserve(vmem_bt_zone, 2 * BT_MAXALLOC * mp_ncpus);
723 uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
724#endif
725}
726
727/* ---- rehash */
728
729static int
730vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
731{
732 bt_t *bt;
733 struct vmem_hashlist *newhashlist;
734 struct vmem_hashlist *oldhashlist;
735 vmem_size_t i, oldhashsize;
736
737 MPASS(newhashsize > 0);
738
739 newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
740 M_VMEM, M_NOWAIT);
741 if (newhashlist == NULL)
742 return ENOMEM;
743 for (i = 0; i < newhashsize; i++) {
744 LIST_INIT(&newhashlist[i]);
745 }
746
747 VMEM_LOCK(vm);
748 oldhashlist = vm->vm_hashlist;
749 oldhashsize = vm->vm_hashsize;
750 vm->vm_hashlist = newhashlist;
751 vm->vm_hashsize = newhashsize;
752 if (oldhashlist == NULL) {
753 VMEM_UNLOCK(vm);
754 return 0;
755 }
756 for (i = 0; i < oldhashsize; i++) {
757 while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
758 bt_rembusy(vm, bt);
759 bt_insbusy(vm, bt);
760 }
761 }
762 VMEM_UNLOCK(vm);
763
764 if (oldhashlist != vm->vm_hash0)
765 free(oldhashlist, M_VMEM);
766
767 return 0;
768}
769
770static void
771vmem_periodic_kick(void *dummy)
772{
773
774 taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
775}
776
777static void
778vmem_periodic(void *unused, int pending)
779{
780 vmem_t *vm;
781 vmem_size_t desired;
782 vmem_size_t current;
783
784 mtx_lock(&vmem_list_lock);
785 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
786#ifdef DIAGNOSTIC
787 /* Convenient time to verify vmem state. */
788 if (enable_vmem_check == 1) {
789 VMEM_LOCK(vm);
790 vmem_check(vm);
791 VMEM_UNLOCK(vm);
792 }
793#endif
794 desired = 1 << flsl(vm->vm_nbusytag);
795 desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
796 VMEM_HASHSIZE_MAX);
797 current = vm->vm_hashsize;
798
799 /* Grow in powers of two. Shrink less aggressively. */
800 if (desired >= current * 2 || desired * 4 <= current)
801 vmem_rehash(vm, desired);
802
803 /*
804 * Periodically wake up threads waiting for resources,
805 * so they could ask for reclamation again.
806 */
807 VMEM_CONDVAR_BROADCAST(vm);
808 }
809 mtx_unlock(&vmem_list_lock);
810
811 callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
812 vmem_periodic_kick, NULL);
813}
814
815static void
816vmem_start_callout(void *unused)
817{
818
819 TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
820 vmem_periodic_interval = hz * 10;
821 callout_init(&vmem_periodic_ch, 1);
822 callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
823 vmem_periodic_kick, NULL);
824}
825SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);
826
827static void
828vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
829{
830 bt_t *btfree, *btprev, *btspan;
831
832 VMEM_ASSERT_LOCKED(vm);
833 MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);
834 MPASS((size & vm->vm_quantum_mask) == 0);
835
836 if (vm->vm_releasefn == NULL) {
837 /*
838 * The new segment will never be released, so see if it is
839 * contiguous with respect to an existing segment. In this case
840 * a span tag is not needed, and it may be possible now or in
841 * the future to coalesce the new segment with an existing free
842 * segment.
843 */
844 btprev = TAILQ_LAST(&vm->vm_seglist, vmem_seglist);
845 if ((!bt_isbusy(btprev) && !bt_isfree(btprev)) ||
846 btprev->bt_start + btprev->bt_size != addr)
847 btprev = NULL;
848 } else {
849 btprev = NULL;
850 }
851
852 if (btprev == NULL || bt_isbusy(btprev)) {
853 if (btprev == NULL) {
854 btspan = bt_alloc(vm);
855 btspan->bt_type = type;
856 btspan->bt_start = addr;
857 btspan->bt_size = size;
858 bt_insseg_tail(vm, btspan);
859 }
860
861 btfree = bt_alloc(vm);
862 btfree->bt_type = BT_TYPE_FREE;
863 btfree->bt_start = addr;
864 btfree->bt_size = size;
865 bt_insseg_tail(vm, btfree);
866 bt_insfree(vm, btfree);
867 } else {
868 bt_remfree(vm, btprev);
869 btprev->bt_size += size;
870 bt_insfree(vm, btprev);
871 }
872
873 vm->vm_size += size;
874}
875
876static void
877vmem_destroy1(vmem_t *vm)
878{
879 bt_t *bt;
880
881 /*
882 * Drain per-cpu quantum caches.
883 */
884 qc_destroy(vm);
885
886 /*
887 * The vmem should now only contain empty segments.
888 */
889 VMEM_LOCK(vm);
890 MPASS(vm->vm_nbusytag == 0);
891
892 TAILQ_REMOVE(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
893 while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
894 bt_remseg(vm, bt);
895
896 if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
897 free(vm->vm_hashlist, M_VMEM);
898
899 bt_freetrim(vm, 0);
900
900
901 VMEM_CONDVAR_DESTROY(vm);
902 VMEM_LOCK_DESTROY(vm);
903 uma_zfree(vmem_zone, vm);
904}
905
906static int
907vmem_import(vmem_t *vm, vmem_size_t size, vmem_size_t align, int flags)
908{
909 vmem_addr_t addr;
910 int error;
911
912 if (vm->vm_importfn == NULL)
913 return (EINVAL);
914
915 /*
916 * To make sure we get a span that meets the alignment we double it
917 * and add the size to the tail. This slightly overestimates.
918 */
919 if (align != vm->vm_quantum_mask + 1)
920 size = (align * 2) + size;
921 size = roundup(size, vm->vm_import_quantum);
922
923 if (vm->vm_limit != 0 && vm->vm_limit < vm->vm_size + size)
924 return (ENOMEM);
925
926 bt_save(vm);
927 VMEM_UNLOCK(vm);
928 error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
929 VMEM_LOCK(vm);
930 bt_restore(vm);
931 if (error)
932 return (ENOMEM);
933
934 vmem_add1(vm, addr, size, BT_TYPE_SPAN);
935
936 return 0;
937}
938
939/*
940 * vmem_fit: check if a bt can satisfy the given restrictions.
941 *
942 * It is the caller's responsibility to ensure the region is big enough
943 * before calling us.
944 */
945static int
946vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
947 vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
948 vmem_addr_t maxaddr, vmem_addr_t *addrp)
949{
950 vmem_addr_t start;
951 vmem_addr_t end;
952
953 MPASS(size > 0);
954 MPASS(bt->bt_size >= size); /* caller's responsibility */
955
956 /*
957 * XXX assumption: vmem_addr_t and vmem_size_t are
958 * unsigned integer of the same size.
959 */
960
961 start = bt->bt_start;
962 if (start < minaddr) {
963 start = minaddr;
964 }
965 end = BT_END(bt);
966 if (end > maxaddr)
967 end = maxaddr;
968 if (start > end)
969 return (ENOMEM);
970
971 start = VMEM_ALIGNUP(start - phase, align) + phase;
972 if (start < bt->bt_start)
973 start += align;
974 if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
975 MPASS(align < nocross);
976 start = VMEM_ALIGNUP(start - phase, nocross) + phase;
977 }
978 if (start <= end && end - start >= size - 1) {
979 MPASS((start & (align - 1)) == phase);
980 MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
981 MPASS(minaddr <= start);
982 MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
983 MPASS(bt->bt_start <= start);
984 MPASS(BT_END(bt) - start >= size - 1);
985 *addrp = start;
986
987 return (0);
988 }
989 return (ENOMEM);
990}
991
992/*
993 * vmem_clip: Trim the boundary tag edges to the requested start and size.
994 */
995static void
996vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
997{
998 bt_t *btnew;
999 bt_t *btprev;
1000
1001 VMEM_ASSERT_LOCKED(vm);
1002 MPASS(bt->bt_type == BT_TYPE_FREE);
1003 MPASS(bt->bt_size >= size);
1004 bt_remfree(vm, bt);
1005 if (bt->bt_start != start) {
1006 btprev = bt_alloc(vm);
1007 btprev->bt_type = BT_TYPE_FREE;
1008 btprev->bt_start = bt->bt_start;
1009 btprev->bt_size = start - bt->bt_start;
1010 bt->bt_start = start;
1011 bt->bt_size -= btprev->bt_size;
1012 bt_insfree(vm, btprev);
1013 bt_insseg(vm, btprev,
1014 TAILQ_PREV(bt, vmem_seglist, bt_seglist));
1015 }
1016 MPASS(bt->bt_start == start);
1017 if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
1018 /* split */
1019 btnew = bt_alloc(vm);
1020 btnew->bt_type = BT_TYPE_BUSY;
1021 btnew->bt_start = bt->bt_start;
1022 btnew->bt_size = size;
1023 bt->bt_start = bt->bt_start + size;
1024 bt->bt_size -= size;
1025 bt_insfree(vm, bt);
1026 bt_insseg(vm, btnew,
1027 TAILQ_PREV(bt, vmem_seglist, bt_seglist));
1028 bt_insbusy(vm, btnew);
1029 bt = btnew;
1030 } else {
1031 bt->bt_type = BT_TYPE_BUSY;
1032 bt_insbusy(vm, bt);
1033 }
1034 MPASS(bt->bt_size >= size);
1035}
1036
1037static int
1038vmem_try_fetch(vmem_t *vm, const vmem_size_t size, vmem_size_t align, int flags)
1039{
1040 vmem_size_t avail;
1041
1042 VMEM_ASSERT_LOCKED(vm);
1043
1044 /*
1045 * XXX it is possible to fail to meet xalloc constraints with the
1046 * imported region. It is up to the user to specify the
1047 * import quantum such that it can satisfy any allocation.
1048 */
1049 if (vmem_import(vm, size, align, flags) == 0)
1050 return (1);
1051
1052 /*
1053 * Try to free some space from the quantum cache or reclaim
1054 * functions if available.
1055 */
1056 if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
1057 avail = vm->vm_size - vm->vm_inuse;
1058 bt_save(vm);
1059 VMEM_UNLOCK(vm);
1060 if (vm->vm_qcache_max != 0)
1061 qc_drain(vm);
1062 if (vm->vm_reclaimfn != NULL)
1063 vm->vm_reclaimfn(vm, flags);
1064 VMEM_LOCK(vm);
1065 bt_restore(vm);
1066 /* If we were successful retry even NOWAIT. */
1067 if (vm->vm_size - vm->vm_inuse > avail)
1068 return (1);
1069 }
1070 if ((flags & M_NOWAIT) != 0)
1071 return (0);
1072 bt_save(vm);
1073 VMEM_CONDVAR_WAIT(vm);
1074 bt_restore(vm);
1075 return (1);
1076}
1077
1078static int
1079vmem_try_release(vmem_t *vm, struct vmem_btag *bt, const bool remfree)
1080{
1081 struct vmem_btag *prev;
1082
1083 MPASS(bt->bt_type == BT_TYPE_FREE);
1084
1085 if (vm->vm_releasefn == NULL)
1086 return (0);
1087
1088 prev = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1089 MPASS(prev != NULL);
1090 MPASS(prev->bt_type != BT_TYPE_FREE);
1091
1092 if (prev->bt_type == BT_TYPE_SPAN && prev->bt_size == bt->bt_size) {
1093 vmem_addr_t spanaddr;
1094 vmem_size_t spansize;
1095
1096 MPASS(prev->bt_start == bt->bt_start);
1097 spanaddr = prev->bt_start;
1098 spansize = prev->bt_size;
1099 if (remfree)
1100 bt_remfree(vm, bt);
1101 bt_remseg(vm, bt);
1102 bt_remseg(vm, prev);
1103 vm->vm_size -= spansize;
1104 VMEM_CONDVAR_BROADCAST(vm);
1105 bt_freetrim(vm, BT_MAXFREE);
1106 vm->vm_releasefn(vm->vm_arg, spanaddr, spansize);
1107 return (1);
1108 }
1109 return (0);
1110}
1111
1112static int
1113vmem_xalloc_nextfit(vmem_t *vm, const vmem_size_t size, vmem_size_t align,
1114 const vmem_size_t phase, const vmem_size_t nocross, int flags,
1115 vmem_addr_t *addrp)
1116{
1117 struct vmem_btag *bt, *cursor, *next, *prev;
1118 int error;
1119
1120 error = ENOMEM;
1121 VMEM_LOCK(vm);
1122
1123 /*
1124 * Make sure we have enough tags to complete the operation.
1125 */
1126 if (bt_fill(vm, flags) != 0)
1127 goto out;
1128
1129retry:
1130 /*
1131 * Find the next free tag meeting our constraints. If one is found,
1132 * perform the allocation.
1133 */
1134 for (cursor = &vm->vm_cursor, bt = TAILQ_NEXT(cursor, bt_seglist);
1135 bt != cursor; bt = TAILQ_NEXT(bt, bt_seglist)) {
1136 if (bt == NULL)
1137 bt = TAILQ_FIRST(&vm->vm_seglist);
1138 if (bt->bt_type == BT_TYPE_FREE && bt->bt_size >= size &&
1139 (error = vmem_fit(bt, size, align, phase, nocross,
1140 VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
1141 vmem_clip(vm, bt, *addrp, size);
1142 break;
1143 }
1144 }
1145
1146 /*
1147 * Try to coalesce free segments around the cursor. If we succeed, and
1148 * have not yet satisfied the allocation request, try again with the
1149 * newly coalesced segment.
1150 */
1151 if ((next = TAILQ_NEXT(cursor, bt_seglist)) != NULL &&
1152 (prev = TAILQ_PREV(cursor, vmem_seglist, bt_seglist)) != NULL &&
1153 next->bt_type == BT_TYPE_FREE && prev->bt_type == BT_TYPE_FREE &&
1154 prev->bt_start + prev->bt_size == next->bt_start) {
1155 prev->bt_size += next->bt_size;
1156 bt_remfree(vm, next);
1157 bt_remseg(vm, next);
1158
1159 /*
1160 * The coalesced segment might be able to satisfy our request.
1161 * If not, we might need to release it from the arena.
1162 */
1163 if (error == ENOMEM && prev->bt_size >= size &&
1164 (error = vmem_fit(prev, size, align, phase, nocross,
1165 VMEM_ADDR_MIN, VMEM_ADDR_MAX, addrp)) == 0) {
1166 vmem_clip(vm, prev, *addrp, size);
1167 bt = prev;
1168 } else
1169 (void)vmem_try_release(vm, prev, true);
1170 }
1171
1172 /*
1173 * If the allocation was successful, advance the cursor.
1174 */
1175 if (error == 0) {
1176 TAILQ_REMOVE(&vm->vm_seglist, cursor, bt_seglist);
1177 for (; bt != NULL && bt->bt_start < *addrp + size;
1178 bt = TAILQ_NEXT(bt, bt_seglist))
1179 ;
1180 if (bt != NULL)
1181 TAILQ_INSERT_BEFORE(bt, cursor, bt_seglist);
1182 else
1183 TAILQ_INSERT_HEAD(&vm->vm_seglist, cursor, bt_seglist);
1184 }
1185
1186 /*
1187 * Attempt to bring additional resources into the arena. If that fails
1188 * and M_WAITOK is specified, sleep waiting for resources to be freed.
1189 */
1190 if (error == ENOMEM && vmem_try_fetch(vm, size, align, flags))
1191 goto retry;
1192
1193out:
1194 VMEM_UNLOCK(vm);
1195 return (error);
1196}
1197
1198/* ---- vmem API */
1199
1200void
1201vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
1202 vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
1203{
1204
1205 VMEM_LOCK(vm);
1206 KASSERT(vm->vm_size == 0, ("%s: arena is non-empty", __func__));
1207 vm->vm_importfn = importfn;
1208 vm->vm_releasefn = releasefn;
1209 vm->vm_arg = arg;
1210 vm->vm_import_quantum = import_quantum;
1211 VMEM_UNLOCK(vm);
1212}
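
/*
 * A minimal sketch of stacking one arena on another with vmem_set_import()
 * (hypothetical names; the child imports 1 MB spans from a parent arena and
 * hands them back through the release hook):
 *
 *	static int
 *	child_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
 *	{
 *		return (vmem_xalloc(arg, size, 0, 0, 0, VMEM_ADDR_MIN,
 *		    VMEM_ADDR_MAX, flags, addrp));
 *	}
 *
 *	static void
 *	child_release(void *arg, vmem_addr_t addr, vmem_size_t size)
 *	{
 *		vmem_xfree(arg, addr, size);
 *	}
 *
 *	vmem_set_import(child, child_import, child_release, parent,
 *	    1024 * 1024);
 */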
1213
1214void
1215vmem_set_limit(vmem_t *vm, vmem_size_t limit)
1216{
1217
1218 VMEM_LOCK(vm);
1219 vm->vm_limit = limit;
1220 VMEM_UNLOCK(vm);
1221}
1222
1223void
1224vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
1225{
1226
1227 VMEM_LOCK(vm);
1228 vm->vm_reclaimfn = reclaimfn;
1229 VMEM_UNLOCK(vm);
1230}
1231
1232/*
1233 * vmem_init: Initializes vmem arena.
1234 */
1235vmem_t *
1236vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
1237 vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1238{
1239 vmem_size_t i;
1240
1241 MPASS(quantum > 0);
1242 MPASS((quantum & (quantum - 1)) == 0);
1243
1244 bzero(vm, sizeof(*vm));
1245
1246 VMEM_CONDVAR_INIT(vm, name);
1247 VMEM_LOCK_INIT(vm, name);
1248 vm->vm_nfreetags = 0;
1249 LIST_INIT(&vm->vm_freetags);
1250 strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
1251 vm->vm_quantum_mask = quantum - 1;
1252 vm->vm_quantum_shift = flsl(quantum) - 1;
1253 vm->vm_nbusytag = 0;
1254 vm->vm_size = 0;
1255 vm->vm_limit = 0;
1256 vm->vm_inuse = 0;
1257 qc_init(vm, qcache_max);
1258
1259 TAILQ_INIT(&vm->vm_seglist);
1260 vm->vm_cursor.bt_start = vm->vm_cursor.bt_size = 0;
1261 vm->vm_cursor.bt_type = BT_TYPE_CURSOR;
1262 TAILQ_INSERT_TAIL(&vm->vm_seglist, &vm->vm_cursor, bt_seglist);
1263
1264 for (i = 0; i < VMEM_MAXORDER; i++)
1265 LIST_INIT(&vm->vm_freelist[i]);
1266
1267 memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
1268 vm->vm_hashsize = VMEM_HASHSIZE_MIN;
1269 vm->vm_hashlist = vm->vm_hash0;
1270
1271 if (size != 0) {
1272 if (vmem_add(vm, base, size, flags) != 0) {
1273 vmem_destroy1(vm);
1274 return NULL;
1275 }
1276 }
1277
1278 mtx_lock(&vmem_list_lock);
1279 LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
1280 mtx_unlock(&vmem_list_lock);
1281
1282 return vm;
1283}
1284
1285/*
1286 * vmem_create: create an arena.
1287 */
1288vmem_t *
1289vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
1290 vmem_size_t quantum, vmem_size_t qcache_max, int flags)
1291{
1292
1293 vmem_t *vm;
1294
1295 vm = uma_zalloc(vmem_zone, flags & (M_WAITOK|M_NOWAIT));
1296 if (vm == NULL)
1297 return (NULL);
1298 if (vmem_init(vm, name, base, size, quantum, qcache_max,
1299 flags) == NULL)
1300 return (NULL);
1301 return (vm);
1302}
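
/*
 * Usage sketch (hypothetical arena; base, size, and name are illustrative):
 * carve a page-granular region out of a device window, suballocate from it,
 * and tear it down:
 *
 *	vmem_t *arena;
 *	vmem_addr_t addr;
 *
 *	arena = vmem_create("example", 0x100000, 0x400000, PAGE_SIZE, 0,
 *	    M_WAITOK);
 *	if (vmem_alloc(arena, 8 * PAGE_SIZE, M_BESTFIT | M_WAITOK,
 *	    &addr) == 0) {
 *		... use [addr, addr + 8 * PAGE_SIZE) ...
 *		vmem_free(arena, addr, 8 * PAGE_SIZE);
 *	}
 *	vmem_destroy(arena);
 */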
1303
1304void
1305vmem_destroy(vmem_t *vm)
1306{
1307
1308 mtx_lock(&vmem_list_lock);
1309 LIST_REMOVE(vm, vm_alllist);
1310 mtx_unlock(&vmem_list_lock);
1311
1312 vmem_destroy1(vm);
1313}
1314
1315vmem_size_t
1316vmem_roundup_size(vmem_t *vm, vmem_size_t size)
1317{
1318
1319 return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
1320}
1321
1322/*
1323 * vmem_alloc: allocate resource from the arena.
1324 */
1325int
1326vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
1327{
1328 const int strat __unused = flags & VMEM_FITMASK;
1329 qcache_t *qc;
1330
1331 flags &= VMEM_FLAGS;
1332 MPASS(size > 0);
1333 MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
1334 if ((flags & M_NOWAIT) == 0)
1335 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");
1336
1337 if (size <= vm->vm_qcache_max) {
1338 /*
1339 * Resource 0 cannot be cached, so avoid a blocking allocation
1340 * in qc_import() and give the vmem_xalloc() call below a chance
1341 * to return 0.
1342 */
1343 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1344 *addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache,
1345 (flags & ~M_WAITOK) | M_NOWAIT);
1346 if (__predict_true(*addrp != 0))
1347 return (0);
1348 }
1349
1350 return (vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
1351 flags, addrp));
1352}
1353
1354int
1355vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
1356 const vmem_size_t phase, const vmem_size_t nocross,
1357 const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
1358 vmem_addr_t *addrp)
1359{
1360 const vmem_size_t size = vmem_roundup_size(vm, size0);
1361 struct vmem_freelist *list;
1362 struct vmem_freelist *first;
1363 struct vmem_freelist *end;
1364 bt_t *bt;
1365 int error;
1366 int strat;
1367
1368 flags &= VMEM_FLAGS;
1369 strat = flags & VMEM_FITMASK;
1370 MPASS(size0 > 0);
1371 MPASS(size > 0);
1372 MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT || strat == M_NEXTFIT);
1373 MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
1374 if ((flags & M_NOWAIT) == 0)
1375 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
1376 MPASS((align & vm->vm_quantum_mask) == 0);
1377 MPASS((align & (align - 1)) == 0);
1378 MPASS((phase & vm->vm_quantum_mask) == 0);
1379 MPASS((nocross & vm->vm_quantum_mask) == 0);
1380 MPASS((nocross & (nocross - 1)) == 0);
1381 MPASS((align == 0 && phase == 0) || phase < align);
1382 MPASS(nocross == 0 || nocross >= size);
1383 MPASS(minaddr <= maxaddr);
1384 MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));
1385 if (strat == M_NEXTFIT)
1386 MPASS(minaddr == VMEM_ADDR_MIN && maxaddr == VMEM_ADDR_MAX);
1387
1388 if (align == 0)
1389 align = vm->vm_quantum_mask + 1;
1390 *addrp = 0;
1391
1392 /*
1393 * Next-fit allocations don't use the freelists.
1394 */
1395 if (strat == M_NEXTFIT)
1396 return (vmem_xalloc_nextfit(vm, size0, align, phase, nocross,
1397 flags, addrp));
1398
1399 end = &vm->vm_freelist[VMEM_MAXORDER];
1400 /*
1401 * choose a free block from which we allocate.
1402 */
1403 first = bt_freehead_toalloc(vm, size, strat);
1404 VMEM_LOCK(vm);
1405
1406 /*
1407 * Make sure we have enough tags to complete the operation.
1408 */
1409 error = bt_fill(vm, flags);
1410 if (error != 0)
1411 goto out;
1412 for (;;) {
1413 /*
1414 * Scan freelists looking for a tag that satisfies the
1415 * allocation. If we're doing BESTFIT we may encounter
1416 * sizes below the request. If we're doing FIRSTFIT we
1417 * inspect only the first element from each list.
1418 */
1419 for (list = first; list < end; list++) {
1420 LIST_FOREACH(bt, list, bt_freelist) {
1421 if (bt->bt_size >= size) {
1422 error = vmem_fit(bt, size, align, phase,
1423 nocross, minaddr, maxaddr, addrp);
1424 if (error == 0) {
1425 vmem_clip(vm, bt, *addrp, size);
1426 goto out;
1427 }
1428 }
1429 /* FIRST skips to the next list. */
1430 if (strat == M_FIRSTFIT)
1431 break;
1432 }
1433 }
1434
1435 /*
1436 * Retry if the fast algorithm failed.
1437 */
1438 if (strat == M_FIRSTFIT) {
1439 strat = M_BESTFIT;
1440 first = bt_freehead_toalloc(vm, size, strat);
1441 continue;
1442 }
1443
1444 /*
1445 * Try a few measures to bring additional resources into the
1446 * arena. If all else fails, we will sleep waiting for
1447 * resources to be freed.
1448 */
1449 if (!vmem_try_fetch(vm, size, align, flags)) {
1450 error = ENOMEM;
1451 break;
1452 }
1453 }
1454out:
1455 VMEM_UNLOCK(vm);
1456 if (error != 0 && (flags & M_NOWAIT) == 0)
1457 panic("failed to allocate waiting allocation\n");
1458
1459 return (error);
1460}
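
/*
 * Constraint example (illustrative): allocate 64 KB, aligned to 64 KB, that
 * does not cross a 1 MB boundary, anywhere in the arena:
 *
 *	error = vmem_xalloc(arena, 64 * 1024, 64 * 1024, 0, 1024 * 1024,
 *	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_WAITOK, &addr);
 *
 * Per the assertions above, phase must be less than align and nocross must
 * be 0 or at least size; both hold here.
 */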
1461
1462/*
1463 * vmem_free: free the resource to the arena.
1464 */
1465void
1466vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
1467{
1468 qcache_t *qc;
1469 MPASS(size > 0);
1470
1471 if (size <= vm->vm_qcache_max &&
1472 __predict_true(addr >= VMEM_ADDR_QCACHE_MIN)) {
1473 qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
1474 uma_zfree(qc->qc_cache, (void *)addr);
1475 } else
1476 vmem_xfree(vm, addr, size);
1477}
1478
1479void
1480vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size __unused)
1481{
1482 bt_t *bt;
1483 bt_t *t;
1484
1485 MPASS(size > 0);
1486
1487 VMEM_LOCK(vm);
1488 bt = bt_lookupbusy(vm, addr);
1489 MPASS(bt != NULL);
1490 MPASS(bt->bt_start == addr);
1491 MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
1492 bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
1493 MPASS(bt->bt_type == BT_TYPE_BUSY);
1494 bt_rembusy(vm, bt);
1495 bt->bt_type = BT_TYPE_FREE;
1496
1497 /* coalesce */
1498 t = TAILQ_NEXT(bt, bt_seglist);
1499 if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1500 MPASS(BT_END(bt) < t->bt_start); /* YYY */
1501 bt->bt_size += t->bt_size;
1502 bt_remfree(vm, t);
1503 bt_remseg(vm, t);
1504 }
1505 t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
1506 if (t != NULL && t->bt_type == BT_TYPE_FREE) {
1507 MPASS(BT_END(t) < bt->bt_start); /* YYY */
1508 bt->bt_size += t->bt_size;
1509 bt->bt_start = t->bt_start;
1510 bt_remfree(vm, t);
1511 bt_remseg(vm, t);
1512 }
1513
1514 if (!vmem_try_release(vm, bt, false)) {
1515 bt_insfree(vm, bt);
1516 VMEM_CONDVAR_BROADCAST(vm);
1517 bt_freetrim(vm, BT_MAXFREE);
1518 }
1519}
1520
1521/*
1522 * vmem_add: add a span of address space to the arena directly.
1523 * The span is tagged BT_TYPE_SPAN_STATIC and is never released.
1524 */
1525int
1526vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
1527{
1528 int error;
1529
1530 flags &= VMEM_FLAGS;
1531
1532 VMEM_LOCK(vm);
1533 error = bt_fill(vm, flags);
1534 if (error == 0)
1535 vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
1536 VMEM_UNLOCK(vm);
1537
1538 return (error);
1539}
1540
1541/*
1542 * vmem_size: information about the arena's size
1543 */
1544vmem_size_t
1545vmem_size(vmem_t *vm, int typemask)
1546{
1547 int i;
1548
1549 switch (typemask) {
1550 case VMEM_ALLOC:
1551 return vm->vm_inuse;
1552 case VMEM_FREE:
1553 return vm->vm_size - vm->vm_inuse;
1554 case VMEM_FREE|VMEM_ALLOC:
1555 return vm->vm_size;
1556 case VMEM_MAXFREE:
1557 VMEM_LOCK(vm);
1558 for (i = VMEM_MAXORDER - 1; i >= 0; i--) {
1559 if (LIST_EMPTY(&vm->vm_freelist[i]))
1560 continue;
1561 VMEM_UNLOCK(vm);
1562 return ((vmem_size_t)ORDER2SIZE(i) <<
1563 vm->vm_quantum_shift);
1564 }
1565 VMEM_UNLOCK(vm);
1566 return (0);
1567 default:
1568 panic("vmem_size");
1569 }
1570}
1571
1572/* ---- debug */
1573
1574#if defined(DDB) || defined(DIAGNOSTIC)
1575
1576static void bt_dump(const bt_t *, int (*)(const char *, ...)
1577 __printflike(1, 2));
1578
1579static const char *
1580bt_type_string(int type)
1581{
1582
1583 switch (type) {
1584 case BT_TYPE_BUSY:
1585 return "busy";
1586 case BT_TYPE_FREE:
1587 return "free";
1588 case BT_TYPE_SPAN:
1589 return "span";
1591 return "static span";
1592 case BT_TYPE_CURSOR:
1593 return "cursor";
1594 default:
1595 break;
1596 }
1597 return "BOGUS";
1598}
1599
1600static void
1601bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
1602{
1603
1604 (*pr)("\t%p: %jx %jx, %d(%s)\n",
1605 bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
1606 bt->bt_type, bt_type_string(bt->bt_type));
1607}
1608
1609static void
1610vmem_dump(const vmem_t *vm , int (*pr)(const char *, ...) __printflike(1, 2))
1611{
1612 const bt_t *bt;
1613 int i;
1614
1615 (*pr)("vmem %p '%s'\n", vm, vm->vm_name);
1616 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1617 bt_dump(bt, pr);
1618 }
1619
1620 for (i = 0; i < VMEM_MAXORDER; i++) {
1621 const struct vmem_freelist *fl = &vm->vm_freelist[i];
1622
1623 if (LIST_EMPTY(fl)) {
1624 continue;
1625 }
1626
1627 (*pr)("freelist[%d]\n", i);
1628 LIST_FOREACH(bt, fl, bt_freelist) {
1629 bt_dump(bt, pr);
1630 }
1631 }
1632}
1633
1634#endif /* defined(DDB) || defined(DIAGNOSTIC) */
1635
1636#if defined(DDB)
1637#include <ddb/ddb.h>
1638
1639static bt_t *
1640vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
1641{
1642 bt_t *bt;
1643
1644 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1645 if (BT_ISSPAN_P(bt)) {
1646 continue;
1647 }
1648 if (bt->bt_start <= addr && addr <= BT_END(bt)) {
1649 return bt;
1650 }
1651 }
1652
1653 return NULL;
1654}
1655
1656void
1657vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
1658{
1659 vmem_t *vm;
1660
1661 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1662 bt_t *bt;
1663
1664 bt = vmem_whatis_lookup(vm, addr);
1665 if (bt == NULL) {
1666 continue;
1667 }
1668 (*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
1669 (void *)addr, (void *)bt->bt_start,
1670 (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
1671 (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
1672 }
1673}
1674
1675void
1676vmem_printall(const char *modif, int (*pr)(const char *, ...))
1677{
1678 const vmem_t *vm;
1679
1680 LIST_FOREACH(vm, &vmem_list, vm_alllist) {
1681 vmem_dump(vm, pr);
1682 }
1683}
1684
1685void
1686vmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...))
1687{
1688 const vmem_t *vm = (const void *)addr;
1689
1690 vmem_dump(vm, pr);
1691}
1692
1693DB_SHOW_COMMAND(vmemdump, vmemdump)
1694{
1695
1696 if (!have_addr) {
1697 db_printf("usage: show vmemdump <addr>\n");
1698 return;
1699 }
1700
1701 vmem_dump((const vmem_t *)addr, db_printf);
1702}
1703
1704DB_SHOW_ALL_COMMAND(vmemdump, vmemdumpall)
1705{
1706 const vmem_t *vm;
1707
1708 LIST_FOREACH(vm, &vmem_list, vm_alllist)
1709 vmem_dump(vm, db_printf);
1710}
1711
1712DB_SHOW_COMMAND(vmem, vmem_summ)
1713{
1714 const vmem_t *vm = (const void *)addr;
1715 const bt_t *bt;
1716 size_t ft[VMEM_MAXORDER], ut[VMEM_MAXORDER];
1717 size_t fs[VMEM_MAXORDER], us[VMEM_MAXORDER];
1718 int ord;
1719
1720 if (!have_addr) {
1721 db_printf("usage: show vmem <addr>\n");
1722 return;
1723 }
1724
1725 db_printf("vmem %p '%s'\n", vm, vm->vm_name);
1726 db_printf("\tquantum:\t%zu\n", vm->vm_quantum_mask + 1);
1727 db_printf("\tsize:\t%zu\n", vm->vm_size);
1728 db_printf("\tinuse:\t%zu\n", vm->vm_inuse);
1729 db_printf("\tfree:\t%zu\n", vm->vm_size - vm->vm_inuse);
1730 db_printf("\tbusy tags:\t%d\n", vm->vm_nbusytag);
1731 db_printf("\tfree tags:\t%d\n", vm->vm_nfreetags);
1732
1733 memset(&ft, 0, sizeof(ft));
1734 memset(&ut, 0, sizeof(ut));
1735 memset(&fs, 0, sizeof(fs));
1736 memset(&us, 0, sizeof(us));
1737 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1738 ord = SIZE2ORDER(bt->bt_size >> vm->vm_quantum_shift);
1739 if (bt->bt_type == BT_TYPE_BUSY) {
1740 ut[ord]++;
1741 us[ord] += bt->bt_size;
1742 } else if (bt->bt_type == BT_TYPE_FREE) {
1743 ft[ord]++;
1744 fs[ord] += bt->bt_size;
1745 }
1746 }
1747 db_printf("\t\t\tinuse\tsize\t\tfree\tsize\n");
1748 for (ord = 0; ord < VMEM_MAXORDER; ord++) {
1749 if (ut[ord] == 0 && ft[ord] == 0)
1750 continue;
1751 db_printf("\t%-15zu %zu\t%-15zu %zu\t%-16zu\n",
1752 ORDER2SIZE(ord) << vm->vm_quantum_shift,
1753 ut[ord], us[ord], ft[ord], fs[ord]);
1754 }
1755}
1756
1757DB_SHOW_ALL_COMMAND(vmem, vmem_summall)
1758{
1759 const vmem_t *vm;
1760
1761 LIST_FOREACH(vm, &vmem_list, vm_alllist)
1762 vmem_summ((db_expr_t)vm, TRUE, count, modif);
1763}
1764#endif /* defined(DDB) */
1765
1766#define vmem_printf printf
1767
1768#if defined(DIAGNOSTIC)
1769
1770static bool
1771vmem_check_sanity(vmem_t *vm)
1772{
1773 const bt_t *bt, *bt2;
1774
1775 MPASS(vm != NULL);
1776
1777 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1778 if (bt->bt_start > BT_END(bt)) {
1779 printf("corrupted tag\n");
1780 bt_dump(bt, vmem_printf);
1781 return false;
1782 }
1783 }
1784 TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
1785 if (bt->bt_type == BT_TYPE_CURSOR) {
1786 if (bt->bt_start != 0 || bt->bt_size != 0) {
1787 printf("corrupted cursor\n");
1788 return false;
1789 }
1790 continue;
1791 }
1792 TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
1793 if (bt == bt2) {
1794 continue;
1795 }
1796 if (bt2->bt_type == BT_TYPE_CURSOR) {
1797 continue;
1798 }
1799 if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
1800 continue;
1801 }
1802 if (bt->bt_start <= BT_END(bt2) &&
1803 bt2->bt_start <= BT_END(bt)) {
1804 printf("overwrapped tags\n");
1805 bt_dump(bt, vmem_printf);
1806 bt_dump(bt2, vmem_printf);
1807 return false;
1808 }
1809 }
1810 }
1811
1812 return true;
1813}
1814
1815static void
1816vmem_check(vmem_t *vm)
1817{
1818
1819 if (!vmem_check_sanity(vm)) {
1820 panic("insanity vmem %p", vm);
1821 }
1822}
1823
1824#endif /* defined(DIAGNOSTIC) */