#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#include <vm/memguard.h>
#include <vm/redzone.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <sys/dtrace_bsd.h>

bool	__read_frequently		dtrace_malloc_enabled;

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||	\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

#if defined(KASAN) || defined(DEBUG_REDZONE)
#define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
#define	DEBUG_REDZONE_ARG	, osize
#else
#define	DEBUG_REDZONE_ARG_DEF
#define	DEBUG_REDZONE_ARG
#endif
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half original */
#endif

#define	KMEM_ZMASK	(KMEM_ZBASE - 1)
#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
	{128, "malloc-128", },
	{256, "malloc-256", },
	{384, "malloc-384", },
	{512, "malloc-512", },
	{1024, "malloc-1024", },
	{2048, "malloc-2048", },
	{4096, "malloc-4096", },
	{8192, "malloc-8192", },
	{16384, "malloc-16384", },
	{32768, "malloc-32768", },
	{65536, "malloc-65536", },
	{0, NULL},
};
190 "Size of kernel memory");
194 "Maximum allocation size that malloc(9) would use UMA as backend");
198 "Minimum size of kernel memory");
202 "Maximum size of kernel memory");
206 "Scale factor for kernel memory size");
210 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
215 CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
219 "Malloc information");
224 "Number of malloc zones");
228 CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
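/*
 * Example (sketch, not from this file): the knobs declared above are visible
 * from userland via sysctl(8), e.g. something like
 *
 *	# sysctl vm.kmem_size vm.kmem_map_size vm.kmem_map_free
 *
 * reports the configured kmem arena size and its current usage.  Exact
 * values depend on the platform and on loader tunables.
 */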
#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
	for (i = 0; i < nitems(kmemzones); i++) {
		sizes[i] = kmemzones[i].kz_size;
	}

	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones", &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = &mtp->ks_mti;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = numzones > 1 ? val % numzones : 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;
	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
	critical_exit();
}
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
	critical_exit();
}
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}
void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free((vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}
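/*
 * Usage sketch (illustrative, not from this file): a driver needing a
 * physically contiguous, page-aligned buffer below 4 GB might call
 *
 *	buf = contigmalloc(64 * 1024, M_DEVBUF, M_WAITOK | M_ZERO,
 *	    0, 0xffffffffUL, PAGE_SIZE, 0);
 *	...
 *	contigfree(buf, 64 * 1024, M_DEVBUF);
 *
 * M_DEVBUF is the generic device-buffer malloc type; real drivers usually
 * prefer busdma(9), which expresses DMA constraints portably.
 */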
#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));

	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
			epoch_trace_list(curthread);
#endif
			KASSERT(0,
			    ("malloc(M_WAITOK) with sleeping prohibited"));
		}
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif
static bool
malloc_large_slab(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return ((va & 1) != 0);
}

static inline size_t
malloc_large_size(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	MPASS(malloc_large_slab(slab));
	return (va >> 1);
}
static caddr_t __noinline
malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
    int flags DEBUG_REDZONE_ARG_DEF)
{
	vm_offset_t kva;
	caddr_t va;

	size = roundup(size, PAGE_SIZE);
	kva = kmem_malloc_domainset(policy, size, flags);
	if (kva != 0) {
		/* The low bit is unused for slab pointers. */
		vsetzoneslab(kva, NULL, (void *)((size << 1) | 1));
		uma_total_inc(size);
	}
	va = (caddr_t)kva;
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
	return (va);
}
static void
free_large(void *addr, size_t size)
{

	kmem_free((vm_offset_t)addr, size);
	uma_total_dec(size);
}
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc(zone, flags);
	if (va != NULL) {
		size = zone->uz_size;
		if ((flags & M_ZERO) == 0) {
			kmsan_mark(va, size, KMSAN_STATE_UNINIT);
			kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
		}
	}
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
	return ((void *) va);
}
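/*
 * Usage sketch (illustrative, not part of this file): a consumer declares
 * its own malloc type and allocates and frees through it, e.g.
 *
 *	MALLOC_DEFINE(M_EXAMPLEDEV, "exampledev", "example driver buffers");
 *
 *	p = malloc(sizeof(*p), M_EXAMPLEDEV, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_EXAMPLEDEV);
 *
 * M_EXAMPLEDEV is a hypothetical type used only for illustration.  M_WAITOK
 * callers must be able to sleep (see malloc_dbg() above); M_NOWAIT callers
 * must tolerate a NULL return.
 */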
static void *
malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
    int flags)
{
	uma_zone_t zone;
	caddr_t va;
	size_t size;
	int indx;

	size = *sizep;
	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
	    ("malloc_domain: Called with bad flag / size combination."));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_domain(zone, NULL, domain, flags);
	if (va != NULL)
		*sizep = zone->uz_size;
	*indxp = indx;
	return ((void *)va);
}
void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	caddr_t va;
	int domain;
	int indx;
#if defined(KASAN) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		va = malloc_domain(&size, &indx, mtp, domain, flags);
	} while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
	if ((flags & M_ZERO) == 0) {
		kmsan_mark(va, size, KMSAN_STATE_UNINIT);
		kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
	}
	return (va);
}
void *
malloc_domainset_exec(size_t size, struct malloc_type *mtp,
    struct domainset *ds, int flags)
{
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif
#ifdef MALLOC_DEBUG
	caddr_t va;
#endif

	flags |= M_EXEC;

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
}
void *
malloc_domainset_aligned(size_t size, size_t align,
    struct malloc_type *mtp, struct domainset *ds, int flags)
{
	void *res;
	size_t asize;

	KASSERT(powerof2(align),
	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
	    align, size));
	KASSERT(align <= PAGE_SIZE,
	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
	    align, size));

	/*
	 * Round the allocation size up to the next power of 2, because malloc
	 * zones provide alignment equal to their (power-of-2) size.
	 */
	if (size == 0)
		size = 1;
	asize = size <= align ? align : 1UL << flsl(size - 1);

	res = malloc_domainset(asize, mtp, ds, flags);
	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
	    "allocsize %#zx align %#zx", res, size, asize, align));
	return (res);
}
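/*
 * Worked example (illustrative): with size = 3000 and align = 64, the
 * request is rounded up to the next power of two, asize = 1UL << flsl(2999)
 * = 4096, since only power-of-2-sized zones guarantee the alignment; with
 * size = 48 and align = 64 the request is simply padded up to asize = 64.
 */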
void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}

void *
mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
    struct domainset *ds, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray_domainset: %zu * %zu overflowed",
		    nmemb, size);

	return (malloc_domainset(size * nmemb, type, ds, flags));
}
#if defined(INVARIANTS) && !defined(KASAN)
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes for
	 * 64 bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif
#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif
void
free(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}
/*
 *	zfree:
 *
 *	Zero then free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
zfree(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		explicit_bzero(addr, size);
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		explicit_bzero(addr, size);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}
/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	uma_zone_t zone;
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_version == M_VERSION,
	    ("realloc: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	zone = NULL;
	alloc = redzone_get_size(addr);
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!malloc_large_slab(slab))
		alloc = zone->uz_size;
	else
		alloc = malloc_large_size(slab);

	/* Reuse the original block if appropriate */
	if (size <= alloc &&
	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}
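/*
 * Illustrative note: with the default REALLOC_FRACTION of 1, an existing
 * allocation is reused in place whenever the new size still fits and exceeds
 * half of the current allocation.  For example, shrinking a 1024-byte block
 * to 600 bytes returns the same pointer, while growing it to 1500 bytes
 * allocates from a larger zone, copies min(size, alloc) bytes and frees the
 * old block.
 */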
size_t
malloc_usable_size(const void *addr)
{
#ifndef DEBUG_REDZONE
	uma_zone_t zone;
	uma_slab_t slab;
#endif
	u_long size;

	if (addr == NULL)
		return (0);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(__DECONST(void *, addr)))
		return (memguard_get_req_size(addr));
#endif

#ifdef DEBUG_REDZONE
	size = redzone_get_size(__DECONST(void *, addr));
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("malloc_usable_size: address %p(%p) is not allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!malloc_large_slab(slab))
		size = zone->uz_size;
	else
		size = malloc_large_size(slab);
#endif
	return (size);
}
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif

	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768)

		panic("Tune VM_KMEM_SIZE_* for the platform");

	vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
	    (KASAN_SHADOW_SCALE + 1);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	uma_set_limit(tmp);

#ifdef DEBUG_MEMGUARD
	/* Do this last after the kernel arena size has stabilized. */
	memguard_init(kernel_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		const char *name = kmemzones[indx].kz_name;
		size_t align;
		int subzone;

		align = UMA_ALIGN_PTR;
		if (powerof2(size) && size > sizeof(void *))
			align = MIN(size, PAGE_SIZE) - 1;
		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    align, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_version != M_VERSION)
		panic("malloc_init: type %s with unsupported version %lu",
		    mtp->ks_shortdesc, mtp->ks_version);

	mtip = &mtp->ks_mti;
	mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}
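/*
 * Note (sketch): consumers normally do not call malloc_init() directly.
 * MALLOC_DEFINE(9) declares the malloc_type object and arranges, via
 * SYSINIT hooks, for malloc_init()/malloc_uninit() to run at module load
 * and unload, which is when the per-CPU statistics above are allocated and
 * the type is linked onto kmemstatistics.
 */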
void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_version == M_VERSION,
	    ("malloc_uninit: bad malloc type version"));

	mtx_lock(&malloc_mtx);
	mtip = &mtp->ks_mti;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
}
struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp, zeromts;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	bzero(&zeromts, sizeof(zeromts));

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
		}
		/*
		 * Fill in the missing CPUs.
		 */
		for (; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}
SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
    CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");
void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}
static int64_t
get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
    uint64_t *inuse)
{
	const struct malloc_type_stats *mtsp;
	uint64_t frees, alloced, freed;
	int i;

	*allocs = 0;
	frees = 0;
	alloced = 0;
	freed = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);

		*allocs += mtsp->mts_numallocs;
		frees += mtsp->mts_numfrees;
		alloced += mtsp->mts_memalloced;
		freed += mtsp->mts_memfreed;
	}
	*inuse = *allocs - frees;
	return (alloced - freed);
}
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	const char *fmt_hdr, *fmt_entry;
	struct malloc_type *mtp;
	uint64_t allocs, inuse;
	int64_t size;
	/* variables for sorting */
	struct malloc_type *last_mtype, *cur_mtype;
	int64_t cur_size, last_size;
	int ties;

	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
	} else {
		fmt_hdr = "%18s %12s %12s %12s\n";
		fmt_entry = "%18s %12ju %12jdK %12ju\n";
	}

	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");

	/* Select sort, largest size first. */
	last_mtype = NULL;
	last_size = INT64_MAX;
	for (;;) {
		cur_mtype = NULL;
		cur_size = -1;
		ties = 0;

		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
			/*
			 * In the case of size ties, print out mtypes
			 * in the order they are encountered.
			 */
			if (mtp == last_mtype) {
				ties = 1;
				continue;
			}
			size = get_malloc_stats(&mtp->ks_mti, &allocs,
			    &inuse);
			if (size > cur_size && size < last_size + ties) {
				cur_size = size;
				cur_mtype = mtp;
			}
		}
		if (cur_mtype == NULL)
			break;

		size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
		    howmany(size, 1024), allocs);

		if (db_pager_quit)
			break;

		last_mtype = cur_mtype;
		last_size = cur_size;
	}
}
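/*
 * Usage sketch: from the in-kernel debugger, "show malloc" prints the table
 * above sorted by memory use, and "show malloc/i" selects the
 * comma-separated output format via the 'i' modifier checked at the top of
 * the command.
 */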
#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_version != M_VERSION) {
		db_printf("Version %lx does not match expected %x\n",
		    mtp->ks_version, M_VERSION);
		return;
	}

	mtip = &mtp->ks_mti;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */