39#include <sys/kernel.h>
41#include <sys/malloc.h>
44#include <sys/resource.h>
45#include <sys/rwlock.h>
47#include <sys/vmmeter.h>
59#include <sys/sysctl.h>
62 .v_swtch = EARLY_COUNTER,
63 .v_trap = EARLY_COUNTER,
64 .v_syscall = EARLY_COUNTER,
65 .v_intr = EARLY_COUNTER,
66 .v_soft = EARLY_COUNTER,
67 .v_vm_faults = EARLY_COUNTER,
68 .v_io_faults = EARLY_COUNTER,
69 .v_cow_faults = EARLY_COUNTER,
70 .v_cow_optim = EARLY_COUNTER,
71 .v_zfod = EARLY_COUNTER,
72 .v_ozfod = EARLY_COUNTER,
73 .v_swapin = EARLY_COUNTER,
74 .v_swapout = EARLY_COUNTER,
75 .v_swappgsin = EARLY_COUNTER,
76 .v_swappgsout = EARLY_COUNTER,
77 .v_vnodein = EARLY_COUNTER,
78 .v_vnodeout = EARLY_COUNTER,
79 .v_vnodepgsin = EARLY_COUNTER,
80 .v_vnodepgsout = EARLY_COUNTER,
81 .v_intrans = EARLY_COUNTER,
82 .v_reactivated = EARLY_COUNTER,
83 .v_pdwakeups = EARLY_COUNTER,
84 .v_pdpages = EARLY_COUNTER,
85 .v_pdshortfalls = EARLY_COUNTER,
86 .v_dfree = EARLY_COUNTER,
87 .v_pfree = EARLY_COUNTER,
88 .v_tfree = EARLY_COUNTER,
89 .v_forks = EARLY_COUNTER,
90 .v_vforks = EARLY_COUNTER,
91 .v_rforks = EARLY_COUNTER,
92 .v_kthreads = EARLY_COUNTER,
93 .v_forkpages = EARLY_COUNTER,
94 .v_vforkpages = EARLY_COUNTER,
95 .v_rforkpages = EARLY_COUNTER,
96 .v_kthreadpages = EARLY_COUNTER,
97 .v_wire_count = EARLY_COUNTER,
105 counter_u64_t *cnt = (counter_u64_t *)&
vm_cnt;
107 COUNTER_ARRAY_ALLOC(cnt, VM_METER_NCOUNTERS, M_WAITOK);
112 CTLFLAG_RW, &
vm_cnt.v_free_min, 0,
"Minimum low-free-pages threshold");
114 CTLFLAG_RW, &
vm_cnt.v_free_target, 0,
"Desired free pages");
116 CTLFLAG_RW, &
vm_cnt.v_free_reserved, 0,
"Pages reserved for deadlock");
118 CTLFLAG_RW, &
vm_cnt.v_inactive_target, 0,
"Pages desired inactive");
120 CTLFLAG_RW, &
vm_cnt.v_pageout_free_min, 0,
"Min pages reserved for kernel");
122 CTLFLAG_RW, &
vm_cnt.v_free_severe, 0,
"Severe page depletion point");
131 if (req->flags & SCTL_MASK32) {
132 la[0] = averunnable.ldavg[0];
133 la[1] = averunnable.ldavg[1];
134 la[2] = averunnable.ldavg[2];
135 la[3] = averunnable.fscale;
136 return SYSCTL_OUT(req, la,
sizeof(la));
139 return SYSCTL_OUT(req, &averunnable,
sizeof(averunnable));
143 "Machine loadaverage history");
145#if defined(COMPAT_FREEBSD11)
168#if defined(COMPAT_FREEBSD11)
169 struct vmtotal11 total11;
175 if (req->oldptr == NULL) {
176#if defined(COMPAT_FREEBSD11)
177 if (curproc->p_osrel < P_OSREL_VMTOTAL64)
178 return (SYSCTL_OUT(req, NULL,
sizeof(total11)));
180 return (SYSCTL_OUT(req, NULL,
sizeof(total)));
182 bzero(&total,
sizeof(total));
187 sx_slock(&allproc_lock);
188 FOREACH_PROC_IN_SYSTEM(p) {
189 if ((p->p_flag & P_SYSTEM) != 0)
192 if (p->p_state != PRS_NEW) {
193 FOREACH_THREAD_IN_PROC(p, td) {
195 switch (TD_GET_STATE(td)) {
197 if (TD_IS_SWAPPED(td))
199 else if (TD_IS_SLEEPING(td)) {
200 if (td->td_priority <= PZERO)
221 sx_sunlock(&allproc_lock);
238 if (object->ref_count == 0) {
245 if (object->ref_count == 1 &&
254 total.t_vm +=
object->size;
255 total.t_rm +=
object->resident_page_count;
257 total.t_avm +=
object->size;
258 total.t_arm +=
object->resident_page_count;
260 if (object->shadow_count > 1) {
262 total.t_vmshr +=
object->size;
263 total.t_rmshr +=
object->resident_page_count;
265 total.t_avmshr +=
object->size;
266 total.t_armshr +=
object->resident_page_count;
273#if defined(COMPAT_FREEBSD11)
275 if (curproc->p_osrel < P_OSREL_VMTOTAL64 && (req->oldlen ==
276 sizeof(total11) || req->oldlen == 2 *
sizeof(total11))) {
277 bzero(&total11,
sizeof(total11));
278 total11.t_rq = total.t_rq;
279 total11.t_dw = total.t_dw;
280 total11.t_pw = total.t_pw;
281 total11.t_sl = total.t_sl;
282 total11.t_sw = total.t_sw;
283 total11.t_vm = total.t_vm;
284 total11.t_avm = total.t_avm;
285 total11.t_rm = total.t_rm;
286 total11.t_arm = total.t_arm;
287 total11.t_vmshr = total.t_vmshr;
288 total11.t_avmshr = total.t_avmshr;
289 total11.t_rmshr = total.t_rmshr;
290 total11.t_armshr = total.t_armshr;
291 total11.t_free = total.t_free;
292 return (SYSCTL_OUT(req, &total11,
sizeof(total11)));
295 return (SYSCTL_OUT(req, &total,
sizeof(total)));
299 CTLFLAG_MPSAFE, NULL, 0,
vmtotal,
"S,vmtotal",
300 "System virtual memory statistics");
303static SYSCTL_NODE(_vm_stats, OID_AUTO, sys, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
304 "VM meter sys stats");
305static SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
306 "VM meter vm stats");
307SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
308 "VM meter misc stats");
314#ifdef COMPAT_FREEBSD11
318 val = counter_u64_fetch(*(counter_u64_t *)arg1);
319#ifdef COMPAT_FREEBSD11
320 if (req->oldlen ==
sizeof(val32)) {
322 return (SYSCTL_OUT(req, &val32,
sizeof(val32)));
325 return (SYSCTL_OUT(req, &val,
sizeof(val)));
/*
 * Export a counter_u64_t member of vm_cnt as a read-only 64-bit sysctl.
 * All reads go through sysctl_handle_vmstat(), which fetches the
 * per-CPU counter value ("QU" = unsigned 64-bit quad format).
 */
#define	VM_STATS(parent, var, descr) \
	SYSCTL_OID(parent, OID_AUTO, var, CTLTYPE_U64 | CTLFLAG_MPSAFE | \
	CTLFLAG_RD, &vm_cnt.var, 0, sysctl_handle_vmstat, "QU", descr)

/* Convenience wrappers placing the OID under vm.stats.vm / vm.stats.sys. */
#define	VM_STATS_VM(var, descr)		VM_STATS(_vm_stats_vm, var, descr)
#define	VM_STATS_SYS(var, descr)	VM_STATS(_vm_stats_sys, var, descr)
367VM_STATS_VM(v_kthreadpages,
"VM pages affected by fork() by kernel");
377 return (SYSCTL_OUT(req, &val,
sizeof(val)));
380#define VM_STATS_PROC(var, descr, fn) \
381 SYSCTL_OID(_vm_stats_vm, OID_AUTO, var, CTLTYPE_U32 | CTLFLAG_MPSAFE | \
382 CTLFLAG_RD, fn, 0, sysctl_handle_vmstat_proc, "IU", descr)
/*
 * Export a plain (non-counter) u_int / u_long member of vm_cnt as a
 * read-only sysctl under vm.stats.vm.  Unlike VM_STATS(), these read
 * the field directly with no handler function.
 */
#define	VM_STATS_UINT(var, descr) \
	SYSCTL_UINT(_vm_stats_vm, OID_AUTO, var, CTLFLAG_RD, &vm_cnt.var, 0, descr)
#define	VM_STATS_ULONG(var, descr) \
	SYSCTL_ULONG(_vm_stats_vm, OID_AUTO, var, CTLFLAG_RD, &vm_cnt.var, 0, descr)
408#ifdef COMPAT_FREEBSD11
413SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_cache_count, CTLFLAG_RD,
414 SYSCTL_NULL_UINT_PTR, 0,
"Dummy for compatibility");
415SYSCTL_UINT(_vm_stats_vm, OID_AUTO, v_tcached, CTLFLAG_RD,
416 SYSCTL_NULL_UINT_PTR, 0,
"Dummy for compatibility");
427 v +=
vm_dom[i].vmd_free_count;
473 ret = counter_u64_fetch(
vm_cnt.v_pdpages);
479 return (SYSCTL_OUT(req, &ret,
sizeof(ret)));
483 "Pages analyzed by pagedaemon");
488 struct sysctl_oid *oid;
490 vmd->
vmd_oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(parent), OID_AUTO,
491 vmd->
vmd_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"");
492 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(vmd->
vmd_oid), OID_AUTO,
493 "stats", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"");
494 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
495 "free_count", CTLFLAG_RD, &vmd->vmd_free_count, 0,
497 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
500 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
501 "actpdpgs", CTLFLAG_RD,
503 "Active pages scanned by the page daemon");
504 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
507 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
508 "inactpdpgs", CTLFLAG_RD,
510 "Inactive pages scanned by the page daemon");
511 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
514 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
515 "laundpdpgs", CTLFLAG_RD,
517 "Laundry pages scanned by the page daemon");
518 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
"unswappable",
520 "Unswappable pages");
521 SYSCTL_ADD_U64(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
522 "unswppdpgs", CTLFLAG_RD,
524 "Unswappable pages scanned by the page daemon");
525 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
527 "Target inactive pages");
528 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
530 "Target free pages");
531 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
533 "Reserved free pages");
534 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
536 "Minimum free pages");
537 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
539 "Severe free pages");
540 SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(oid), OID_AUTO,
542 "inactive pages freed/second");
549 struct sysctl_oid *oid;
552 oid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_vm), OID_AUTO,
553 "domain", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"");
u_int vmd_inactive_target
char vmd_name[sizeof(__XSTRING(MAXMEMDOM))]
struct sysctl_oid * vmd_oid
struct vm_pagequeue vmd_pagequeues[PQ_COUNT]
u_int vm_wait_count(void)
static int sysctl_handle_vmstat(SYSCTL_HANDLER_ARGS)
static int sysctl_handle_vmstat_proc(SYSCTL_HANDLER_ARGS)
#define VM_STATS_VM(var, descr)
SYSCTL_NODE(_vm, OID_AUTO, stats, CTLFLAG_RW|CTLFLAG_MPSAFE, 0, "VM meter stats")
static int sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS)
struct vmmeter __read_mostly vm_cnt
SYSINIT(counter, SI_SUB_KMEM, SI_ORDER_FIRST, vmcounter_startup, NULL)
#define VM_STATS_SYS(var, descr)
u_long __exclusive_cache_line vm_user_wire_count
u_int vm_active_count(void)
SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min, CTLFLAG_RW, &vm_cnt.v_free_min, 0, "Minimum low-free-pages threshold")
static int sysctl_vm_pdpages(SYSCTL_HANDLER_ARGS)
u_int vm_laundry_count(void)
static void vm_domain_stats_init(struct vm_domain *vmd, struct sysctl_oid *parent)
u_int vm_free_count(void)
static int vmtotal(SYSCTL_HANDLER_ARGS)
u_int vm_inactive_count(void)
static u_int vm_pagequeue_count(int pq)
static void vm_stats_init(void *arg __unused)
SYSCTL_PROC(_vm, VM_LOADAVG, loadavg, CTLTYPE_STRUCT|CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_loadavg, "S,loadavg", "Machine loadaverage history")
SYSCTL_ULONG(_vm_stats_vm, OID_AUTO, v_user_wire_count, CTLFLAG_RD, &vm_user_wire_count, 0, "User-wired virtual memory")
#define VM_STATS_PROC(var, descr, fn)
#define VM_STATS_UINT(var, descr)
static void vmcounter_startup(void)
struct mtx vm_object_list_mtx
bool vm_object_is_active(vm_object_t obj)
struct object_q vm_object_list
struct vm_domain vm_dom[MAXMEMDOM]
struct vm_pagequeue vmd_pagequeues[PQ_COUNT]
#define VM_V_PAGEOUT_FREE_MIN
#define VM_V_INACTIVE_TARGET
#define VM_V_FREE_RESERVED