77#ifndef VM_BATCHQUEUE_SIZE
78#define VM_BATCHQUEUE_SIZE 7
87#include <sys/_blockcount.h>
88#include <sys/pidctrl.h>
253 uint8_t
vmd_pad[CACHE_LINE_SIZE - (
sizeof(u_int) * 2)];
/*
 * Map a NUMA domain index to its struct vm_domain, and test whether a
 * domain manages no pages at all (vmd_page_count == 0).
 */
301#define VM_DOMAIN(n) (&vm_dom[(n)])
302#define VM_DOMAIN_EMPTY(n) (vm_dom[(n)].vmd_page_count == 0)
/*
 * Per-pagequeue locking: thin wrappers around the pq_mutex embedded in
 * struct vm_pagequeue.  Callers must hold pq_mutex while manipulating
 * the queue; the assert macro documents and enforces that contract.
 */
304#define vm_pagequeue_assert_locked(pq) mtx_assert(&(pq)->pq_mutex, MA_OWNED)
305#define vm_pagequeue_lock(pq) mtx_lock(&(pq)->pq_mutex)
306#define vm_pagequeue_lockptr(pq) (&(pq)->pq_mutex)
307#define vm_pagequeue_trylock(pq) mtx_trylock(&(pq)->pq_mutex)
308#define vm_pagequeue_unlock(pq) mtx_unlock(&(pq)->pq_mutex)
/*
 * Per-domain free-page locking: wrappers around the vmd_free_mtx member
 * of struct vm_domain, guarding the free-page counters and free lists.
 *
 * NOTE(review): the expansion of vm_domain_free_lockptr() was missing —
 * the macro ended at a backslash continuation with no body, which would
 * break every other macro in this group.  Restored as
 * (&(d)->vmd_free_mtx), matching the struct vm_domain member vmd_free_mtx
 * and the parallel vm_domain_pageout_lockptr()/vmd_pageout_mtx pattern
 * below.
 */
310#define vm_domain_free_assert_locked(n) \
311 mtx_assert(vm_domain_free_lockptr((n)), MA_OWNED)
312#define vm_domain_free_assert_unlocked(n) \
313 mtx_assert(vm_domain_free_lockptr((n)), MA_NOTOWNED)
314#define vm_domain_free_lock(d) \
315 mtx_lock(vm_domain_free_lockptr((d)))
316#define vm_domain_free_lockptr(d) \
317 (&(d)->vmd_free_mtx)
318#define vm_domain_free_trylock(d) \
319 mtx_trylock(vm_domain_free_lockptr((d)))
320#define vm_domain_free_unlock(d) \
321 mtx_unlock(vm_domain_free_lockptr((d)))
/*
 * Per-domain pageout locking: wrappers around the vmd_pageout_mtx member
 * of struct vm_domain, used to serialize pagedaemon state for a domain.
 */
323#define vm_domain_pageout_lockptr(d) \
324 (&(d)->vmd_pageout_mtx)
325#define vm_domain_pageout_assert_locked(n) \
326 mtx_assert(vm_domain_pageout_lockptr((n)), MA_OWNED)
327#define vm_domain_pageout_assert_unlocked(n) \
328 mtx_assert(vm_domain_pageout_lockptr((n)), MA_NOTOWNED)
329#define vm_domain_pageout_lock(d) \
330 mtx_lock(vm_domain_pageout_lockptr((d)))
331#define vm_domain_pageout_unlock(d) \
332 mtx_unlock(vm_domain_pageout_lockptr((d)))
/*
 * Convenience wrappers: adjust a pagequeue's page count by +/-1 via
 * vm_pagequeue_cnt_add().
 */
341#define vm_pagequeue_cnt_inc(pq) vm_pagequeue_cnt_add((pq), 1)
342#define vm_pagequeue_cnt_dec(pq) vm_pagequeue_cnt_add((pq), -1)
348 TAILQ_REMOVE(&pq->
pq_pl, m, plinks.q);
370static inline vm_page_t
454 old = atomic_fetchadd_int(&vmd->vmd_free_count, adj);
vm_page_t bq_pa[VM_BATCHQUEUE_SIZE]
volatile u_int vmd_inactive_freed
u_int vmd_inactive_target
blockcount_t vmd_inactive_starting
struct vmem * vmd_kernel_rwx_arena
struct vm_domain::vm_pgcache vmd_pgcache[VM_NFREEPOOL]
u_int __aligned(CACHE_LINE_SIZE) vmd_free_count
u_int vmd_clean_pages_freed
struct vm_page vmd_clock[2]
u_int vmd_inactive_threads
struct vm_page vmd_markers[PQ_COUNT]
struct mtx_padalign vmd_free_mtx
int vmd_pageout_pages_needed
volatile u_int vmd_addl_shortage
u_int vmd_pageout_free_min
char vmd_name[sizeof(__XSTRING(MAXMEMDOM))]
struct sysctl_oid * vmd_oid
u_int vmd_background_launder_target
blockcount_t vmd_inactive_running
volatile u_int vmd_inactive_us
struct vmem * vmd_kernel_arena
u_int vmd_inactive_shortage
struct vm_page vmd_inacthead
u_int vmd_pageout_wakeup_thresh
u_int vmd_pageout_deficit
enum vm_domain::@14 vmd_laundry_request
u_int vmd_interrupt_free_min
struct vm_pagequeue vmd_pagequeues[PQ_COUNT]
uint8_t vmd_pad[CACHE_LINE_SIZE -(sizeof(u_int) *2)]
struct mtx_padalign vmd_pageout_mtx
const char *const pq_name
static int vm_page_domain(vm_page_t m)
void pagedaemon_wakeup(int domain)
static int vm_paging_needed(struct vm_domain *vmd, u_int free_count)
static bool vm_batchqueue_insert(struct vm_batchqueue *bq, vm_page_t m)
#define vm_pagequeue_assert_locked(pq)
static struct vm_domain * vm_pagequeue_domain(vm_page_t m)
void vm_domain_set(struct vm_domain *vmd)
static void vm_domain_freecnt_inc(struct vm_domain *vmd, int adj)
static int vm_paging_severe(struct vm_domain *vmd)
u_int vmd_pageout_wakeup_thresh
#define vm_pagequeue_cnt_dec(pq)
struct vm_pagequeue __aligned(CACHE_LINE_SIZE)
static int vm_laundry_target(struct vm_domain *vmd)
static __inline void vm_pagequeue_cnt_add(struct vm_pagequeue *pq, int addend)
static void vm_batchqueue_init(struct vm_batchqueue *bq)
u_int vmd_pageout_free_min
void vm_domain_clear(struct vm_domain *vmd)
static int vm_paging_min(struct vm_domain *vmd)
static vm_page_t vm_batchqueue_pop(struct vm_batchqueue *bq)
int vm_domain_allocate(struct vm_domain *vmd, int req, int npages)
struct vm_domain vm_dom[MAXMEMDOM]
static void vm_pagequeue_remove(struct vm_pagequeue *pq, vm_page_t m)
#define VM_BATCHQUEUE_SIZE
static int vm_paging_target(struct vm_domain *vmd)