#if PAGE_SIZE == 4096
#define VM_PAGE_BITS_ALL 0xffu
typedef uint8_t vm_page_bits_t;
#elif PAGE_SIZE == 8192
#define VM_PAGE_BITS_ALL 0xffffu
typedef uint16_t vm_page_bits_t;
#elif PAGE_SIZE == 16384
#define VM_PAGE_BITS_ALL 0xffffffffu
typedef uint32_t vm_page_bits_t;
#elif PAGE_SIZE == 32768
#define VM_PAGE_BITS_ALL 0xfffffffffffffffflu
typedef uint64_t vm_page_bits_t;
#endif
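/*
 * Each bit in a vm_page_bits_t records the valid/dirty state of one
 * DEV_BSIZE (512 byte) block of the page, so the type must provide
 * PAGE_SIZE / DEV_BSIZE bits: for example, 4096 / 512 = 8 bits fit in
 * the uint8_t case above.
 */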
#define VPRC_BLOCKED 0x40000000u
#define VPRC_OBJREF 0x80000000u
#define VPRC_WIRE_COUNT(c) ((c) & ~(VPRC_BLOCKED | VPRC_OBJREF))
#define VPRC_WIRE_COUNT_MAX (~(VPRC_BLOCKED | VPRC_OBJREF))
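/*
 * The 32-bit ref_count field encodes more than a plain reference count:
 * the low 30 bits count wirings, VPRC_OBJREF records the reference held by
 * the owning object, and VPRC_BLOCKED marks the count as blocked so that
 * opportunistic wirings fail while the page is being removed from its
 * object.  VPRC_WIRE_COUNT(m->ref_count) extracts just the wiring count.
 */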
#define VPO_KMEM_EXEC 0x01
#define VPO_SWAPSLEEP 0x02
#define VPO_UNMANAGED 0x04
#define VPO_SWAPINPROG 0x08
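/*
 * VPO_* flags live in the page's oflags field.  Unlike the atomically
 * updated PGA_* aflags below, these are plain bits serialized by
 * higher-level locking on the owning object; VPO_UNMANAGED is set once
 * at allocation time and never changes thereafter.
 */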
#define VPB_BIT_SHARED 0x01
#define VPB_BIT_EXCLUSIVE 0x02
#define VPB_BIT_WAITERS 0x04
#define VPB_BIT_FLAGMASK \
    (VPB_BIT_SHARED | VPB_BIT_EXCLUSIVE | VPB_BIT_WAITERS)

#define VPB_SHARERS_SHIFT 3
#define VPB_SHARERS(x) \
    (((x) & ~VPB_BIT_FLAGMASK) >> VPB_SHARERS_SHIFT)
#define VPB_SHARERS_WORD(x) ((x) << VPB_SHARERS_SHIFT | VPB_BIT_SHARED)
#define VPB_ONE_SHARER (1 << VPB_SHARERS_SHIFT)

#define VPB_SINGLE_EXCLUSIVE VPB_BIT_EXCLUSIVE
#ifdef INVARIANTS
#define VPB_CURTHREAD_EXCLUSIVE \
    (VPB_BIT_EXCLUSIVE | ((u_int)(uintptr_t)curthread & ~VPB_BIT_FLAGMASK))
#else
#define VPB_CURTHREAD_EXCLUSIVE VPB_SINGLE_EXCLUSIVE
#endif

#define VPB_UNBUSIED VPB_SHARERS_WORD(0)

#define VPB_FREED (0xffffffff - VPB_BIT_SHARED)
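/*
 * Layout of the busy_lock word: the low three VPB_BIT_* bits are flags,
 * and the remaining bits hold either a shared-holder count or (under
 * INVARIANTS) the exclusive owner's thread pointer.  For example,
 * VPB_SHARERS_WORD(2) encodes "shared-busied by two holders", and
 * VPB_SHARERS(VPB_SHARERS_WORD(2)) recovers the count 2.  VPB_FREED is a
 * sentinel stored in freed pages to catch use of a stale busy state.
 */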
#define PQ_UNSWAPPABLE 3
#ifndef VM_PAGE_HAVE_PGLIST
TAILQ_HEAD(pglist, vm_page);
#define VM_PAGE_HAVE_PGLIST
#endif
SLIST_HEAD(spglist, vm_page);
extern struct mtx_padalign pa_lock[];
#if defined(__arm__)
#define PDRSHIFT PDR_SHIFT
#elif !defined(PDRSHIFT)
#define PDRSHIFT 21
#endif
#define pa_index(pa) ((pa) >> PDRSHIFT)
#define PA_LOCKPTR(pa) ((struct mtx *)(&pa_lock[pa_index(pa) % PA_LOCK_COUNT]))
#define PA_LOCKOBJPTR(pa) ((struct lock_object *)PA_LOCKPTR((pa)))
#define PA_LOCK(pa) mtx_lock(PA_LOCKPTR(pa))
#define PA_TRYLOCK(pa) mtx_trylock(PA_LOCKPTR(pa))
#define PA_UNLOCK(pa) mtx_unlock(PA_LOCKPTR(pa))
#define PA_UNLOCK_COND(pa) \
    do { \
        if ((pa) != 0) { \
            PA_UNLOCK((pa)); \
            (pa) = 0; \
        } \
    } while (0)

#define PA_LOCK_ASSERT(pa, a) mtx_assert(PA_LOCKPTR(pa), (a))
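/*
 * The pa_lock array hashes a physical address to one of PA_LOCK_COUNT
 * cache-line-padded mutexes, keyed by superpage index (pa >> PDRSHIFT),
 * so unrelated pages rarely contend on the same lock.
 */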
#if defined(KLD_MODULE) && !defined(KLD_TIED)
#define vm_page_lock(m) vm_page_lock_KBI((m), LOCK_FILE, LOCK_LINE)
#define vm_page_unlock(m) vm_page_unlock_KBI((m), LOCK_FILE, LOCK_LINE)
#define vm_page_trylock(m) vm_page_trylock_KBI((m), LOCK_FILE, LOCK_LINE)
#else
#define vm_page_lockptr(m) (PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
#define vm_page_lock(m) mtx_lock(vm_page_lockptr((m)))
#define vm_page_unlock(m) mtx_unlock(vm_page_lockptr((m)))
#define vm_page_trylock(m) mtx_trylock(vm_page_lockptr((m)))
#endif
#if defined(INVARIANTS)
#define vm_page_assert_locked(m) \
    vm_page_assert_locked_KBI((m), __FILE__, __LINE__)
#define vm_page_lock_assert(m, a) \
    vm_page_lock_assert_KBI((m), (a), __FILE__, __LINE__)
#else
#define vm_page_assert_locked(m)
#define vm_page_lock_assert(m, a)
#endif
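/*
 * Loadable modules built against a stable KBI go through the *_KBI
 * functions rather than the inline mutex operations, so a module does not
 * bake in the kernel's lock layout or its INVARIANTS configuration.
 */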
#define PGA_WRITEABLE 0x0001
#define PGA_REFERENCED 0x0002
#define PGA_EXECUTABLE 0x0004
#define PGA_ENQUEUED 0x0008
#define PGA_DEQUEUE 0x0010
#define PGA_REQUEUE 0x0020
#define PGA_REQUEUE_HEAD 0x0040
#define PGA_NOSYNC 0x0080
#define PGA_SWAP_FREE 0x0100
#define PGA_SWAP_SPACE 0x0200

#define PGA_QUEUE_OP_MASK (PGA_DEQUEUE | PGA_REQUEUE | PGA_REQUEUE_HEAD)
#define PGA_QUEUE_STATE_MASK (PGA_ENQUEUED | PGA_QUEUE_OP_MASK)
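/*
 * PGA_* flags live in the atomically updated aflags field.  PGA_WRITEABLE
 * and PGA_EXECUTABLE are maintained by the pmap as mappings are created,
 * while the PGA_QUEUE_* bits record pending page-queue operations that are
 * applied asynchronously in batches (see vm_page_pqbatch_submit()).
 */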
#define PG_PCPU_CACHE 0x01
#define PG_FICTITIOUS 0x02
#define PG_MARKER 0x08
#define PG_NODUMP 0x10
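/*
 * PG_* flags are stored in the page's unsynchronized flags field and may
 * only be set or cleared while the page is inaccessible to other threads,
 * i.e. at allocation or free time.
 */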
#include <sys/kassert.h>
#include <machine/atomic.h>

#define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr)
#define VM_ALLOC_NORMAL 0
#define VM_ALLOC_INTERRUPT 1
#define VM_ALLOC_SYSTEM 2
#define VM_ALLOC_CLASS_MASK 3
#define VM_ALLOC_WAITOK 0x0008
#define VM_ALLOC_WAITFAIL 0x0010
#define VM_ALLOC_WIRED 0x0020
#define VM_ALLOC_ZERO 0x0040
#define VM_ALLOC_NORECLAIM 0x0080
#define VM_ALLOC_AVAIL0 0x0100
#define VM_ALLOC_NOBUSY 0x0200
#define VM_ALLOC_NOCREAT 0x0400
#define VM_ALLOC_AVAIL1 0x0800
#define VM_ALLOC_IGN_SBUSY 0x1000
#define VM_ALLOC_NODUMP 0x2000
#define VM_ALLOC_SBUSY 0x4000
#define VM_ALLOC_NOWAIT 0x8000
#define VM_ALLOC_COUNT_MAX 0xffff
#define VM_ALLOC_COUNT_SHIFT 16
#define VM_ALLOC_COUNT_MASK (VM_ALLOC_COUNT(VM_ALLOC_COUNT_MAX))
#define VM_ALLOC_COUNT(count) ({ \
    KASSERT((count) <= VM_ALLOC_COUNT_MAX, \
        ("%s: invalid VM_ALLOC_COUNT value", __func__)); \
    (count) << VM_ALLOC_COUNT_SHIFT; \
})
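/*
 * An allocation request word packs three things: an allocation class in
 * the low two bits (VM_ALLOC_CLASS_MASK), option flags in the rest of the
 * low 16 bits, and a count of additional pages the caller intends to
 * allocate in the high 16 bits.  For example, a request for a wired,
 * pre-zeroed page, hinting at eight further allocations, could be written
 *
 *	VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_COUNT(8)
 */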
static inline int
malloc2vm_flags(int malloc_flags)
{
	int pflags;

	KASSERT((malloc_flags & M_USE_RESERVE) == 0 ||
	    (malloc_flags & M_NOWAIT) != 0,
	    ("M_USE_RESERVE requires M_NOWAIT"));
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ? VM_ALLOC_INTERRUPT :
	    VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	if ((malloc_flags & M_NOWAIT))
		pflags |= VM_ALLOC_NOWAIT;
	if ((malloc_flags & M_WAITOK))
		pflags |= VM_ALLOC_WAITOK;
	if ((malloc_flags & M_NORECLAIM))
		pflags |= VM_ALLOC_NORECLAIM;
	return (pflags);
}
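/*
 * Example: malloc2vm_flags(M_NOWAIT | M_ZERO) yields
 * VM_ALLOC_SYSTEM | VM_ALLOC_ZERO | VM_ALLOC_NOWAIT, the page-allocation
 * request corresponding to a non-sleeping, zeroed malloc(9) allocation.
 */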
#define PS_ALL_DIRTY 0x1
#define PS_ALL_VALID 0x2
#define PS_NONE_BUSY 0x4
void vm_page_busy_sleep_unlocked(vm_object_t obj, vm_page_t m,
    vm_pindex_t pindex, const char *wmesg, int allocflags);
vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
vm_page_t vm_page_alloc_contig_domain(vm_object_t object, vm_pindex_t pindex,
    int domain, int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary,
    vm_memattr_t memattr);
vm_page_t vm_page_alloc_noobj_contig_domain(int domain, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr);
void vm_page_bits_set(vm_page_t m, vm_page_bits_t *bits, vm_page_bits_t set);
int vm_page_grab_pages(vm_object_t object, vm_pindex_t pindex, int allocflags,
    vm_page_t *ma, int count);
int vm_page_grab_pages_unlocked(vm_object_t object, vm_pindex_t pindex,
    int allocflags, vm_page_t *ma, int count);
int vm_page_grab_valid_unlocked(vm_page_t *mp, vm_object_t object,
    vm_pindex_t pindex, int allocflags);
bool vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low,
    vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
bool vm_page_reclaim_contig_domain(int domain, int req, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary);
#define VPR_TRYFREE 0x01
#define VPR_NOREUSE 0x02
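/*
 * VPR_* flags modify vm_page_release(): VPR_TRYFREE attempts to free the
 * page immediately when that is cheap, and VPR_NOREUSE marks the page as a
 * poor candidate for reuse, placing it where it will be reclaimed sooner.
 */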
void vm_page_replace(vm_page_t mnew, vm_object_t object,
    vm_pindex_t pindex, vm_page_t mold);
vm_page_t vm_page_scan_contig(u_long npages, vm_page_t m_start,
    vm_page_t m_end, u_long alignment, vm_paddr_t boundary, int options);
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line);
void vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line);
#endif
#define vm_page_busy_fetch(m) atomic_load_int(&(m)->busy_lock)

#define vm_page_assert_busied(m) \
    KASSERT(vm_page_busied(m), \
        ("vm_page_assert_busied: page %p not busy @ %s:%d", \
        (m), __FILE__, __LINE__))

#define vm_page_assert_sbusied(m) \
    KASSERT(vm_page_sbusied(m), \
        ("vm_page_assert_sbusied: page %p not shared busy @ %s:%d", \
        (m), __FILE__, __LINE__))
#define vm_page_assert_unbusied(m) \
    KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) != \
        VPB_CURTHREAD_EXCLUSIVE, \
        ("vm_page_assert_xbusied: page %p busy_lock %#x owned" \
        " by me @ %s:%d", \
        (m), (m)->busy_lock, __FILE__, __LINE__))
#define vm_page_assert_xbusied_unchecked(m) do { \
    KASSERT(vm_page_xbusied(m), \
        ("vm_page_assert_xbusied: page %p not exclusive busy @ %s:%d", \
        (m), __FILE__, __LINE__)); \
} while (0)
#define vm_page_assert_xbusied(m) do { \
    vm_page_assert_xbusied_unchecked(m); \
    KASSERT((vm_page_busy_fetch(m) & ~VPB_BIT_WAITERS) == \
        VPB_CURTHREAD_EXCLUSIVE, \
        ("vm_page_assert_xbusied: page %p busy_lock %#x not owned" \
        " by me @ %s:%d", \
        (m), (m)->busy_lock, __FILE__, __LINE__)); \
} while (0)
#define vm_page_busied(m) \
    (vm_page_busy_fetch(m) != VPB_UNBUSIED)

#define vm_page_xbusied(m) \
    ((vm_page_busy_fetch(m) & VPB_SINGLE_EXCLUSIVE) != 0)

#define vm_page_busy_freed(m) \
    (vm_page_busy_fetch(m) == VPB_FREED)
#define vm_page_xunbusy(m) do { \
    if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
        VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED)) \
            vm_page_xunbusy_hard(m); \
} while (0)

#define vm_page_xunbusy_unchecked(m) do { \
    if (!atomic_cmpset_rel_int(&(m)->busy_lock, \
        VPB_CURTHREAD_EXCLUSIVE, VPB_UNBUSIED)) \
            vm_page_xunbusy_hard_unchecked(m); \
} while (0)
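/*
 * Illustrative usage (a sketch, not code from this header): a caller that
 * wants exclusive access to a page's contents might do
 *
 *	if (vm_page_tryxbusy(m) != 0) {
 *		vm_page_assert_xbusied(m);
 *		... read or modify the page ...
 *		vm_page_xunbusy(m);
 *	}
 *
 * The unbusy fast path above is a single atomic compare-and-set;
 * vm_page_xunbusy_hard() is entered only when waiters or an INVARIANTS
 * owner tag make the stored value differ from VPB_CURTHREAD_EXCLUSIVE.
 */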
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
void vm_page_object_busy_assert(vm_page_t m);
#define VM_PAGE_OBJECT_BUSY_ASSERT(m) vm_page_object_busy_assert(m)
void vm_page_assert_pga_writeable(vm_page_t m, uint16_t bits);
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) \
    vm_page_assert_pga_writeable(m, bits)

#define vm_page_xbusy_claim(m) do { \
    u_int _busy_lock; \
    \
    vm_page_assert_xbusied_unchecked((m)); \
    do { \
        _busy_lock = vm_page_busy_fetch(m); \
    } while (!atomic_cmpset_int(&(m)->busy_lock, _busy_lock, \
        (_busy_lock & VPB_BIT_FLAGMASK) | VPB_CURTHREAD_EXCLUSIVE)); \
} while (0)
#else
#define VM_PAGE_OBJECT_BUSY_ASSERT(m) (void)0
#define VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits) (void)0
#define vm_page_xbusy_claim(m)
#endif
#if BYTE_ORDER == BIG_ENDIAN
#define VM_PAGE_AFLAG_SHIFT 16
#else
#define VM_PAGE_AFLAG_SHIFT 0
#endif
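/*
 * Not every architecture supports atomic updates of 16-bit destinations,
 * so the 16-bit aflags field is updated with 32-bit atomics applied to the
 * aligned word containing it.  VM_PAGE_AFLAG_SHIFT gives the position of
 * aflags within that word, which depends on byte order.
 */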
static inline vm_page_astate_t
vm_page_astate_load(vm_page_t m)
{
	vm_page_astate_t a;

	a._bits = atomic_load_32(&m->a._bits);
	return (a);
}

static inline bool
vm_page_astate_fcmpset(vm_page_t m, vm_page_astate_t *old,
    vm_page_astate_t new)
{
	KASSERT(new.queue == PQ_INACTIVE ||
	    (new.flags & PGA_REQUEUE_HEAD) == 0,
	    ("%s: invalid head requeue request for page %p", __func__, m));
	KASSERT((new.flags & PGA_ENQUEUED) == 0 || new.queue != PQ_NONE,
	    ("%s: setting PGA_ENQUEUED with PQ_NONE in page %p", __func__, m));
	KASSERT(new._bits != old->_bits,
	    ("%s: bits are unchanged", __func__));

	return (atomic_fcmpset_32(&m->a._bits, &old->_bits, new._bits) != 0);
}
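/*
 * Typical update pattern (a sketch): load the state once, then retry the
 * compare-and-set until it succeeds.  atomic_fcmpset_32() reloads *old on
 * failure, so each retry operates on fresh state; note that callers must
 * not invoke the fcmpset with an unchanged value, since it asserts that
 * the bits actually differ.
 *
 *	old = vm_page_astate_load(m);
 *	do {
 *		if ((old.flags & PGA_REFERENCED) != 0)
 *			break;
 *		new = old;
 *		new.flags |= PGA_REFERENCED;
 *	} while (!vm_page_astate_fcmpset(m, &old, new));
 */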
static inline void
vm_page_aflag_clear(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_clear_32(addr, val);
}

static inline void
vm_page_aflag_set(vm_page_t m, uint16_t bits)
{
	uint32_t *addr, val;

	VM_PAGE_ASSERT_PGA_WRITEABLE(m, bits);
	addr = (void *)&m->a;
	val = bits << VM_PAGE_AFLAG_SHIFT;
	atomic_set_32(addr, val);
}
static __inline void
vm_page_dirty(vm_page_t m)
{
#if (defined(KLD_MODULE) && !defined(KLD_TIED)) || defined(INVARIANTS)
	vm_page_dirty_KBI(m);
#else
	m->dirty = VM_PAGE_BITS_ALL;
#endif
}
static inline u_int
vm_page_drop(vm_page_t m, u_int val)
{
	u_int old;

	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(&m->ref_count, -val);
	KASSERT(old != VPRC_BLOCKED,
	    ("vm_page_drop: page %p has an invalid refcount value", m));
	return (old);
}
static inline bool
vm_page_all_valid(vm_page_t m)
{
	return (m->valid == VM_PAGE_BITS_ALL);
}

static inline bool
vm_page_none_valid(vm_page_t m)
{
	return (m->valid == 0);
}
static inline int
vm_page_domain(vm_page_t m)
{
#ifdef NUMA
	int domn, segind;

	segind = m->segind;
	KASSERT(segind < vm_phys_nsegs, ("segind %d m %p", segind, m));
	domn = vm_phys_segs[segind].domain;
	KASSERT(domn >= 0 && domn < vm_ndomains, ("domain %d m %p", domn, m));
	return (domn);
#else
	return (0);
#endif
}