FreeBSD virtual memory subsystem code
vm_pager.c
1/*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3 *
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * The Mach Operating System project at Carnegie-Mellon University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from: @(#)vm_pager.c 8.6 (Berkeley) 1/12/94
35 *
36 *
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
39 *
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41 *
42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 *
52 * Carnegie Mellon requests users of this software to return to
53 *
54 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
55 * School of Computer Science
56 * Carnegie Mellon University
57 * Pittsburgh PA 15213-3890
58 *
59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes.
61 */
62
63/*
64 * Paging space routine stubs. Emulates a matchmaker-like interface
65 * for builtin pagers.
66 */
67
68#include <sys/cdefs.h>
69__FBSDID("$FreeBSD$");
70
71#include "opt_param.h"
72
73#include <sys/param.h>
74#include <sys/systm.h>
75#include <sys/kernel.h>
76#include <sys/vnode.h>
77#include <sys/bio.h>
78#include <sys/buf.h>
79#include <sys/ucred.h>
80#include <sys/malloc.h>
81#include <sys/rwlock.h>
82#include <sys/user.h>
83
84#include <vm/vm.h>
85#include <vm/vm_param.h>
86#include <vm/vm_kern.h>
87#include <vm/vm_object.h>
88#include <vm/vm_page.h>
89#include <vm/vm_pager.h>
90#include <vm/vm_extern.h>
91#include <vm/uma.h>
92
93uma_zone_t pbuf_zone;
94static int pbuf_init(void *, int, int);
95static int pbuf_ctor(void *, int, void *, int);
96static void pbuf_dtor(void *, int, void *);
97
98static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
99static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
100 vm_ooffset_t, struct ucred *);
101static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
102static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
103static void dead_pager_dealloc(vm_object_t);
104static void dead_pager_getvp(vm_object_t, struct vnode **, bool *);
105
106static int
107dead_pager_getpages(vm_object_t obj, vm_page_t *ma, int count, int *rbehind,
108 int *rahead)
109{
110
111 return (VM_PAGER_FAIL);
112}
113
114static vm_object_t
115dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
116 vm_ooffset_t off, struct ucred *cred)
117{
118
119 return (NULL);
120}
121
122static void
123dead_pager_putpages(vm_object_t object, vm_page_t *m, int count,
124 int flags, int *rtvals)
125{
126 int i;
127
128 for (i = 0; i < count; i++)
129 rtvals[i] = VM_PAGER_AGAIN;
130}
131
132static boolean_t
133dead_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *prev, int *next)
134{
135
136 if (prev != NULL)
137 *prev = 0;
138 if (next != NULL)
139 *next = 0;
140 return (FALSE);
141}
142
143static void
144dead_pager_dealloc(vm_object_t object)
145{
146
147}
148
149static void
150dead_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
151{
152 /*
153 * For OBJT_DEAD objects, v_writecount was handled in
154 * vnode_pager_dealloc().
155 */
156}
157
158static const struct pagerops deadpagerops = {
159 .pgo_kvme_type = KVME_TYPE_DEAD,
160 .pgo_alloc = dead_pager_alloc,
161 .pgo_dealloc = dead_pager_dealloc,
162 .pgo_getpages = dead_pager_getpages,
163 .pgo_putpages = dead_pager_putpages,
164 .pgo_haspage = dead_pager_haspage,
165 .pgo_getvp = dead_pager_getvp,
166};
167
168const struct pagerops *pagertab[16] __read_mostly = {
169 [OBJT_DEFAULT] = &defaultpagerops,
170 [OBJT_SWAP] = &swappagerops,
171 [OBJT_VNODE] = &vnodepagerops,
172 [OBJT_DEVICE] = &devicepagerops,
173 [OBJT_PHYS] = &physpagerops,
174 [OBJT_DEAD] = &deadpagerops,
175 [OBJT_SG] = &sgpagerops,
176 [OBJT_MGTDEVICE] = &mgtdevicepagerops,
177};
178static struct mtx pagertab_lock;
179
180void
181vm_pager_init(void)
182{
183 const struct pagerops **pgops;
184 int i;
185
186 mtx_init(&pagertab_lock, "dynpag", NULL, MTX_DEF);
187
188 /*
189 * Initialize known pagers
190 */
191 for (i = 0; i < OBJT_FIRST_DYN; i++) {
192 pgops = &pagertab[i];
193 if ((*pgops)->pgo_init != NULL)
194 (*(*pgops)->pgo_init)();
195 }
196}
197
198static int nswbuf_max;
199
200void
201vm_pager_bufferinit(void)
202{
203
204 /* Main zone for paging bufs. */
205 pbuf_zone = uma_zcreate("pbuf",
206 sizeof(struct buf) + PBUF_PAGES * sizeof(vm_page_t),
207 pbuf_ctor, pbuf_dtor, pbuf_init, NULL, UMA_ALIGN_CACHE,
208 UMA_ZONE_NOFREE);
209 /* Few systems may still use this zone directly, so it needs a limit. */
210 nswbuf_max += uma_zone_set_max(pbuf_zone, NSWBUF_MIN);
211}
212
213uma_zone_t
214pbuf_zsecond_create(const char *name, int max)
215{
216 uma_zone_t zone;
217
218 zone = uma_zsecond_create(name, pbuf_ctor, pbuf_dtor, NULL, NULL,
219 pbuf_zone);
220
221#ifdef KMSAN
222 /*
223 * Shrink the size of the pbuf pools if KMSAN is enabled, otherwise the
224 * shadows of the large KVA allocations eat up too much memory.
225 */
226 max /= 3;
227#endif
228
229 /*
230 * uma_prealloc() rounds up to items per slab.  If we preallocated
231 * immediately on every pbuf_zsecond_create(), we could accumulate too
232 * large a difference between the hard limit and the preallocated items,
233 * which means wasted memory.
234 */
235 if (nswbuf_max > 0)
236 nswbuf_max += uma_zone_set_max(zone, max);
237 else
238 uma_prealloc(pbuf_zone, uma_zone_set_max(zone, max));
239
240 return (zone);
241}
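/*
 * Example (editor's sketch, not part of the original file): a pager that
 * needs its own pool of paging buffers creates a secondary zone at boot and
 * then allocates and frees pbufs through UMA.  The zone name and limit below
 * are illustrative; pbuf_ctor() hands the buffer back exclusively locked and
 * pbuf_dtor() drops the lock on free.
 *
 *	static uma_zone_t mypager_pbuf_zone;
 *
 *	mypager_pbuf_zone = pbuf_zsecond_create("mypgpbuf", nswbuf / 2);
 *
 *	struct buf *bp;
 *
 *	bp = uma_zalloc(mypager_pbuf_zone, M_WAITOK);
 *	... fill in b_data/b_pages and perform the I/O ...
 *	uma_zfree(mypager_pbuf_zone, bp);
 */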
242
243static void
244pbuf_prealloc(void *arg __unused)
245{
246
247 uma_prealloc(pbuf_zone, nswbuf_max);
248 nswbuf_max = -1;
249}
250
251SYSINIT(pbuf, SI_SUB_KTHREAD_BUF, SI_ORDER_ANY, pbuf_prealloc, NULL);
252
253/*
254 * Allocate an instance of a pager of the given type.
255 * Size, protection and offset parameters are passed in for pagers that
256 * need to perform page-level validation (e.g. the device pager).
257 */
258vm_object_t
259vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
260 vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
261{
262 MPASS(type < nitems(pagertab));
263
264 return ((*pagertab[type]->pgo_alloc)(handle, size, prot, off, cred));
265}
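/*
 * Example (editor's sketch, not part of the original file): a typical caller
 * passes the backing handle and lets the type-specific pgo_alloc method find
 * or create the VM object.  "vp" and "size" are illustrative and error
 * handling is elided.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_pager_allocate(OBJT_VNODE, vp, round_page(size),
 *	    VM_PROT_READ | VM_PROT_WRITE, 0, curthread->td_ucred);
 *	if (obj == NULL)
 *		return (EINVAL);
 */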
266
267/*
268 * The object must be locked.
269 */
270void
271vm_pager_deallocate(vm_object_t object)
272{
273
274 VM_OBJECT_ASSERT_WLOCKED(object);
275 MPASS(object->type < nitems(pagertab));
276 (*pagertab[object->type]->pgo_dealloc) (object);
277}
278
279static void
280vm_pager_assert_in(vm_object_t object, vm_page_t *m, int count)
281{
282#ifdef INVARIANTS
283
284 /*
285 * All pages must be consecutive, busied, not mapped, not fully valid,
286 * not dirty and belong to the proper object. Some pages may be the
287 * bogus page, but the first and last pages must be real ones.
288 */
289
290 VM_OBJECT_ASSERT_UNLOCKED(object);
291 VM_OBJECT_ASSERT_PAGING(object);
292 KASSERT(count > 0, ("%s: 0 count", __func__));
293 for (int i = 0 ; i < count; i++) {
294 if (m[i] == bogus_page) {
295 KASSERT(i != 0 && i != count - 1,
296 ("%s: page %d is the bogus page", __func__, i));
297 continue;
298 }
299 vm_page_assert_xbusied(m[i]);
300 KASSERT(!pmap_page_is_mapped(m[i]),
301 ("%s: page %p is mapped", __func__, m[i]));
302 KASSERT(m[i]->valid != VM_PAGE_BITS_ALL,
303 ("%s: request for a valid page %p", __func__, m[i]));
304 KASSERT(m[i]->dirty == 0,
305 ("%s: page %p is dirty", __func__, m[i]));
306 KASSERT(m[i]->object == object,
307 ("%s: wrong object %p/%p", __func__, object, m[i]->object));
308 KASSERT(m[i]->pindex == m[0]->pindex + i,
309 ("%s: page %p isn't consecutive", __func__, m[i]));
310 }
311#endif
312}
313
314/*
315 * Page in the pages for the object using its associated pager.
316 * The requested page must be fully valid on successful return.
317 */
318int
319vm_pager_get_pages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
320 int *rahead)
321{
322#ifdef INVARIANTS
323 vm_pindex_t pindex = m[0]->pindex;
324#endif
325 int r;
326
327 MPASS(object->type < nitems(pagertab));
328 vm_pager_assert_in(object, m, count);
329
330 r = (*pagertab[object->type]->pgo_getpages)(object, m, count, rbehind,
331 rahead);
332 if (r != VM_PAGER_OK)
333 return (r);
334
335 for (int i = 0; i < count; i++) {
336 /*
337 * If the pager has replaced a page, assert that it has
338 * updated the array.
339 */
340#ifdef INVARIANTS
341 KASSERT(m[i] == vm_page_relookup(object, pindex++),
342 ("%s: mismatch page %p pindex %ju", __func__,
343 m[i], (uintmax_t )pindex - 1));
344#endif
345
346 /*
347 * Zero out partially filled data.
348 */
349 if (m[i]->valid != VM_PAGE_BITS_ALL)
350 vm_page_zero_invalid(m[i], TRUE);
351 }
352 return (VM_PAGER_OK);
353}
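/*
 * Example (editor's sketch, not part of the original file): paging in a
 * single invalid page.  The caller must hold the page exclusive busy and
 * call with the object unlocked, matching the checks in
 * vm_pager_assert_in() above.  "pindex" is an illustrative page index.
 *
 *	vm_page_t m;
 *	int rv;
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL);
 *	VM_OBJECT_WUNLOCK(object);
 *	if (!vm_page_all_valid(m)) {
 *		rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
 *		if (rv != VM_PAGER_OK)
 *			... free the page and report an error ...
 *	}
 *	vm_page_xunbusy(m);
 */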
354
355int
356vm_pager_get_pages_async(vm_object_t object, vm_page_t *m, int count,
357 int *rbehind, int *rahead, pgo_getpages_iodone_t iodone, void *arg)
358{
359
360 MPASS(object->type < nitems(pagertab));
361 vm_pager_assert_in(object, m, count);
362
363 return ((*pagertab[object->type]->pgo_getpages_async)(object, m,
364 count, rbehind, rahead, iodone, arg));
365}
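/*
 * Example (editor's sketch, not part of the original file): asynchronous
 * page-in with a completion callback.  The callback signature comes from
 * pgo_getpages_iodone_t in vm_pager.h; "my_iodone" and "sc" are illustrative
 * names.
 *
 *	static void
 *	my_iodone(void *arg, vm_page_t *ma, int count, int error)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		... record the error, wake up the waiting thread ...
 *	}
 *
 *	error = vm_pager_get_pages_async(object, ma, count, NULL, NULL,
 *	    my_iodone, sc);
 */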
366
367/*
368 * vm_pager_put_pages() - inline, see vm/vm_pager.h
369 * vm_pager_has_page() - inline, see vm/vm_pager.h
370 */
371
372/*
373 * Search the specified pager object list for an object with the
374 * specified handle. If an object with the specified handle is found,
375 * increase its reference count and return it. Otherwise, return NULL.
376 *
377 * The pager object list must be locked.
378 */
379vm_object_t
380vm_pager_object_lookup(struct pagerlst *pg_list, void *handle)
381{
382 vm_object_t object;
383
384 TAILQ_FOREACH(object, pg_list, pager_object_list) {
385 if (object->handle == handle) {
386 VM_OBJECT_WLOCK(object);
387 if ((object->flags & OBJ_DEAD) == 0) {
388 vm_object_reference_locked(object);
389 VM_OBJECT_WUNLOCK(object);
390 break;
391 }
392 VM_OBJECT_WUNLOCK(object);
393 }
394 }
395 return (object);
396}
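/*
 * Example (editor's sketch, not part of the original file): pagers that keep
 * their objects on a handle-indexed list (the device pager, for instance)
 * call this under their list lock to reuse an existing object rather than
 * allocate a duplicate.  The list and mutex names are illustrative.
 *
 *	mtx_lock(&mypager_list_mtx);
 *	object = vm_pager_object_lookup(&mypager_object_list, handle);
 *	mtx_unlock(&mypager_list_mtx);
 *	if (object == NULL)
 *		... allocate a new object and insert it into the list ...
 */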
397
398int
399vm_pager_alloc_dyn_type(struct pagerops *ops, int base_type)
400{
401 int res;
402
403 mtx_lock(&pagertab_lock);
404 MPASS(base_type == -1 ||
405 (base_type >= OBJT_DEFAULT && base_type < nitems(pagertab)));
406 for (res = OBJT_FIRST_DYN; res < nitems(pagertab); res++) {
407 if (pagertab[res] == NULL)
408 break;
409 }
410 if (res == nitems(pagertab)) {
411 mtx_unlock(&pagertab_lock);
412 return (-1);
413 }
414 if (base_type != -1) {
415 MPASS(pagertab[base_type] != NULL);
416#define FIX(n) \
417 if (ops->pgo_##n == NULL) \
418 ops->pgo_##n = pagertab[base_type]->pgo_##n
419 FIX(init);
420 FIX(alloc);
421 FIX(dealloc);
422 FIX(getpages);
423 FIX(getpages_async);
424 FIX(putpages);
425 FIX(haspage);
426 FIX(populate);
427 FIX(pageunswapped);
428 FIX(update_writecount);
429 FIX(release_writecount);
430 FIX(set_writeable_dirty);
431 FIX(mightbedirty);
432 FIX(getvp);
433 FIX(freespace);
434#undef FIX
435 }
436 pagertab[res] = ops; /* XXXKIB should be rel, but acq is too much */
437 mtx_unlock(&pagertab_lock);
438 return (res);
439}
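/*
 * Example (editor's sketch, not part of the original file): a subsystem can
 * register its own pager type at load time, inheriting any methods it leaves
 * NULL from an existing base type (tmpfs does this with the swap pager as
 * its base).  The ops structure and handler names below are illustrative.
 *
 *	static struct pagerops mypagerops = {
 *		.pgo_kvme_type = KVME_TYPE_VNODE,
 *		.pgo_getpages = mypager_getpages,
 *	};
 *	static int mypager_type;
 *
 *	mypager_type = vm_pager_alloc_dyn_type(&mypagerops, OBJT_SWAP);
 *	if (mypager_type == -1)
 *		return (ENOMEM);
 *	...
 *	vm_pager_free_dyn_type(mypager_type);
 */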
440
441void
442vm_pager_free_dyn_type(objtype_t type)
443{
444 MPASS(type >= OBJT_FIRST_DYN && type < nitems(pagertab));
445
446 mtx_lock(&pagertab_lock);
447 MPASS(pagertab[type] != NULL);
448 pagertab[type] = NULL;
449 mtx_unlock(&pagertab_lock);
450}
451
452static int
453pbuf_ctor(void *mem, int size, void *arg, int flags)
454{
455 struct buf *bp = mem;
456
457 bp->b_vp = NULL;
458 bp->b_bufobj = NULL;
459
460 /* copied from initpbuf() */
461 bp->b_rcred = NOCRED;
462 bp->b_wcred = NOCRED;
463 bp->b_qindex = 0; /* On no queue (QUEUE_NONE) */
464 bp->b_data = bp->b_kvabase;
465 bp->b_xflags = 0;
466 bp->b_flags = B_MAXPHYS;
467 bp->b_ioflags = 0;
468 bp->b_iodone = NULL;
469 bp->b_error = 0;
470 BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL);
471
472 return (0);
473}
474
475static void
476pbuf_dtor(void *mem, int size, void *arg)
477{
478 struct buf *bp = mem;
479
480 if (bp->b_rcred != NOCRED) {
481 crfree(bp->b_rcred);
482 bp->b_rcred = NOCRED;
483 }
484 if (bp->b_wcred != NOCRED) {
485 crfree(bp->b_wcred);
486 bp->b_wcred = NOCRED;
487 }
488
489 BUF_UNLOCK(bp);
490}
491
492static const char pbuf_wmesg[] = "pbufwait";
493
494static int
495pbuf_init(void *mem, int size, int flags)
496{
497 struct buf *bp = mem;
498
499 bp->b_kvabase = (void *)kva_alloc(ptoa(PBUF_PAGES));
500 if (bp->b_kvabase == NULL)
501 return (ENOMEM);
502 bp->b_kvasize = ptoa(PBUF_PAGES);
503 BUF_LOCKINIT(bp, pbuf_wmesg);
504 LIST_INIT(&bp->b_dep);
505 bp->b_rcred = bp->b_wcred = NOCRED;
506 bp->b_xflags = 0;
507
508 return (0);
509}
510
511/*
512 * Associate a p-buffer with a vnode.
513 *
514 * Also sets B_PAGING flag to indicate that vnode is not fully associated
515 * with the buffer. i.e. the bp has not been linked into the vnode or
516 * ref-counted.
517 */
518void
519pbgetvp(struct vnode *vp, struct buf *bp)
520{
521
522 KASSERT(bp->b_vp == NULL, ("pbgetvp: not free"));
523 KASSERT(bp->b_bufobj == NULL, ("pbgetvp: not free (bufobj)"));
524
525 bp->b_vp = vp;
526 bp->b_flags |= B_PAGING;
527 bp->b_bufobj = &vp->v_bufobj;
528}
529
530/*
531 * Associate a p-buffer with a bufobj.
532 *
533 * Also sets B_PAGING flag to indicate that vnode is not fully associated
534 * with the buffer. i.e. the bp has not been linked into the vnode or
535 * ref-counted.
536 */
537void
538pbgetbo(struct bufobj *bo, struct buf *bp)
539{
540
541 KASSERT(bp->b_vp == NULL, ("pbgetbo: not free (vnode)"));
542 KASSERT(bp->b_bufobj == NULL, ("pbgetbo: not free (bufobj)"));
543
544 bp->b_flags |= B_PAGING;
545 bp->b_bufobj = bo;
546}
547
548/*
549 * Disassociate a p-buffer from a vnode.
550 */
551void
552pbrelvp(struct buf *bp)
553{
554
555 KASSERT(bp->b_vp != NULL, ("pbrelvp: NULL"));
556 KASSERT(bp->b_bufobj != NULL, ("pbrelvp: NULL bufobj"));
557 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
558 ("pbrelvp: pager buf on vnode list."));
559
560 bp->b_vp = NULL;
561 bp->b_bufobj = NULL;
562 bp->b_flags &= ~B_PAGING;
563}
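/*
 * Example (editor's sketch, not part of the original file): a pager doing
 * vnode I/O brackets the request with pbgetvp()/pbrelvp().  "zone" stands
 * for a pbuf zone created with pbuf_zsecond_create(), and the synchronous
 * wait is only one possible completion scheme.
 *
 *	bp = uma_zalloc(zone, M_WAITOK);
 *	pbgetvp(vp, bp);
 *	bp->b_iocmd = BIO_READ;
 *	... set up b_data, b_bcount and b_blkno, then start the I/O ...
 *	bwait(bp, PVM, "mypgin");
 *	pbrelvp(bp);
 *	uma_zfree(zone, bp);
 */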
564
565/*
566 * Disassociate a p-buffer from a bufobj.
567 */
568void
569pbrelbo(struct buf *bp)
570{
571
572 KASSERT(bp->b_vp == NULL, ("pbrelbo: vnode"));
573 KASSERT(bp->b_bufobj != NULL, ("pbrelbo: NULL bufobj"));
574 KASSERT((bp->b_xflags & (BX_VNDIRTY | BX_VNCLEAN)) == 0,
575 ("pbrelbo: pager buf on vnode list."));
576
577 bp->b_bufobj = NULL;
578 bp->b_flags &= ~B_PAGING;
579}
580
581void
582vm_object_set_writeable_dirty(vm_object_t object)
583{
584 pgo_set_writeable_dirty_t *method;
585
586 MPASS(object->type < nitems(pagertab));
587
588 method = pagertab[object->type]->pgo_set_writeable_dirty;
589 if (method != NULL)
590 method(object);
591}
592
593bool
594vm_object_mightbedirty(vm_object_t object)
595{
596 pgo_mightbedirty_t *method;
597
598 MPASS(object->type < nitems(pagertab));
599
600 method = pagertab[object->type]->pgo_mightbedirty;
601 if (method == NULL)
602 return (false);
603 return (method(object));
604}
605
606/*
607 * Return the kvme type of the given object.
608 * If vpp is not NULL, set it to the object's vm_object_vnode() or NULL.
609 */
610int
611vm_object_kvme_type(vm_object_t object, struct vnode **vpp)
612{
613 VM_OBJECT_ASSERT_LOCKED(object);
614 MPASS(object->type < nitems(pagertab));
615
616 if (vpp != NULL)
617 *vpp = vm_object_vnode(object);
618 return (pagertab[object->type]->pgo_kvme_type);
619}